/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.1.1.1 2004-04-15 01:52:13 phoenix Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/compatmac.h>

// debugging, turns off buffer write mode
//#define FORCE_WORD_WRITE

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct map_info *);

static int do_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void do_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        probe: NULL, /* Not usable directly */
        destroy: cfi_intelext_destroy,
        name: "cfi_cmdset_0001",
        module: THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        for (i=9; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
}
#endif

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        __u32 base = cfi->chips[0].start;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;
                int ofs_factor = cfi->interleave * cfi->device_type;

                //printk(" Intel/Sharp Extended Query Table at 0x%4.4X\n", adr);
                if (!adr)
                        return NULL;

                /* Switch it into Query Mode */
                cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);

                extp = kmalloc(sizeof(*extp), GFP_KERNEL);
                if (!extp) {
                        printk(KERN_ERR "Failed to allocate memory\n");
                        return NULL;
                }

                /* Read in the Extended Query Table */
                for (i=0; i<sizeof(*extp); i++) {
                        ((unsigned char *)extp)[i] =
                                cfi_read_query(map, (base+((adr+i)*ofs_factor)));
                }

                if (extp->MajorVersion != '1' ||
                    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
                        printk(KERN_WARNING "  Unknown IntelExt Extended Query "
                               "version %c.%c.\n",  extp->MajorVersion,
                               extp->MinorVersion);
                        kfree(extp);
                        return NULL;
                }

                /* Do some byteswapping if necessary */
                extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
                extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
                extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
//#define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
                        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                               "erase on write disabled.\n");
                        extp->SuspendCmdSupport &= ~1;
#else
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
#endif
                }
                /* Install our own private info structure */
                cfi->cmdset_priv = extp;
        }

        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
        }

        map->fldrv = &cfi_intelext_chipdrv;

        /* Make sure it's in read mode */
        cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
        return cfi_intelext_setup(map);
}

static struct mtd_info *cfi_intelext_setup(struct map_info *map)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                goto setup_err;
        }

        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;
        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

        /* Also select the correct geometry setup too */
        mtd->erase = cfi_intelext_erase_varsize;
        mtd->read = cfi_intelext_read;

        if(map->point && map->unpoint){
                mtd->point = do_point;
                mtd->unpoint = do_unpoint;
        }

#ifndef FORCE_WORD_WRITE
        if ( cfi->cfiq->BufWriteTimeoutTyp ) {
                printk("Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
        } else {
#else
        {
#endif
                printk("Using word write method\n" );
                mtd->write = cfi_intelext_write_words;
        }
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->sync = cfi_intelext_sync;
        mtd->lock = cfi_intelext_lock;
        mtd->unlock = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume = cfi_intelext_resume;
        mtd->flags = MTD_CAP_NORFLASH;
        map->fldrv = &cfi_intelext_chipdrv;
        MOD_INC_USE_COUNT;
        mtd->name = map->name;
        return mtd;

 setup_err:
        if(mtd) {
                if(mtd->eraseregions)
                        kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        return NULL;
}

static int do_point_onechip (struct map_info *map,  struct flchip *chip, loff_t adr, size_t len)
{
        cfi_word status, status_OK;
        unsigned long timeo;
        DECLARE_WAITQUEUE(wait, current);
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1);

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        timeo = jiffies + HZ;
 retry:
        spin_lock(chip->mutex);

        /* Check that the chip's ready to talk to us.
         * If it's in FL_ERASING state, suspend it and make it talk now.
         */
        switch (chip->state) {

        case FL_READY:
        case FL_POINT:
                break;

        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                cfi_write(map, CMD(0x70), cmd_addr);
                chip->state = FL_STATUS;

        case FL_STATUS:
                status = cfi_read(map, cmd_addr);
                if ((status & status_OK) == status_OK) {
                        cfi_write(map, CMD(0xff), cmd_addr);
                        chip->state = FL_READY;
                        break;
                }

                /* Urgh. Chip not yet ready to talk to us. */
                if (time_after(jiffies, timeo)) {
                        spin_unlock(chip->mutex);
                        printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %llx\n", (__u64)status);
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                cfi_udelay(1);
                goto retry;

        default:
                /* Stick ourselves on a wait queue to be woken when
                   someone changes the status */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;
                goto retry;
        }

        chip->state = FL_POINT;
        chip->ref_point_counter++;
        spin_unlock(chip->mutex);
        return 0;
}
static int do_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (from + len > mtd->size)
                return -EINVAL;

        *mtdbuf = map->point(map, from, len);
        if(*mtdbuf == NULL)
                return -EINVAL; /* can not point this region */
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}

static void do_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        map->unpoint(map, addr, from, len);
        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                chip = &cfi->chips[chipnum];
                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if(chip->state == FL_POINT){
                        chip->ref_point_counter--;
                        if(chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk("Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
                wake_up(&chip->wq);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        cfi_word status, status_OK;
        unsigned long timeo;
        DECLARE_WAITQUEUE(wait, current);
        int suspended = 0;
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1);

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        timeo = jiffies + HZ;
 retry:
        spin_lock(chip->mutex);

        /* Check that the chip's ready to talk to us.
         * If it's in FL_ERASING state, suspend it and make it talk now.
         */
        switch (chip->state) {
        case FL_ERASING:
                if (!cfi->cmdset_priv ||
                    !(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
                        goto sleep; /* We don't support erase suspend */

                cfi_write (map, CMD(0xb0), cmd_addr);
                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                cfi_write(map, CMD(0x70), cmd_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                //              printk("Erase suspending at 0x%lx\n", cmd_addr);
                for (;;) {
                        status = cfi_read(map, cmd_addr);
                        if ((status & status_OK) == status_OK)
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh */
                                cfi_write(map, CMD(0xd0), cmd_addr);
                                /* make sure we're in 'read status' mode */
                                cfi_write(map, CMD(0x70), cmd_addr);
                                chip->state = FL_ERASING;
                                spin_unlock(chip->mutex);
                                printk(KERN_ERR "Chip not ready after erase "
                                       "suspended: status = 0x%llx\n", (__u64)status);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                }

                suspended = 1;
                cfi_write(map, CMD(0xff), cmd_addr);
                chip->state = FL_READY;
                break;

#if 0
        case FL_WRITING:
                /* Not quite yet */
#endif

        case FL_READY:
        case FL_POINT:
                break;

        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                cfi_write(map, CMD(0x70), cmd_addr);
                chip->state = FL_STATUS;

        case FL_STATUS:
                status = cfi_read(map, cmd_addr);
                if ((status & status_OK) == status_OK) {
                        cfi_write(map, CMD(0xff), cmd_addr);
                        chip->state = FL_READY;
                        break;
                }

                /* Urgh. Chip not yet ready to talk to us. */
                if (time_after(jiffies, timeo)) {
                        spin_unlock(chip->mutex);
                        printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %llx\n", (__u64)status);
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                cfi_udelay(1);
                goto retry;

        default:
        sleep:
                /* Stick ourselves on a wait queue to be woken when
                   someone changes the status */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;
                goto retry;
        }

        map->copy_from(map, buf, adr, len);

        if (suspended) {
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                cfi_write(map, CMD(0xd0), cmd_addr);
                cfi_write(map, CMD(0x70), cmd_addr);
        }

        wake_up(&chip->wq);
        spin_unlock(chip->mutex);
        return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}

static int cfi_intelext_read_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, int base_offst, int reg_sz)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp=cfi->cmdset_priv;
        int ofs_factor = cfi->interleave * cfi->device_type;
        int   count=len;
        struct flchip *chip;
        int chip_num,offst;
        unsigned long timeo;
        DECLARE_WAITQUEUE(wait, current);

        chip=0;
        /* Calculate which chip & protection register offset we need */
        chip_num=((unsigned int)from/reg_sz);
        offst=from-(reg_sz*chip_num)+base_offst;

        while(count){

                if(chip_num>=cfi->numchips)
                        goto out;

                /* Make sure that the chip is in the right state */

                timeo = jiffies + HZ;
                chip=&cfi->chips[chip_num];
        retry:
                spin_lock(chip->mutex);

                switch (chip->state) {
                case FL_READY:
                case FL_STATUS:
                case FL_CFI_QUERY:
                case FL_JEDEC_QUERY:
                        break;

                default:
                                /* Stick ourselves on a wait queue to be woken when
                                   someone changes the status */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + HZ;
                        goto retry;
                }

                /* Now read the data required from this flash */

                cfi_send_gen_cmd(0x90, 0x55,chip->start, map, cfi, cfi->device_type, NULL);
                while(count && ((offst-base_offst)<reg_sz)){
                        *buf=map->read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
                        buf++;
                        offst++;
                        count--;
                }

                chip->state=FL_CFI_QUERY;
                spin_unlock(chip->mutex);
                /* Move on to the next chip */
                chip_num++;
                offst=base_offst;

        }

 out:
        wake_up(&chip->wq);
        return len-count;
}

static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp=cfi->cmdset_priv;
        int base_offst,reg_sz;

        /* Check that we actually have some protection registers */
        if(!(extp->FeatureSupport&64)){
                printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
                return 0;
        }

        base_offst=(1<<extp->FactProtRegSize);
        reg_sz=(1<<extp->UserProtRegSize);

        return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}

static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp=cfi->cmdset_priv;
        int base_offst,reg_sz;

        /* Check that we actually have some protection registers */
        if(!(extp->FeatureSupport&64)){
                printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
                return 0;
        }

        base_offst=0;
        reg_sz=(1<<extp->FactProtRegSize);

        return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}


static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, cfi_word datum)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        cfi_word status, status_OK;
        unsigned long timeo;
        DECLARE_WAITQUEUE(wait, current);
        int z, suspended=0, ret=0;

        adr += chip->start;

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        timeo = jiffies + HZ;
 retry:
        spin_lock(chip->mutex);

        /* Check that the chip's ready to talk to us.
         * Later, we can actually think about interrupting it
         * if it's in FL_ERASING state.
         * Not just yet, though.
         */
        switch (chip->state) {
        case FL_READY:
                break;

        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                cfi_write(map, CMD(0x70), adr);
                chip->state = FL_STATUS;

        case FL_STATUS:
                status = cfi_read(map, adr);
                if ((status & status_OK) == status_OK)
                        break;

                /* Urgh. Chip not yet ready to talk to us. */
                if (time_after(jiffies, timeo)) {
                        spin_unlock(chip->mutex);
                        printk(KERN_ERR "waiting for chip to be ready timed out in read\n");
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                cfi_udelay(1);
                goto retry;

        case FL_ERASING:
                if (!extp ||
                    !((extp->FeatureSupport & 2) && (extp->SuspendCmdSupport & 1)))
                        goto sleep; /* We don't support erase suspend */

                cfi_write (map, CMD(0xb0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                cfi_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                for (;;) {
                        status = cfi_read(map, adr);
                        if ((status & status_OK) == status_OK)
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh */
                                cfi_write(map, CMD(0xd0), adr);
                                /* make sure we're in 'read status' mode */
                                cfi_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                spin_unlock(chip->mutex);
                                printk(KERN_ERR "Chip not ready after erase "
                                       "suspended: status = 0x%llx\n", (__u64)status);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                }
                suspended = 1;
                chip->state = FL_STATUS;
                break;

        default:
        sleep:
                /* Stick ourselves on a wait queue to be woken when
                   someone changes the status */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;
                goto retry;
        }

        ENABLE_VPP(map);
        cfi_write(map, CMD(0x40), adr);
        cfi_write(map, datum, adr);
        chip->state = FL_WRITING;

        spin_unlock(chip->mutex);
        cfi_udelay(chip->word_write_time);
        spin_lock(chip->mutex);

        timeo = jiffies + (HZ/2);
        z = 0;
        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        spin_lock(chip->mutex);
                        continue;
                }

                status = cfi_read(map, adr);
                if ((status & status_OK) == status_OK)
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        chip->state = FL_STATUS;
                        DISABLE_VPP(map);
                        printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
                        ret = -EIO;
                        goto out;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                z++;
                cfi_udelay(1);
                spin_lock(chip->mutex);
        }
        if (!z) {
                chip->word_write_time--;
                if (!chip->word_write_time)
                        chip->word_write_time++;
        }
        if (z > 1)
                chip->word_write_time++;

        /* Done and happy. */
        chip->state = FL_STATUS;
        /* check for lock bit */
        if (status & CMD(0x02)) {
                /* clear status */
                cfi_write(map, CMD(0x50), adr);
                /* put back into read status register mode */
                cfi_write(map, CMD(0x70), adr);
                ret = -EROFS;
                goto out;
        }
 out:
        if (suspended) {
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                cfi_write(map, CMD(0xd0), adr);
                cfi_write(map, CMD(0x70), adr);
        } else
                DISABLE_VPP(map); /* must not clear the VPP if there is a suspended erase to be resumed */

        wake_up(&chip->wq);
        spin_unlock(chip->mutex);
        return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to  - (chipnum << cfi->chipshift);

        /* If it's not bus-aligned, do the first byte write */
        if (ofs & (CFIDEV_BUSWIDTH-1)) {
                unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
                int gap = ofs - bus_ofs;
                int i = 0, n = 0;
                u_char tmp_buf[8];
                cfi_word datum;

                while (gap--)
                        tmp_buf[i++] = 0xff;
                while (len && i < CFIDEV_BUSWIDTH)
                        tmp_buf[i++] = buf[n++], len--;
                while (i < CFIDEV_BUSWIDTH)
                        tmp_buf[i++] = 0xff;

                if (cfi_buswidth_is_2()) {
                        datum = *(__u16*)tmp_buf;
                } else if (cfi_buswidth_is_4()) {
                        datum = *(__u32*)tmp_buf;
                } else if (cfi_buswidth_is_8()) {
                        datum = *(__u64*)tmp_buf;
                } else {
                        return -EINVAL;  /* should never happen, but be safe */
                }

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                               bus_ofs, datum);
                if (ret)
                        return ret;

                ofs += n;
                buf += n;
                (*retlen) += n;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        while(len >= CFIDEV_BUSWIDTH) {
                cfi_word datum;

                if (cfi_buswidth_is_1()) {
                        datum = *(__u8*)buf;
                } else if (cfi_buswidth_is_2()) {
                        datum = *(__u16*)buf;
                } else if (cfi_buswidth_is_4()) {
                        datum = *(__u32*)buf;
                } else if (cfi_buswidth_is_8()) {
                        datum = *(__u64*)buf;
                } else {
                        return -EINVAL;
                }

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                ofs, datum);
                if (ret)
                        return ret;

                ofs += CFIDEV_BUSWIDTH;
                buf += CFIDEV_BUSWIDTH;
                (*retlen) += CFIDEV_BUSWIDTH;
                len -= CFIDEV_BUSWIDTH;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        if (len & (CFIDEV_BUSWIDTH-1)) {
                int i = 0, n = 0;
                u_char tmp_buf[8];
                cfi_word datum;

                while (len--)
                        tmp_buf[i++] = buf[n++];
                while (i < CFIDEV_BUSWIDTH)
                        tmp_buf[i++] = 0xff;

                if (cfi_buswidth_is_2()) {
                        datum = *(__u16*)tmp_buf;
                } else if (cfi_buswidth_is_4()) {
                        datum = *(__u32*)tmp_buf;
                } else if (cfi_buswidth_is_8()) {
                        datum = *(__u64*)tmp_buf;
                } else {
                        return -EINVAL;  /* should never happen, but be safe */
                }

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                               ofs, datum);
                if (ret)
                        return ret;

                (*retlen) += n;
        }

        return 0;
}


static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
                                  unsigned long adr, const u_char *buf, int len)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        cfi_word status, status_OK;
        unsigned long cmd_adr, timeo;
        DECLARE_WAITQUEUE(wait, current);
        int wbufsize, z, suspended=0, ret=0;

        wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
        adr += chip->start;
        cmd_adr = adr & ~(wbufsize-1);

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        timeo = jiffies + HZ;
 retry:
        spin_lock(chip->mutex);

        /* Check that the chip's ready to talk to us.
         * Later, we can actually think about interrupting it
         * if it's in FL_ERASING state.
         * Not just yet, though.
         */
        switch (chip->state) {
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                cfi_write(map, CMD(0x70), cmd_adr);
                chip->state = FL_STATUS;

        case FL_STATUS:
                status = cfi_read(map, cmd_adr);
                if ((status & status_OK) == status_OK)
                        break;
                /* Urgh. Chip not yet ready to talk to us. */
                if (time_after(jiffies, timeo)) {
                        spin_unlock(chip->mutex);
                        printk(KERN_ERR "waiting for chip to be ready timed out in buffer write\n");
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                cfi_udelay(1);
                goto retry;

        case FL_ERASING:
                if (!extp ||
                    !((extp->FeatureSupport & 2) && (extp->SuspendCmdSupport & 1)))
                        goto sleep; /* We don't support erase suspend */

                cfi_write (map, CMD(0xb0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                cfi_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                for (;;) {
                        status = cfi_read(map, adr);
                        if ((status & status_OK) == status_OK)
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh */
                                cfi_write(map, CMD(0xd0), adr);
                                /* make sure we're in 'read status' mode */
                                cfi_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                spin_unlock(chip->mutex);
                                printk(KERN_ERR "Chip not ready after erase "
                                       "suspended: status = 0x%llx\n", (__u64)status);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                }
                suspended = 1;
                chip->state = FL_STATUS;
                break;

        default:
        sleep:
                /* Stick ourselves on a wait queue to be woken when
                   someone changes the status */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;
                goto retry;
        }
        /* We know we're now in FL_STATUS mode, and 'status' is current */
        /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
           [...], the device will not accept any more Write to Buffer commands".
           So we must check here and reset those bits if they're set. Otherwise
           we're just pissing in the wind */
        if (status & CMD(0x30)) {
                printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %llx). Clearing.\n", (__u64)status);
                cfi_write(map, CMD(0x50), cmd_adr);
                cfi_write(map, CMD(0x70), cmd_adr);
        }
        ENABLE_VPP(map);
        chip->state = FL_WRITING_TO_BUFFER;

        z = 0;
        for (;;) {
                cfi_write(map, CMD(0xe8), cmd_adr);

                status = cfi_read(map, cmd_adr);
                if ((status & status_OK) == status_OK)
                        break;

                spin_unlock(chip->mutex);
                cfi_udelay(1);
                spin_lock(chip->mutex);

                if (++z > 20) {
                        /* Argh. Not ready for write to buffer */
                        cfi_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        DISABLE_VPP(map);
                        printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %llx, status = %llx\n", (__u64)status, (__u64)cfi_read(map, cmd_adr));
                        /* Odd. Clear status bits */
                        cfi_write(map, CMD(0x50), cmd_adr);
                        cfi_write(map, CMD(0x70), cmd_adr);
                        ret = -EIO;
                        goto out;
                }
        }

        /* Write length of data to come */
        cfi_write(map, CMD(len/CFIDEV_BUSWIDTH-1), cmd_adr );

        /* Write data */
        for (z = 0; z < len; z += CFIDEV_BUSWIDTH) {
                if (cfi_buswidth_is_1()) {
                        map->write8 (map, *((__u8*)buf)++, adr+z);
                } else if (cfi_buswidth_is_2()) {
                        map->write16 (map, *((__u16*)buf)++, adr+z);
                } else if (cfi_buswidth_is_4()) {
                        map->write32 (map, *((__u32*)buf)++, adr+z);
                } else if (cfi_buswidth_is_8()) {
                        map->write64 (map, *((__u64*)buf)++, adr+z);
                } else {
                        DISABLE_VPP(map);
                        ret = -EINVAL;
                        goto out;
                }
        }
        /* GO GO GO */
        cfi_write(map, CMD(0xd0), cmd_adr);
        chip->state = FL_WRITING;

        spin_unlock(chip->mutex);
        cfi_udelay(chip->buffer_write_time);
        spin_lock(chip->mutex);

        timeo = jiffies + (HZ/2);
        z = 0;
        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        spin_lock(chip->mutex);
                        continue;
                }

                status = cfi_read(map, cmd_adr);
                if ((status & status_OK) == status_OK)
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        chip->state = FL_STATUS;
                        DISABLE_VPP(map);
                        printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
                        ret = -EIO;
                        goto out;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                cfi_udelay(1);
                z++;
                spin_lock(chip->mutex);
        }
        if (!z) {
                chip->buffer_write_time--;
                if (!chip->buffer_write_time)
                        chip->buffer_write_time++;
        }
        if (z > 1)
                chip->buffer_write_time++;

        /* Done and happy. */
        chip->state = FL_STATUS;
        /* check for lock bit */
        if (status & CMD(0x02)) {
                /* clear status */
                cfi_write(map, CMD(0x50), cmd_adr);
                /* put back into read status register mode */
                cfi_write(map, CMD(0x70), adr);
                ret = -EROFS;
                goto out;
        }
 out:
        if (suspended) {
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                cfi_write(map, CMD(0xd0), adr);
                cfi_write(map, CMD(0x70), adr);
        } else
                DISABLE_VPP(map); /* must not clear the VPP if there is a suspended erase to be resumed */

        wake_up(&chip->wq);
        spin_unlock(chip->mutex);
        return ret;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
                                       size_t len, size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
        int ret = 0;
        int chipnum;
        unsigned long ofs;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to  - (chipnum << cfi->chipshift);

        /* If it's not bus-aligned, do the first word write */
        if (ofs & (CFIDEV_BUSWIDTH-1)) {
                size_t local_len = (-ofs)&(CFIDEV_BUSWIDTH-1);
                if (local_len > len)
                        local_len = len;
                ret = cfi_intelext_write_words(mtd, to, local_len,
                                               retlen, buf);
                if (ret)
                        return ret;
                ofs += local_len;
                buf += local_len;
                len -= local_len;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        /* Write buffer is worth it only if more than one word to write... */
        while(len > CFIDEV_BUSWIDTH) {
                /* We must not cross write block boundaries */
                int size = wbufsize - (ofs & (wbufsize-1));

                if (size > len)
                        size = len & ~(CFIDEV_BUSWIDTH-1);
                ret = do_write_buffer(map, &cfi->chips[chipnum],
                                      ofs, buf, size);
                if (ret)
                        return ret;

                ofs += size;
                buf += size;
                (*retlen) += size;
                len -= size;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        /* ... and write the remaining bytes */
        if (len > 0) {
                size_t local_retlen;
                ret = cfi_intelext_write_words(mtd, ofs + (chipnum << cfi->chipshift),
                                               len, &local_retlen, buf);
                if (ret)
                        return ret;
                (*retlen) += local_retlen;
        }

        return 0;
}
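
/*
 * cfi_intelext_varsize_frob() factors out the walk over variable-size erase
 * regions: it validates that the start and end of the request are aligned to
 * the erase size in effect at those addresses, then calls the supplied
 * callback once per block, advancing to the next region or chip as the
 * address crosses a boundary.  Erase, lock and unlock all go through it,
 * e.g.:
 *
 *      cfi_intelext_varsize_frob(mtd, do_erase_oneblock, ofs, len, 0);
 */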

typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
                              unsigned long adr, void *thunk);

static int cfi_intelext_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
                                     loff_t ofs, size_t len, void *thunk)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long adr;
        int chipnum, ret = 0;
        int i, first;
        struct mtd_erase_region_info *regions = mtd->eraseregions;

        if (ofs > mtd->size)
                return -EINVAL;

        if ((len + ofs) > mtd->size)
                return -EINVAL;

        /* Check that both start and end of the requested erase are
         * aligned with the erasesize at the appropriate addresses.
         */

        i = 0;

        /* Skip all erase regions which are ended before the start of
           the requested erase. Actually, to save on the calculations,
           we skip to the first erase region which starts after the
           start of the requested erase, and then go back one.
        */

        while (i < mtd->numeraseregions && ofs >= regions[i].offset)
               i++;
        i--;

        /* OK, now i is pointing at the erase region in which this
           erase request starts. Check the start of the requested
           erase range is aligned with the erase size which is in
           effect here.
        */

        if (ofs & (regions[i].erasesize-1))
                return -EINVAL;

        /* Remember the erase region we start on */
        first = i;

        /* Next, check that the end of the requested erase is aligned
         * with the erase region at that address.
         */

        while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
                i++;

        /* As before, drop back one to point at the region in which
           the address actually falls
        */
        i--;

        if ((ofs + len) & (regions[i].erasesize-1))
                return -EINVAL;

        chipnum = ofs >> cfi->chipshift;
        adr = ofs - (chipnum << cfi->chipshift);

        i=first;

        while(len) {
                ret = (*frob)(map, &cfi->chips[chipnum], adr, thunk);

                if (ret)
                        return ret;

                adr += regions[i].erasesize;
                len -= regions[i].erasesize;

                if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
                        i++;

                if (adr >> cfi->chipshift) {
                        adr = 0;
                        chipnum++;

                        if (chipnum >= cfi->numchips)
                                break;
                }
        }

        return 0;
}
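
/*
 * do_erase_oneblock() issues the Intel block erase sequence: 0x50 (Clear
 * Status Register), then 0x20 (Block Erase Setup) followed by 0xD0
 * (Confirm), and polls the status register until SR.7 (0x80, write state
 * machine ready) is set.  Error bits within the 0x3a mask are decoded
 * afterwards: 0x30 together means an improper command sequence, 0x20 an
 * erase failure (retried up to three times), 0x08 a VPP/voltage problem
 * and 0x02 a locked block, which is reported as -EROFS.
 */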
1466
 
1467
 
1468
static int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1469
{
1470
        struct cfi_private *cfi = map->fldrv_priv;
1471
        cfi_word status, status_OK;
1472
        unsigned long timeo;
1473
        int retries = 3;
1474
        DECLARE_WAITQUEUE(wait, current);
1475
        int ret = 0;
1476
 
1477
        adr += chip->start;
1478
 
1479
        /* Let's determine this according to the interleave only once */
1480
        status_OK = CMD(0x80);
1481
 
1482
        timeo = jiffies + HZ;
1483
retry:
1484
        spin_lock(chip->mutex);
1485
 
1486
        /* Check that the chip's ready to talk to us. */
1487
        switch (chip->state) {
1488
        case FL_CFI_QUERY:
1489
        case FL_JEDEC_QUERY:
1490
        case FL_READY:
1491
                cfi_write(map, CMD(0x70), adr);
1492
                chip->state = FL_STATUS;
1493
 
1494
        case FL_STATUS:
1495
                status = cfi_read(map, adr);
1496
                if ((status & status_OK) == status_OK)
1497
                        break;
1498
 
1499
                /* Urgh. Chip not yet ready to talk to us. */
1500
                if (time_after(jiffies, timeo)) {
1501
                        spin_unlock(chip->mutex);
1502
                        printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
1503
                        return -EIO;
1504
                }
1505
 
1506
                /* Latency issues. Drop the lock, wait a while and retry */
1507
                spin_unlock(chip->mutex);
1508
                cfi_udelay(1);
1509
                goto retry;
1510
 
1511
        default:
1512
                /* Stick ourselves on a wait queue to be woken when
1513
                   someone changes the status */
1514
                set_current_state(TASK_UNINTERRUPTIBLE);
1515
                add_wait_queue(&chip->wq, &wait);
1516
                spin_unlock(chip->mutex);
1517
                schedule();
1518
                remove_wait_queue(&chip->wq, &wait);
1519
                timeo = jiffies + HZ;
1520
                goto retry;
1521
        }
1522
 
1523
        ENABLE_VPP(map);
1524
        /* Clear the status register first */
1525
        cfi_write(map, CMD(0x50), adr);
1526
 
1527
        /* Now erase */
1528
        cfi_write(map, CMD(0x20), adr);
1529
        cfi_write(map, CMD(0xD0), adr);
1530
        chip->state = FL_ERASING;
1531
        chip->oldstate = 0;
1532
 
1533
        spin_unlock(chip->mutex);
1534
        set_current_state(TASK_UNINTERRUPTIBLE);
1535
        schedule_timeout((chip->erase_time*HZ)/(2*1000));
1536
        spin_lock(chip->mutex);
1537
 
1538
        /* FIXME. Use a timer to check this, and return immediately. */
1539
        /* Once the state machine's known to be working I'll do that */
1540
 
1541
        timeo = jiffies + (HZ*20);
1542
        for (;;) {
1543
                if (chip->state != FL_ERASING) {
1544
                        /* Someone's suspended the erase. Sleep */
1545
                        set_current_state(TASK_UNINTERRUPTIBLE);
1546
                        add_wait_queue(&chip->wq, &wait);
1547
                        spin_unlock(chip->mutex);
1548
                        schedule();
1549
                        remove_wait_queue(&chip->wq, &wait);
1550
                        spin_lock(chip->mutex);
1551
                        continue;
1552
                }
1553
                if (chip->oldstate) {
1554
                        /* This erase was suspended and resumed.
1555
                           Adjust the timeout */
1556
                        timeo = jiffies + (HZ*20); /* FIXME */
1557
                        chip->oldstate = 0;
1558
                }
1559
 
1560
                status = cfi_read(map, adr);
1561
                if ((status & status_OK) == status_OK)
1562
                        break;
1563
 
1564
                /* OK Still waiting */
1565
                if (time_after(jiffies, timeo)) {
1566
                        cfi_write(map, CMD(0x70), adr);
1567
                        chip->state = FL_STATUS;
1568
                        printk(KERN_ERR "waiting for erase at %08lx to complete timed out. Xstatus = %llx, status = %llx.\n",
1569
                               adr, (__u64)status, (__u64)cfi_read(map, adr));
1570
                        /* Clear status bits */
1571
                        cfi_write(map, CMD(0x50), adr);
1572
                        cfi_write(map, CMD(0x70), adr);
1573
                        DISABLE_VPP(map);
1574
                        spin_unlock(chip->mutex);
1575
                        return -EIO;
1576
                }
1577
 
1578
                /* Latency issues. Drop the lock, wait a while and retry */
1579
                spin_unlock(chip->mutex);
1580
                set_current_state(TASK_UNINTERRUPTIBLE);
1581
                schedule_timeout(1);
1582
                spin_lock(chip->mutex);
1583
        }
1584
 
1585
        DISABLE_VPP(map);
1586
        ret = 0;
1587
 
1588
        /* We've broken this before. It doesn't hurt to be safe */
1589
        cfi_write(map, CMD(0x70), adr);
1590
        chip->state = FL_STATUS;
1591
        status = cfi_read(map, adr);
1592
 
1593
        /* check for lock bit */
1594
        if (status & CMD(0x3a)) {
1595
                unsigned char chipstatus = status;
1596
                if (status != CMD(status & 0xff)) {
1597
                        int i;
1598
                        for (i = 1; i<CFIDEV_INTERLEAVE; i++) {
1599
                                      chipstatus |= status >> (cfi->device_type * 8);
1600
                        }
1601
                        printk(KERN_WARNING "Status is not identical for all chips: 0x%llx. Merging to give 0x%02x\n", (__u64)status, chipstatus);
1602
                }
1603
                /* Reset the error bits */
1604
                cfi_write(map, CMD(0x50), adr);
1605
                cfi_write(map, CMD(0x70), adr);
1606
 
1607
                if ((chipstatus & 0x30) == 0x30) {
1608
                        printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%llx\n", (__u64)status);
1609
                        ret = -EIO;
1610
                } else if (chipstatus & 0x02) {
1611
                        /* Protection bit set */
1612
                        ret = -EROFS;
1613
                } else if (chipstatus & 0x8) {
1614
                        /* Voltage */
1615
                        printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%llx\n", (__u64)status);
1616
                        ret = -EIO;
1617
                } else if (chipstatus & 0x20) {
1618
                        if (retries--) {
1619
                                printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx. Retrying...\n", adr, (__u64)status);
1620
                                timeo = jiffies + HZ;
1621
                                chip->state = FL_STATUS;
1622
                                spin_unlock(chip->mutex);
1623
                                goto retry;
1624
                        }
1625
                        printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx\n", adr, (__u64)status);
1626
                        ret = -EIO;
1627
                }
1628
        }
1629
 
1630
        wake_up(&chip->wq);
1631
        spin_unlock(chip->mutex);
1632
        return ret;
1633
}
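
/*
 * MTD entry point for erase: it delegates the per-block work to
 * do_erase_oneblock() via the varsize iterator above and, on success, marks
 * the request MTD_ERASE_DONE and invokes the caller's completion callback.
 */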

int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
        unsigned long ofs, len;
        int ret;

        ofs = instr->addr;
        len = instr->len;

        ret = cfi_intelext_varsize_frob(mtd, do_erase_oneblock, ofs, len, 0);
        if (ret)
                return ret;

        instr->state = MTD_ERASE_DONE;
        if (instr->callback)
                instr->callback(instr);

        return 0;
}
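
/*
 * sync waits for every chip to become idle, parks it in FL_SYNCING so that
 * no new operation can start, and then restores the previous state once all
 * chips have been quiesced.  A chip that is still busy makes the caller
 * sleep on the chip's wait queue and retry.
 */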

static void cfi_intelext_sync (struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;
        int ret = 0;
        DECLARE_WAITQUEUE(wait, current);

        for (i=0; !ret && i<cfi->numchips; i++) {
                chip = &cfi->chips[i];

        retry:
                spin_lock(chip->mutex);

                switch(chip->state) {
                case FL_READY:
                case FL_STATUS:
                case FL_CFI_QUERY:
                case FL_JEDEC_QUERY:
                        chip->oldstate = chip->state;
                        chip->state = FL_SYNCING;
                        /* No need to wake_up() on this state change -
                         * as the whole point is that nobody can do anything
                         * with the chip now anyway.
                         */
                case FL_SYNCING:
                        spin_unlock(chip->mutex);
                        break;

                default:
                        /* Not an idle state */
                        add_wait_queue(&chip->wq, &wait);

                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);

                        goto retry;
                }
        }

        /* Unlock the chips again */

        for (i--; i >=0; i--) {
                chip = &cfi->chips[i];

                spin_lock(chip->mutex);

                if (chip->state == FL_SYNCING) {
                        chip->state = chip->oldstate;
                        wake_up(&chip->wq);
                }
                spin_unlock(chip->mutex);
        }
}
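
/*
 * Debug helper: with DEBUG_LOCK_BITS defined, the block lock status is read
 * back via the 0x90 (Read Identifier Codes) command at offset 2 within the
 * block, and the chip is then returned to read-array mode with 0xFF.
 */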

#ifdef DEBUG_LOCK_BITS
static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        int ofs_factor = cfi->interleave * cfi->device_type;

        cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
        printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
               adr, cfi_read_query(map, adr+(2*ofs_factor)));
        cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);

        return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
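
/*
 * do_xxlock_oneblock() handles both operations, selected by the thunk
 * argument: 0x60 (Set/Clear Block Lock Bits setup) followed by 0x01 locks
 * the block, 0x60 followed by 0xD0 unlocks it, and completion is again
 * detected by polling the status register for SR.7.
 */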

static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        cfi_word status, status_OK;
        unsigned long timeo = jiffies + HZ;
        DECLARE_WAITQUEUE(wait, current);

        adr += chip->start;

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        timeo = jiffies + HZ;
retry:
        spin_lock(chip->mutex);

        /* Check that the chip's ready to talk to us. */
        switch (chip->state) {
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
        case FL_READY:
                cfi_write(map, CMD(0x70), adr);
                chip->state = FL_STATUS;

        case FL_STATUS:
                status = cfi_read(map, adr);
                if ((status & status_OK) == status_OK)
                        break;

                /* Urgh. Chip not yet ready to talk to us. */
                if (time_after(jiffies, timeo)) {
                        spin_unlock(chip->mutex);
                        printk(KERN_ERR "%s: waiting for chip to be ready timed out\n", __FUNCTION__);
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                cfi_udelay(1);
                goto retry;

        default:
                /* Stick ourselves on a wait queue to be woken when
                   someone changes the status */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;
                goto retry;
        }

        ENABLE_VPP(map);
        cfi_write(map, CMD(0x60), adr);

        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
                cfi_write(map, CMD(0x01), adr);
                chip->state = FL_LOCKING;
        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
                cfi_write(map, CMD(0xD0), adr);
                chip->state = FL_UNLOCKING;
        } else
                BUG();

        spin_unlock(chip->mutex);
        schedule_timeout(HZ);
        spin_lock(chip->mutex);

        /* FIXME. Use a timer to check this, and return immediately. */
        /* Once the state machine's known to be working I'll do that */

        timeo = jiffies + (HZ*20);
        for (;;) {

                status = cfi_read(map, adr);
                if ((status & status_OK) == status_OK)
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        cfi_write(map, CMD(0x70), adr);
                        chip->state = FL_STATUS;
                        printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %llx, status = %llx.\n", (__u64)status, (__u64)cfi_read(map, adr));
                        DISABLE_VPP(map);
                        spin_unlock(chip->mutex);
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                cfi_udelay(1);
                spin_lock(chip->mutex);
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        DISABLE_VPP(map);
        wake_up(&chip->wq);
        spin_unlock(chip->mutex);
        return 0;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        int ret;

#ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
               __FUNCTION__, ofs, len);
        cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
                                  ofs, len, 0);
#endif

        ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
                                        ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
        cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
                                  ofs, len, 0);
#endif

        return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        int ret;

#ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
               __FUNCTION__, ofs, len);
        cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
                                  ofs, len, 0);
#endif

        ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
                                        ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
        cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
                                  ofs, len, 0);
#endif

        return ret;
}
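
/*
 * Power management: suspend only succeeds if every chip is in an idle state
 * (ready, status or query).  Each idle chip is parked in FL_PM_SUSPENDED; if
 * any chip is busy, the whole operation backs out, restores the states it
 * has already changed and returns -EAGAIN so the caller can retry later.
 */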

static int cfi_intelext_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;
        int ret = 0;

        for (i=0; !ret && i<cfi->numchips; i++) {
                chip = &cfi->chips[i];

                spin_lock(chip->mutex);

                switch(chip->state) {
                case FL_READY:
                case FL_STATUS:
                case FL_CFI_QUERY:
                case FL_JEDEC_QUERY:
                        chip->oldstate = chip->state;
                        chip->state = FL_PM_SUSPENDED;
                        /* No need to wake_up() on this state change -
                         * as the whole point is that nobody can do anything
                         * with the chip now anyway.
                         */
                case FL_PM_SUSPENDED:
                        break;

                default:
                        ret = -EAGAIN;
                        break;
                }
                spin_unlock(chip->mutex);
        }

        /* Unlock the chips again */

        if (ret) {
                for (i--; i >=0; i--) {
                        chip = &cfi->chips[i];

                        spin_lock(chip->mutex);

                        if (chip->state == FL_PM_SUSPENDED) {
                                /* No need to force it into a known state here,
                                   because we're returning failure, and it didn't
                                   get power cycled */
                                chip->state = chip->oldstate;
                                wake_up(&chip->wq);
                        }
                        spin_unlock(chip->mutex);
                }
        }

        return ret;
}
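
/*
 * On resume the chip may have been powered off and on again, so it cannot
 * be assumed to still be in status-read mode: each suspended chip is put
 * back into read-array mode and marked FL_READY before its waiters are
 * woken.
 */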

static void cfi_intelext_resume(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;

        for (i=0; i<cfi->numchips; i++) {

                chip = &cfi->chips[i];

                spin_lock(chip->mutex);

                /* Go to known state. Chip may have been power cycled */
                if (chip->state == FL_PM_SUSPENDED) {
                        /* Address the read-array command within this chip,
                           not at map offset 0 */
                        cfi_write(map, CMD(0xFF), chip->start);
                        chip->state = FL_READY;
                        wake_up(&chip->wq);
                }

                spin_unlock(chip->mutex);
        }
}
1958
 
1959
static void cfi_intelext_destroy(struct mtd_info *mtd)
1960
{
1961
        struct map_info *map = mtd->priv;
1962
        struct cfi_private *cfi = map->fldrv_priv;
1963
        kfree(cfi->cmdset_priv);
1964
        kfree(cfi->cfiq);
1965
        kfree(cfi);
1966
        kfree(mtd->eraseregions);
1967
}
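
/*
 * The command set handler is exported through inter_module_register() under
 * both the 0x0001 (Intel/Sharp extended) and 0x0003 command set names, so
 * generic CFI probe code can look the handler up by name and call
 * cfi_cmdset_0001() to instantiate the MTD device.
 */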

static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";

int __init cfi_intelext_init(void)
{
        inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
        inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
        return 0;
}

static void __exit cfi_intelext_exit(void)
{
        inter_module_unregister(im_name_1);
        inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
