OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [mtd/] [chips/] [amd_flash.c] - Blame information for rev 1275

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 * MTD map driver for AMD compatible flash chips (non-CFI)
3
 *
4
 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
5
 *
6
 * $Id: amd_flash.c,v 1.1.1.1 2004-04-15 01:52:08 phoenix Exp $
7
 *
8
 * Copyright (c) 2001 Axis Communications AB
9
 *
10
 * This file is under GPL.
11
 *
12
 */
13
 
14
#include <linux/module.h>
15
#include <linux/types.h>
16
#include <linux/kernel.h>
17
#include <linux/sched.h>
18
#include <linux/errno.h>
19
#include <linux/slab.h>
20
#include <linux/delay.h>
21
#include <linux/interrupt.h>
22
#include <linux/mtd/map.h>
23
#include <linux/mtd/mtd.h>
24
#include <linux/mtd/flashchip.h>
25
 
26
/* There's no limit. It exists only to avoid realloc. */
27
#define MAX_AMD_CHIPS 8
28
 
29
#define DEVICE_TYPE_X8  (8 / 8)
30
#define DEVICE_TYPE_X16 (16 / 8)
31
#define DEVICE_TYPE_X32 (32 / 8)
32
 
33
/* Addresses */
34
#define ADDR_MANUFACTURER               0x0000
35
#define ADDR_DEVICE_ID                  0x0001
36
#define ADDR_SECTOR_LOCK                0x0002
37
#define ADDR_HANDSHAKE                  0x0003
38
#define ADDR_UNLOCK_1                   0x0555
39
#define ADDR_UNLOCK_2                   0x02AA
40
 
41
/* Commands */
42
#define CMD_UNLOCK_DATA_1               0x00AA
43
#define CMD_UNLOCK_DATA_2               0x0055
44
#define CMD_MANUFACTURER_UNLOCK_DATA    0x0090
45
#define CMD_UNLOCK_BYPASS_MODE          0x0020
46
#define CMD_PROGRAM_UNLOCK_DATA         0x00A0
47
#define CMD_RESET_DATA                  0x00F0
48
#define CMD_SECTOR_ERASE_UNLOCK_DATA    0x0080
49
#define CMD_SECTOR_ERASE_UNLOCK_DATA_2  0x0030
50
 
51
#define CMD_UNLOCK_SECTOR               0x0060
52
 
53
/* Manufacturers */
54
#define MANUFACTURER_AMD        0x0001
55
#define MANUFACTURER_ATMEL      0x001F
56
#define MANUFACTURER_FUJITSU    0x0004
57
#define MANUFACTURER_ST         0x0020
58
#define MANUFACTURER_SST        0x00BF
59
#define MANUFACTURER_TOSHIBA    0x0098
60
 
61
/* AMD */
62
#define AM29F800BB      0x2258
63
#define AM29F800BT      0x22D6
64
#define AM29LV800BB     0x225B
65
#define AM29LV800BT     0x22DA
66
#define AM29LV160DT     0x22C4
67
#define AM29LV160DB     0x2249
68
#define AM29BDS323D     0x22D1
69
#define AM29BDS643D     0x227E
70
 
71
/* Atmel */
72
#define AT49xV16x       0x00C0
73
#define AT49xV16xT      0x00C2
74
 
75
/* Fujitsu */
76
#define MBM29LV160TE    0x22C4
77
#define MBM29LV160BE    0x2249
78
#define MBM29LV800BB    0x225B
79
 
80
/* ST - www.st.com */
81
#define M29W800T        0x00D7
82
#define M29W160DT       0x22C4
83
#define M29W160DB       0x2249
84
 
85
/* SST */
86
#define SST39LF800      0x2781
87
#define SST39LF160      0x2782
88
 
89
/* Toshiba */
90
#define TC58FVT160      0x00C2
91
#define TC58FVB160      0x0043
92
 
93
#define D6_MASK 0x40
94
 
95
/* Per-map driver state; installed in map_info.fldrv_priv by the probe. */
struct amd_flash_private {
        int device_type;        /* DEVICE_TYPE_X8/X16/X32 (device width in bytes) */
        int interleave;         /* number of chips wired in parallel on the bus */
        int numchips;           /* number of valid entries in chips[] */
        unsigned long chipshift;        /* log2 of the address span of one chip */
//      const char *im_name;
        struct flchip chips[0]; /* variable-length tail, one entry per chip */
};
103
 
104
/* One entry of the static probe table: JEDEC ids plus chip geometry. */
struct amd_flash_info {
        const __u16 mfr_id;     /* JEDEC manufacturer id */
        const __u16 dev_id;     /* JEDEC device id */
        const char *name;       /* human-readable chip name for log messages */
        const u_long size;      /* total size of one chip in bytes */
        const int numeraseregions;      /* valid entries in regions[] */
        const struct mtd_erase_region_info regions[4];
};
112
 
113
 
114
 
115
static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *,
116
                          u_char *);
117
static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *,
118
                           const u_char *);
119
static int amd_flash_erase(struct mtd_info *, struct erase_info *);
120
static void amd_flash_sync(struct mtd_info *);
121
static int amd_flash_suspend(struct mtd_info *);
122
static void amd_flash_resume(struct mtd_info *);
123
static void amd_flash_destroy(struct mtd_info *);
124
static struct mtd_info *amd_flash_probe(struct map_info *map);
125
 
126
 
127
/* Registration record for the MTD chip-driver framework.  Uses the
 * old GNU-style "label:" designated initializers common in 2.4 code. */
static struct mtd_chip_driver amd_flash_chipdrv = {
        probe: amd_flash_probe,
        destroy: amd_flash_destroy,
        name: "amd_flash",
        module: THIS_MODULE
};
133
 
134
 
135
 
136
/* Driver name string; NOTE(review): appears unused in this file (the
 * chip-driver record above uses a separate literal) — confirm before removal. */
static const char im_name[] = "amd_flash";
137
 
138
 
139
 
140
/*
 * Read one bus-width unit from the map at offset addr, dispatching on
 * the configured bus width.  Unsupported widths read back as 0.
 */
static inline __u32 wide_read(struct map_info *map, __u32 addr)
{
        switch (map->buswidth) {
        case 1:
                return map->read8(map, addr);
        case 2:
                return map->read16(map, addr);
        case 4:
                return map->read32(map, addr);
        default:
                return 0;
        }
}
152
 
153
/*
 * Write one bus-width unit (val) to the map at offset addr, dispatching
 * on the configured bus width.  Unsupported widths are silently ignored.
 */
static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
{
        switch (map->buswidth) {
        case 1:
                map->write8(map, val, addr);
                break;
        case 2:
                map->write16(map, val, addr);
                break;
        case 4:
                map->write32(map, val, addr);
                break;
        }
}
163
 
164
/*
 * Widen a command pattern for the bus: with two interleaved x16 devices
 * the command byte must appear in both halves of the 32-bit bus word so
 * that each chip sees it.
 */
static inline __u32 make_cmd(struct map_info *map, __u32 cmd)
{
        const struct amd_flash_private *priv = map->fldrv_priv;

        if ((priv->interleave == 2) && (priv->device_type == DEVICE_TYPE_X16))
                cmd |= cmd << 16;

        return cmd;
}
174
 
175
static inline void send_unlock(struct map_info *map, unsigned long base)
176
{
177
        wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1,
178
                   base + (map->buswidth * ADDR_UNLOCK_1));
179
        wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2,
180
                   base + (map->buswidth * ADDR_UNLOCK_2));
181
}
182
 
183
/*
 * Issue a complete three-cycle AMD command: the unlock sequence
 * followed by the (bus-widened) command written to the first unlock
 * address.
 */
static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd)
{
        __u32 wide = make_cmd(map, cmd);

        send_unlock(map, base);
        wide_write(map, wide, base + (map->buswidth * ADDR_UNLOCK_1));
}
189
 
190
static inline void send_cmd_to_addr(struct map_info *map, unsigned long base,
191
                                    __u32 cmd, unsigned long addr)
192
{
193
        send_unlock(map, base);
194
        wide_write(map, make_cmd(map, cmd), addr);
195
}
196
 
197
static inline int flash_is_busy(struct map_info *map, unsigned long addr,
198
                                int interleave)
199
{
200
 
201
        if ((interleave == 2) && (map->buswidth == 4)) {
202
                __u32 read1, read2;
203
 
204
                read1 = wide_read(map, addr);
205
                read2 = wide_read(map, addr);
206
 
207
                return (((read1 >> 16) & D6_MASK) !=
208
                        ((read2 >> 16) & D6_MASK)) ||
209
                       (((read1 & 0xffff) & D6_MASK) !=
210
                        ((read2 & 0xffff) & D6_MASK));
211
        }
212
 
213
        return ((wide_read(map, addr) & D6_MASK) !=
214
                (wide_read(map, addr) & D6_MASK));
215
}
216
 
217
static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
218
                                 int unlock)
219
{
220
        /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
221
        int SLA = unlock ?
222
                (sect_addr |  (0x40 * map->buswidth)) :
223
                (sect_addr & ~(0x40 * map->buswidth)) ;
224
 
225
        __u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);
226
 
227
        wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
228
        wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
229
        wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
230
        wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
231
}
232
 
233
/*
 * Query the protect status of the sector at sect_addr.
 * Resets the chip, enters autoselect mode, reads the sector-lock
 * location and resets back to read mode.  Returns the raw status word
 * (0x0000 = unlocked, 0x0001 = locked, so non-zero means locked).
 */
static inline int is_sector_locked(struct map_info *map,
                                   unsigned long sect_addr)
{
        int status;

        wide_write(map, CMD_RESET_DATA, 0);
        send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);

        /* status is 0x0000 for unlocked and 0x0001 for locked */
        status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
        wide_write(map, CMD_RESET_DATA, 0);
        return status;
}
246
 
247
/*
 * Walk every erase region of the device and lock (is_unlock == 0) or
 * unlock (is_unlock != 0) each sector overlapping [ofs, ofs + len).
 * After each operation the lock state is read back; a sector that
 * fails verification is reported and makes the whole call fail.
 *
 * Returns 0 on success, -1 if any sector could not be (un)locked.
 *
 * Fixes: the "Cannot unlock" printk used the broken conversion "%xx"
 * (stray 'x' after the specifier), and both printks lacked a kernel
 * log level.
 */
static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
                               int is_unlock)
{
        struct map_info *map;
        struct mtd_erase_region_info *merip;
        int eraseoffset, erasesize, eraseblocks;
        int i;
        int retval = 0;
        int lock_status;

        map = mtd->priv;

        /* Pass the whole chip through sector by sector and check for each
           sector if the sector and the given interval overlap */
        for(i = 0; i < mtd->numeraseregions; i++) {
                merip = &mtd->eraseregions[i];

                eraseoffset = merip->offset;
                erasesize = merip->erasesize;
                eraseblocks = merip->numblocks;

                /* Skip regions that end before the requested interval. */
                if (ofs > eraseoffset + erasesize)
                        continue;

                while (eraseblocks > 0) {
                        if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
                                unlock_sector(map, eraseoffset, is_unlock);

                                lock_status = is_sector_locked(map, eraseoffset);

                                if (is_unlock && lock_status) {
                                        printk(KERN_WARNING
                                               "Cannot unlock sector at address %x length %x\n",
                                               eraseoffset, merip->erasesize);
                                        retval = -1;
                                } else if (!is_unlock && !lock_status) {
                                        printk(KERN_WARNING
                                               "Cannot lock sector at address %x length %x\n",
                                               eraseoffset, merip->erasesize);
                                        retval = -1;
                                }
                        }
                        eraseoffset += erasesize;
                        eraseblocks --;
                }
        }
        return retval;
}
293
 
294
/* MTD unlock entry point: unlock all sectors overlapping [ofs, ofs+len). */
static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        return amd_flash_do_unlock(mtd, ofs, len, 1);
}
298
 
299
/* MTD lock entry point: lock all sectors overlapping [ofs, ofs+len). */
static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        return amd_flash_do_unlock(mtd, ofs, len, 0);
}
303
 
304
 
305
/*
 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
 * matching table entry (-1 if not found or alias for already found chip).
 *
 * When 'chips' is non-NULL a newly identified chip is also recorded in
 * chips[] / private->numchips, and mtd->size and mtd->numeraseregions
 * are accumulated.  The probe temporarily points map->fldrv_priv at a
 * stack-local amd_flash_private so make_cmd() works before the real
 * private structure exists.
 */
static int probe_new_chip(struct mtd_info *mtd, __u32 base,
                          struct flchip *chips,
                          struct amd_flash_private *private,
                          const struct amd_flash_info *table, int table_size)
{
        __u32 mfr_id;
        __u32 dev_id;
        struct map_info *map = mtd->priv;
        struct amd_flash_private temp;
        int i;

        temp.device_type = DEVICE_TYPE_X16;     // Assume X16 (FIXME)
        temp.interleave = 2;
        map->fldrv_priv = &temp;

        /* Enter autoselect mode. */
        send_cmd(map, base, CMD_RESET_DATA);
        send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA);

        mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER));
        dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID));

        /* If both halves of a 32-bit read agree, two interleaved x16
         * chips answered; otherwise assume a single chip. */
        if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
            ((dev_id >> 16) == (dev_id & 0xffff))) {
                mfr_id &= 0xffff;
                dev_id &= 0xffff;
        } else {
                temp.interleave = 1;
        }

        for (i = 0; i < table_size; i++) {
                if ((mfr_id == table[i].mfr_id) &&
                    (dev_id == table[i].dev_id)) {
                        if (chips) {
                                int j;

                                /* Is this an alias for an already found chip?
                                 * In that case that chip should be in
                                 * autoselect mode now.
                                 */
                                for (j = 0; j < private->numchips; j++) {
                                        __u32 mfr_id_other;
                                        __u32 dev_id_other;

                                        mfr_id_other =
                                                wide_read(map, chips[j].start +
                                                               (map->buswidth *
                                                                ADDR_MANUFACTURER
                                                               ));
                                        dev_id_other =
                                                wide_read(map, chips[j].start +
                                                               (map->buswidth *
                                                                ADDR_DEVICE_ID));
                                        if (temp.interleave == 2) {
                                                mfr_id_other &= 0xffff;
                                                dev_id_other &= 0xffff;
                                        }
                                        if ((mfr_id_other == mfr_id) &&
                                            (dev_id_other == dev_id)) {

                                                /* Exit autoselect mode. */
                                                send_cmd(map, base,
                                                         CMD_RESET_DATA);

                                                return -1;
                                        }
                                }

                                if (private->numchips == MAX_AMD_CHIPS) {
                                        printk(KERN_WARNING
                                               "%s: Too many flash chips "
                                               "detected. Increase "
                                               "MAX_AMD_CHIPS from %d.\n",
                                               map->name, MAX_AMD_CHIPS);

                                        return -1;
                                }

                                /* Record the newly found chip. */
                                chips[private->numchips].start = base;
                                chips[private->numchips].state = FL_READY;
                                chips[private->numchips].mutex =
                                        &chips[private->numchips]._spinlock;
                                private->numchips++;
                        }

                        printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name,
                               temp.interleave, (table[i].size)/(1024*1024),
                               table[i].name, base);

                        /* Accumulate geometry into the mtd_info. */
                        mtd->size += table[i].size * temp.interleave;
                        mtd->numeraseregions += table[i].numeraseregions;

                        break;
                }
        }

        /* Exit autoselect mode. */
        send_cmd(map, base, CMD_RESET_DATA);

        if (i == table_size) {
                printk(KERN_DEBUG "%s: unknown flash device at 0x%x, "
                       "mfr id 0x%x, dev id 0x%x\n", map->name,
                       base, mfr_id, dev_id);
                map->fldrv_priv = NULL;

                return -1;
        }

        private->device_type = temp.device_type;
        private->interleave = temp.interleave;

        return i;
}
422
 
423
 
424
 
425
static struct mtd_info *amd_flash_probe(struct map_info *map)
426
{
427
        /* Keep this table on the stack so that it gets deallocated after the
428
         * probe is done.
429
         */
430
        const struct amd_flash_info table[] = {
431
        {
432
                mfr_id: MANUFACTURER_AMD,
433
                dev_id: AM29LV160DT,
434
                name: "AMD AM29LV160DT",
435
                size: 0x00200000,
436
                numeraseregions: 4,
437
                regions: {
438
                        { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
439
                        { offset: 0x1F0000, erasesize: 0x08000, numblocks:  1 },
440
                        { offset: 0x1F8000, erasesize: 0x02000, numblocks:  2 },
441
                        { offset: 0x1FC000, erasesize: 0x04000, numblocks:  1 }
442
                }
443
        }, {
444
                mfr_id: MANUFACTURER_AMD,
445
                dev_id: AM29LV160DB,
446
                name: "AMD AM29LV160DB",
447
                size: 0x00200000,
448
                numeraseregions: 4,
449
                regions: {
450
                        { offset: 0x000000, erasesize: 0x04000, numblocks:  1 },
451
                        { offset: 0x004000, erasesize: 0x02000, numblocks:  2 },
452
                        { offset: 0x008000, erasesize: 0x08000, numblocks:  1 },
453
                        { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
454
                }
455
        }, {
456
                mfr_id: MANUFACTURER_TOSHIBA,
457
                dev_id: TC58FVT160,
458
                name: "Toshiba TC58FVT160",
459
                size: 0x00200000,
460
                numeraseregions: 4,
461
                regions: {
462
                        { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
463
                        { offset: 0x1F0000, erasesize: 0x08000, numblocks:  1 },
464
                        { offset: 0x1F8000, erasesize: 0x02000, numblocks:  2 },
465
                        { offset: 0x1FC000, erasesize: 0x04000, numblocks:  1 }
466
                }
467
        }, {
468
                mfr_id: MANUFACTURER_FUJITSU,
469
                dev_id: MBM29LV160TE,
470
                name: "Fujitsu MBM29LV160TE",
471
                size: 0x00200000,
472
                numeraseregions: 4,
473
                regions: {
474
                        { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
475
                        { offset: 0x1F0000, erasesize: 0x08000, numblocks:  1 },
476
                        { offset: 0x1F8000, erasesize: 0x02000, numblocks:  2 },
477
                        { offset: 0x1FC000, erasesize: 0x04000, numblocks:  1 }
478
                }
479
        }, {
480
                mfr_id: MANUFACTURER_TOSHIBA,
481
                dev_id: TC58FVB160,
482
                name: "Toshiba TC58FVB160",
483
                size: 0x00200000,
484
                numeraseregions: 4,
485
                regions: {
486
                        { offset: 0x000000, erasesize: 0x04000, numblocks:  1 },
487
                        { offset: 0x004000, erasesize: 0x02000, numblocks:  2 },
488
                        { offset: 0x008000, erasesize: 0x08000, numblocks:  1 },
489
                        { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
490
                }
491
        }, {
492
                mfr_id: MANUFACTURER_FUJITSU,
493
                dev_id: MBM29LV160BE,
494
                name: "Fujitsu MBM29LV160BE",
495
                size: 0x00200000,
496
                numeraseregions: 4,
497
                regions: {
498
                        { offset: 0x000000, erasesize: 0x04000, numblocks:  1 },
499
                        { offset: 0x004000, erasesize: 0x02000, numblocks:  2 },
500
                        { offset: 0x008000, erasesize: 0x08000, numblocks:  1 },
501
                        { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
502
                }
503
        }, {
504
                mfr_id: MANUFACTURER_AMD,
505
                dev_id: AM29LV800BB,
506
                name: "AMD AM29LV800BB",
507
                size: 0x00100000,
508
                numeraseregions: 4,
509
                regions: {
510
                        { offset: 0x000000, erasesize: 0x04000, numblocks:  1 },
511
                        { offset: 0x004000, erasesize: 0x02000, numblocks:  2 },
512
                        { offset: 0x008000, erasesize: 0x08000, numblocks:  1 },
513
                        { offset: 0x010000, erasesize: 0x10000, numblocks: 15 }
514
                }
515
        }, {
516
                mfr_id: MANUFACTURER_AMD,
517
                dev_id: AM29F800BB,
518
                name: "AMD AM29F800BB",
519
                size: 0x00100000,
520
                numeraseregions: 4,
521
                regions: {
522
                        { offset: 0x000000, erasesize: 0x04000, numblocks:  1 },
523
                        { offset: 0x004000, erasesize: 0x02000, numblocks:  2 },
524
                        { offset: 0x008000, erasesize: 0x08000, numblocks:  1 },
525
                        { offset: 0x010000, erasesize: 0x10000, numblocks: 15 }
526
                }
527
        }, {
528
                mfr_id: MANUFACTURER_AMD,
529
                dev_id: AM29LV800BT,
530
                name: "AMD AM29LV800BT",
531
                size: 0x00100000,
532
                numeraseregions: 4,
533
                regions: {
534
                        { offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
535
                        { offset: 0x0F0000, erasesize: 0x08000, numblocks:  1 },
536
                        { offset: 0x0F8000, erasesize: 0x02000, numblocks:  2 },
537
                        { offset: 0x0FC000, erasesize: 0x04000, numblocks:  1 }
538
                }
539
        }, {
540
                mfr_id: MANUFACTURER_AMD,
541
                dev_id: AM29F800BT,
542
                name: "AMD AM29F800BT",
543
                size: 0x00100000,
544
                numeraseregions: 4,
545
                regions: {
546
                        { offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
547
                        { offset: 0x0F0000, erasesize: 0x08000, numblocks:  1 },
548
                        { offset: 0x0F8000, erasesize: 0x02000, numblocks:  2 },
549
                        { offset: 0x0FC000, erasesize: 0x04000, numblocks:  1 }
550
                }
551
        }, {
552
                mfr_id: MANUFACTURER_AMD,
553
                dev_id: AM29LV800BB,
554
                name: "AMD AM29LV800BB",
555
                size: 0x00100000,
556
                numeraseregions: 4,
557
                regions: {
558
                        { offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
559
                        { offset: 0x0F0000, erasesize: 0x08000, numblocks:  1 },
560
                        { offset: 0x0F8000, erasesize: 0x02000, numblocks:  2 },
561
                        { offset: 0x0FC000, erasesize: 0x04000, numblocks:  1 }
562
                }
563
        }, {
564
                mfr_id: MANUFACTURER_FUJITSU,
565
                dev_id: MBM29LV800BB,
566
                name: "Fujitsu MBM29LV800BB",
567
                size: 0x00100000,
568
                numeraseregions: 4,
569
                regions: {
570
                        { offset: 0x000000, erasesize: 0x04000, numblocks:  1 },
571
                        { offset: 0x004000, erasesize: 0x02000, numblocks:  2 },
572
                        { offset: 0x008000, erasesize: 0x08000, numblocks:  1 },
573
                        { offset: 0x010000, erasesize: 0x10000, numblocks: 15 }
574
                }
575
        }, {
576
                mfr_id: MANUFACTURER_ST,
577
                dev_id: M29W800T,
578
                name: "ST M29W800T",
579
                size: 0x00100000,
580
                numeraseregions: 4,
581
                regions: {
582
                        { offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
583
                        { offset: 0x0F0000, erasesize: 0x08000, numblocks:  1 },
584
                        { offset: 0x0F8000, erasesize: 0x02000, numblocks:  2 },
585
                        { offset: 0x0FC000, erasesize: 0x04000, numblocks:  1 }
586
                }
587
        }, {
588
                mfr_id: MANUFACTURER_ST,
589
                dev_id: M29W160DT,
590
                name: "ST M29W160DT",
591
                size: 0x00200000,
592
                numeraseregions: 4,
593
                regions: {
594
                        { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
595
                        { offset: 0x1F0000, erasesize: 0x08000, numblocks:  1 },
596
                        { offset: 0x1F8000, erasesize: 0x02000, numblocks:  2 },
597
                        { offset: 0x1FC000, erasesize: 0x04000, numblocks:  1 }
598
                }
599
        }, {
600
                mfr_id: MANUFACTURER_ST,
601
                dev_id: M29W160DB,
602
                name: "ST M29W160DB",
603
                size: 0x00200000,
604
                numeraseregions: 4,
605
                regions: {
606
                        { offset: 0x000000, erasesize: 0x04000, numblocks:  1 },
607
                        { offset: 0x004000, erasesize: 0x02000, numblocks:  2 },
608
                        { offset: 0x008000, erasesize: 0x08000, numblocks:  1 },
609
                        { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
610
                }
611
        }, {
612
                mfr_id: MANUFACTURER_AMD,
613
                dev_id: AM29BDS323D,
614
                name: "AMD AM29BDS323D",
615
                size: 0x00400000,
616
                numeraseregions: 3,
617
                regions: {
618
                        { offset: 0x000000, erasesize: 0x10000, numblocks: 48 },
619
                        { offset: 0x300000, erasesize: 0x10000, numblocks: 15 },
620
                        { offset: 0x3f0000, erasesize: 0x02000, numblocks:  8 },
621
                }
622
        }, {
623
                mfr_id: MANUFACTURER_AMD,
624
                dev_id: AM29BDS643D,
625
                name: "AMD AM29BDS643D",
626
                size: 0x00800000,
627
                numeraseregions: 3,
628
                regions: {
629
                        { offset: 0x000000, erasesize: 0x10000, numblocks: 96 },
630
                        { offset: 0x600000, erasesize: 0x10000, numblocks: 31 },
631
                        { offset: 0x7f0000, erasesize: 0x02000, numblocks:  8 },
632
                }
633
        }, {
634
                mfr_id: MANUFACTURER_ATMEL,
635
                dev_id: AT49xV16x,
636
                name: "Atmel AT49xV16x",
637
                size: 0x00200000,
638
                numeraseregions: 2,
639
                regions: {
640
                        { offset: 0x000000, erasesize: 0x02000, numblocks:  8 },
641
                        { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
642
                }
643
        }, {
644
                mfr_id: MANUFACTURER_ATMEL,
645
                dev_id: AT49xV16xT,
646
                name: "Atmel AT49xV16xT",
647
                size: 0x00200000,
648
                numeraseregions: 2,
649
                regions: {
650
                        { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
651
                        { offset: 0x1F0000, erasesize: 0x02000, numblocks:  8 }
652
                }
653
        }
654
        };
655
 
656
        struct mtd_info *mtd;
657
        struct flchip chips[MAX_AMD_CHIPS];
658
        int table_pos[MAX_AMD_CHIPS];
659
        struct amd_flash_private temp;
660
        struct amd_flash_private *private;
661
        u_long size;
662
        unsigned long base;
663
        int i;
664
        int reg_idx;
665
        int offset;
666
 
667
        mtd = (struct mtd_info*)kmalloc(sizeof(*mtd), GFP_KERNEL);
668
        if (!mtd) {
669
                printk(KERN_WARNING
670
                       "%s: kmalloc failed for info structure\n", map->name);
671
                return NULL;
672
        }
673
        memset(mtd, 0, sizeof(*mtd));
674
        mtd->priv = map;
675
 
676
        memset(&temp, 0, sizeof(temp));
677
 
678
        printk("%s: Probing for AMD compatible flash...\n", map->name);
679
 
680
        if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
681
                                           sizeof(table)/sizeof(table[0])))
682
            == -1) {
683
                printk(KERN_WARNING
684
                       "%s: Found no AMD compatible device at location zero\n",
685
                       map->name);
686
                kfree(mtd);
687
 
688
                return NULL;
689
        }
690
 
691
        chips[0].start = 0;
692
        chips[0].state = FL_READY;
693
        chips[0].mutex = &chips[0]._spinlock;
694
        temp.numchips = 1;
695
        for (size = mtd->size; size > 1; size >>= 1) {
696
                temp.chipshift++;
697
        }
698
        switch (temp.interleave) {
699
                case 2:
700
                        temp.chipshift += 1;
701
                        break;
702
                case 4:
703
                        temp.chipshift += 2;
704
                        break;
705
        }
706
 
707
        /* Find out if there are any more chips in the map. */
708
        for (base = (1 << temp.chipshift);
709
             base < map->size;
710
             base += (1 << temp.chipshift)) {
711
                int numchips = temp.numchips;
712
                table_pos[numchips] = probe_new_chip(mtd, base, chips,
713
                        &temp, table, sizeof(table)/sizeof(table[0]));
714
        }
715
 
716
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
717
                                    mtd->numeraseregions, GFP_KERNEL);
718
        if (!mtd->eraseregions) {
719
                printk(KERN_WARNING "%s: Failed to allocate "
720
                       "memory for MTD erase region info\n", map->name);
721
                kfree(mtd);
722
                map->fldrv_priv = NULL;
723
                return 0;
724
        }
725
 
726
        reg_idx = 0;
727
        offset = 0;
728
        for (i = 0; i < temp.numchips; i++) {
729
                int dev_size;
730
                int j;
731
 
732
                dev_size = 0;
733
                for (j = 0; j < table[table_pos[i]].numeraseregions; j++) {
734
                        mtd->eraseregions[reg_idx].offset = offset +
735
                                (table[table_pos[i]].regions[j].offset *
736
                                 temp.interleave);
737
                        mtd->eraseregions[reg_idx].erasesize =
738
                                table[table_pos[i]].regions[j].erasesize *
739
                                temp.interleave;
740
                        mtd->eraseregions[reg_idx].numblocks =
741
                                table[table_pos[i]].regions[j].numblocks;
742
                        if (mtd->erasesize <
743
                            mtd->eraseregions[reg_idx].erasesize) {
744
                                mtd->erasesize =
745
                                        mtd->eraseregions[reg_idx].erasesize;
746
                        }
747
                        dev_size += mtd->eraseregions[reg_idx].erasesize *
748
                                    mtd->eraseregions[reg_idx].numblocks;
749
                        reg_idx++;
750
                }
751
                offset += dev_size;
752
        }
753
        mtd->type = MTD_NORFLASH;
754
        mtd->flags = MTD_CAP_NORFLASH;
755
        mtd->name = map->name;
756
        mtd->erase = amd_flash_erase;
757
        mtd->read = amd_flash_read;
758
        mtd->write = amd_flash_write;
759
        mtd->sync = amd_flash_sync;
760
        mtd->suspend = amd_flash_suspend;
761
        mtd->resume = amd_flash_resume;
762
        mtd->lock = amd_flash_lock;
763
        mtd->unlock = amd_flash_unlock;
764
 
765
        private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
766
                                              temp.numchips), GFP_KERNEL);
767
        if (!private) {
768
                printk(KERN_WARNING
769
                       "%s: kmalloc failed for private structure\n", map->name);
770
                kfree(mtd);
771
                map->fldrv_priv = NULL;
772
                return NULL;
773
        }
774
        memcpy(private, &temp, sizeof(temp));
775
        memcpy(private->chips, chips,
776
               sizeof(struct flchip) * private->numchips);
777
        for (i = 0; i < private->numchips; i++) {
778
                init_waitqueue_head(&private->chips[i].wq);
779
                spin_lock_init(&private->chips[i]._spinlock);
780
        }
781
 
782
        map->fldrv_priv = private;
783
 
784
        map->fldrv = &amd_flash_chipdrv;
785
        MOD_INC_USE_COUNT;
786
 
787
        return mtd;
788
}
789
 
790
 
791
 
792
static inline int read_one_chip(struct map_info *map, struct flchip *chip,
793
                               loff_t adr, size_t len, u_char *buf)
794
{
795
        DECLARE_WAITQUEUE(wait, current);
796
        unsigned long timeo = jiffies + HZ;
797
 
798
retry:
799
        spin_lock_bh(chip->mutex);
800
 
801
        if (chip->state != FL_READY){
802
                printk(KERN_INFO "%s: waiting for chip to read, state = %d\n",
803
                       map->name, chip->state);
804
                set_current_state(TASK_UNINTERRUPTIBLE);
805
                add_wait_queue(&chip->wq, &wait);
806
 
807
                spin_unlock_bh(chip->mutex);
808
 
809
                schedule();
810
                remove_wait_queue(&chip->wq, &wait);
811
 
812
                if(signal_pending(current)) {
813
                        return -EINTR;
814
                }
815
 
816
                timeo = jiffies + HZ;
817
 
818
                goto retry;
819
        }
820
 
821
        adr += chip->start;
822
 
823
        chip->state = FL_READY;
824
 
825
        map->copy_from(map, buf, adr, len);
826
 
827
        wake_up(&chip->wq);
828
        spin_unlock_bh(chip->mutex);
829
 
830
        return 0;
831
}
832
 
833
 
834
 
835
/*
 * MTD read entry point: copy `len` bytes starting at device offset
 * `from` into `buf`, crossing chip boundaries as needed.
 *
 * *retlen is set to the number of bytes actually read.  Returns 0 on
 * success (possibly short if the request runs off the last chip), or
 * the error from read_one_chip().
 */
static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
			  size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	unsigned long offset;
	int chip;
	int err = 0;

	if ((from + len) > mtd->size) {
		printk(KERN_WARNING "%s: read request past end of device "
		       "(0x%lx)\n", map->name, (unsigned long)from + len);

		return -EINVAL;
	}

	/* Locate the chip, and the offset within it, where reading starts. */
	chip = (from >> private->chipshift);
	offset = from - (chip <<  private->chipshift);

	*retlen = 0;

	while (len && chip < private->numchips) {
		unsigned long chunk;

		/* Clip this pass to what remains of the current chip. */
		if ((len + offset - 1) >> private->chipshift)
			chunk = (1 << private->chipshift) - offset;
		else
			chunk = len;

		err = read_one_chip(map, &private->chips[chip], offset,
				    chunk, buf);
		if (err)
			break;

		*retlen += chunk;
		len -= chunk;
		buf += chunk;

		/* Subsequent chips are read from their beginning. */
		offset = 0;
		chip++;
	}

	return err;
}
886
 
887
 
888
 
889
/*
 * Program one bus word `datum` at chip-relative address `adr`.
 *
 * Waits for the chip to become ready, issues the AMD program-unlock
 * command, polls until the chip goes idle, then verifies the word.
 *
 * Returns 0 on success, -EINTR if a signal arrived while waiting for
 * the chip, or -EIO on programming timeout or verify mismatch.
 *
 * Fix: the poll loop `while (times_left-- && busy)` leaves times_left
 * at -1 on timeout (and >= 0 when the chip went idle), so the old
 * `if (!times_left)` check could never detect a timeout — and could
 * even misfire as a false timeout when the chip went idle on the last
 * iteration.  Test for `times_left < 0` instead.  The dead `timeo`
 * local (written, never read) is also removed.
 */
static int write_one_word(struct map_info *map, struct flchip *chip,
			  unsigned long adr, __u32 datum)
{
	struct amd_flash_private *private = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;
	int times_left;

retry:
	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY){
		printk("%s: waiting for chip to write, state = %d\n",
		       map->name, chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		printk(KERN_INFO "%s: woke up to write\n", map->name);
		if(signal_pending(current))
			return -EINTR;

		goto retry;
	}

	chip->state = FL_WRITING;

	adr += chip->start;
	ENABLE_VPP(map);
	send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA);
	wide_write(map, datum, adr);

	/* Poll the chip until the embedded program algorithm finishes,
	 * yielding the CPU if someone else wants it. */
	times_left = 500000;
	while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
		if (need_resched()) {
			spin_unlock_bh(chip->mutex);
			schedule();
			spin_lock_bh(chip->mutex);
		}
	}

	if (times_left < 0) {
		printk(KERN_WARNING "%s: write to 0x%lx timed out!\n",
		       map->name, adr);
		ret = -EIO;
	} else {
		__u32 verify;
		/* Read back and compare: programming can only clear bits,
		 * so a mismatch means the write failed. */
		if ((verify = wide_read(map, adr)) != datum) {
			printk(KERN_WARNING "%s: write to 0x%lx failed. "
			       "datum = %x, verify = %x\n",
			       map->name, adr, datum, verify);
			ret = -EIO;
		}
	}

	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return ret;
}
957
 
958
 
959
 
960
/*
 * MTD write entry point: program `len` bytes from `buf` to device
 * offset `to`, crossing chip boundaries as needed.
 *
 * Unaligned head and tail bytes are handled by read-modify-writing a
 * full bus word around them; everything in between is written a bus
 * word at a time.  *retlen reports the bytes actually written.
 *
 * Fix: the trailing-partial-word path declared `u_char tmp_buf[2]`
 * while map->copy_from() fills map->buswidth bytes (up to 4), so a
 * 32-bit bus overran the stack buffer — now sized [4] like the head
 * path.  The `chipstart` local, assigned twice but never read, is
 * removed.
 */
static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
			   size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len) {
		return 0;
	}

	chipnum = to >> private->chipshift;
	ofs = to  - (chipnum << private->chipshift);

	/* If it's not bus-aligned, do the first byte write. */
	if (ofs & (map->buswidth - 1)) {
		unsigned long bus_ofs = ofs & ~(map->buswidth - 1);
		int i = ofs - bus_ofs;
		int n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		/* Read the existing bus word, overlay the new bytes,
		 * then program the whole word back. */
		map->copy_from(map, tmp_buf,
			       bus_ofs + private->chips[chipnum].start,
			       map->buswidth);
		while (len && i < map->buswidth)
			tmp_buf[i++] = buf[n++], len--;

		if (map->buswidth == 2) {
			datum = *(__u16*)tmp_buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = write_one_word(map, &private->chips[chipnum], bus_ofs,
				     datum);
		if (ret) {
			return ret;
		}

		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> private->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == private->numchips) {
				return 0;
			}
		}
	}

	/* We are now aligned, write as much as possible. */
	while(len >= map->buswidth) {
		__u32 datum;

		if (map->buswidth == 1) {
			datum = *(__u8*)buf;
		} else if (map->buswidth == 2) {
			datum = *(__u16*)buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)buf;
		} else {
			return -EINVAL;
		}

		ret = write_one_word(map, &private->chips[chipnum], ofs, datum);

		if (ret) {
			return ret;
		}

		ofs += map->buswidth;
		buf += map->buswidth;
		(*retlen) += map->buswidth;
		len -= map->buswidth;

		if (ofs >> private->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == private->numchips) {
				return 0;
			}
		}
	}

	/* Trailing partial bus word: read-modify-write, as for the head. */
	if (len & (map->buswidth - 1)) {
		int i = 0, n = 0;
		u_char tmp_buf[4];	/* was [2]: overran on buswidth 4 */
		__u32 datum;

		map->copy_from(map, tmp_buf,
			       ofs + private->chips[chipnum].start,
			       map->buswidth);
		while (len--) {
			tmp_buf[i++] = buf[n++];
		}

		if (map->buswidth == 2) {
			datum = *(__u16*)tmp_buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = write_one_word(map, &private->chips[chipnum], ofs, datum);

		if (ret) {
			return ret;
		}

		(*retlen) += n;
	}

	return 0;
}
1086
 
1087
 
1088
 
1089
/*
 * Erase one sector of `size` bytes at chip-relative address `adr`.
 *
 * Issues the AMD sector-erase command sequence, sleeps while the chip
 * runs its embedded erase algorithm (honouring erase-suspend), and
 * finally verifies that every byte of the sector reads back as 0xFF.
 *
 * Returns 0 on success, -EINTR if a signal arrived while waiting, or
 * -EIO on timeout / verify failure.
 *
 * Fixes: schedule_timeout() was called without first setting the task
 * state, so with the task still TASK_RUNNING it returned immediately
 * and the intended one-second initial sleep never happened; the verify
 * loop declared `address` as int, truncating addresses above 2GB and
 * mismatching the printk format (now unsigned long / %lx).
 */
static inline int erase_one_block(struct map_info *map, struct flchip *chip,
				  unsigned long adr, u_long size)
{
	unsigned long timeo = jiffies + HZ;
	struct amd_flash_private *private = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);

retry:
	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		if (signal_pending(current)) {
			return -EINTR;
		}

		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_ERASING;

	adr += chip->start;
	ENABLE_VPP(map);
	send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
	send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);

	timeo = jiffies + (HZ * 20);

	/* Give the chip a second to work before we start polling.  The
	 * task state must be set before schedule_timeout() for the sleep
	 * to actually happen. */
	set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_bh(chip->mutex);
	schedule_timeout(HZ);
	spin_lock_bh(chip->mutex);

	while (flash_is_busy(map, adr, private->interleave)) {

		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			printk(KERN_INFO "%s: erase suspended. Sleeping\n",
			       map->name);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			if (signal_pending(current)) {
				/* NOTE(review): returns with VPP still
				 * enabled and the erase outstanding —
				 * long-standing behaviour, left as-is. */
				return -EINTR;
			}

			timeo = jiffies + (HZ*2); /* FIXME */
			spin_lock_bh(chip->mutex);
			continue;
		}

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_READY;
			spin_unlock_bh(chip->mutex);
			printk(KERN_WARNING "%s: waiting for erase to complete "
			       "timed out.\n", map->name);
			DISABLE_VPP(map);

			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);

		if (need_resched())
			schedule();
		else
			udelay(1);

		spin_lock_bh(chip->mutex);
	}

	/* Verify every single word */
	{
		unsigned long address;
		int error = 0;
		__u8 verify;

		for (address = adr; address < (adr + size); address++) {
			if ((verify = map->read8(map, address)) != 0xFF) {
				error = 1;
				break;
			}
		}
		if (error) {
			chip->state = FL_READY;
			spin_unlock_bh(chip->mutex);
			printk(KERN_WARNING
			       "%s: verify error at 0x%lx, size %ld.\n",
			       map->name, address, size);
			DISABLE_VPP(map);

			return -EIO;
		}
	}

	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}
1205
 
1206
 
1207
 
1208
/*
 * MTD erase entry point: erase the range [instr->addr, instr->addr +
 * instr->len) block by block.
 *
 * The device may have several erase regions with different block
 * sizes, so both ends of the request must be validated against the
 * erase size in effect at that address.  On success the erase_info is
 * marked MTD_ERASE_DONE and its callback (if any) is invoked; returns
 * 0, -EINVAL for an out-of-range/misaligned request, or the error
 * from erase_one_block().
 */
static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum;
	int ret = 0;
	int i;
	int first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size) {
		return -EINVAL;
	}

	if ((instr->len + instr->addr) > mtd->size) {
		return -EINVAL;
	}

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while ((i < mtd->numeraseregions) &&
	       (instr->addr >= regions[i].offset)) {
	       i++;
	}
	i--;

	/* OK, now i is pointing at the erase region in which this
	 * erase request starts. Check the start of the requested
	 * erase range is aligned with the erase size which is in
	 * effect here.
	 */

	if (instr->addr & (regions[i].erasesize-1)) {
		return -EINVAL;
	}

	/* Remember the erase region we start on. */

	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while ((i < mtd->numeraseregions) &&
	       ((instr->addr + instr->len) >= regions[i].offset)) {
		i++;
	}

	/* As before, drop back one to point at the region in which
	 * the address actually falls.
	 */

	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1)) {
		return -EINVAL;
	}

	/* Translate the device offset into a chip number plus a
	 * chip-relative address. */
	chipnum = instr->addr >> private->chipshift;
	adr = instr->addr - (chipnum << private->chipshift);
	len = instr->len;

	i = first;

	while (len) {
		ret = erase_one_block(map, &private->chips[chipnum], adr,
				      regions[i].erasesize);

		if (ret) {
			return ret;
		}

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		/* Advance to the next erase region once we pass the end
		 * of the current one (compared modulo the chip size,
		 * since regions repeat on each interleaved chip). */
		if ((adr % (1 << private->chipshift)) ==
		    ((regions[i].offset + (regions[i].erasesize *
					   regions[i].numblocks))
		     % (1 << private->chipshift))) {
			i++;
		}

		/* Crossed into the next chip: restart at its base. */
		if (adr >> private->chipshift) {
			adr = 0;
			chipnum++;
			if (chipnum >= private->numchips) {
				break;
			}
		}
	}

	instr->state = MTD_ERASE_DONE;
	if (instr->callback) {
		instr->callback(instr);
	}

	return 0;
}
1318
 
1319
 
1320
 
1321
/*
 * MTD sync entry point: wait until every chip is idle.
 *
 * First pass: park each idle chip in FL_SYNCING (waiting on the chip's
 * queue until it reaches an idle state).  Second pass: restore each
 * parked chip's previous state and wake any waiters.
 */
static void amd_flash_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && (i < private->numchips); i++) {
		chip = &private->chips[i];

	retry:
		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through: a freshly-parked chip is handled
			 * the same as one already in FL_SYNCING. */
		case FL_SYNCING:
			spin_unlock_bh(chip->mutex);
			break;

		default:
			/* Not an idle state */
			/* NOTE(review): no set_current_state() before
			 * schedule() here, so this is effectively a busy
			 * retry rather than a sleep — confirm intent. */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */
	for (i--; i >= 0; i--) {
		chip = &private->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock_bh(chip->mutex);
	}
}
1378
 
1379
 
1380
 
1381
static int amd_flash_suspend(struct mtd_info *mtd)
1382
{
1383
printk("amd_flash_suspend(): not implemented!\n");
1384
        return -EINVAL;
1385
}
1386
 
1387
 
1388
 
1389
static void amd_flash_resume(struct mtd_info *mtd)
1390
{
1391
printk("amd_flash_resume(): not implemented!\n");
1392
}
1393
 
1394
 
1395
 
1396
static void amd_flash_destroy(struct mtd_info *mtd)
1397
{
1398
        struct map_info *map = mtd->priv;
1399
        struct amd_flash_private *private = map->fldrv_priv;
1400
        kfree(private);
1401
}
1402
 
1403
/* Module init: register this non-CFI AMD chip driver with the MTD core. */
int __init amd_flash_init(void)
{
	register_mtd_chip_driver(&amd_flash_chipdrv);
	return 0;
}
1408
 
1409
/* Module exit: unregister the chip driver from the MTD core. */
void __exit amd_flash_exit(void)
{
	unregister_mtd_chip_driver(&amd_flash_chipdrv);
}
1413
 
1414
/* Hook init/exit into the module loader and declare module metadata. */
module_init(amd_flash_init);
module_exit(amd_flash_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.