mtd: cfi_cmdset_0002: add CFI detection for SST 38VF640x chips
drivers/mtd/chips/cfi_cmdset_0002.c
1 /*
2 * Common Flash Interface support:
3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
4 *
5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
8 *
9 * 2_by_8 routines added by Simon Munton
10 *
11 * 4_by_16 work by Carolyn J. Smith
12 *
13 * XIP support hooks by Vitaly Wool (based on code for Intel flash
14 * by Nicolas Pitre)
15 *
16 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
17 *
18 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
19 *
20 * This code is GPL
21 */
22
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
27 #include <linux/init.h>
28 #include <asm/io.h>
29 #include <asm/byteorder.h>
30
31 #include <linux/errno.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/interrupt.h>
35 #include <linux/reboot.h>
36 #include <linux/mtd/map.h>
37 #include <linux/mtd/mtd.h>
38 #include <linux/mtd/cfi.h>
39 #include <linux/mtd/xip.h>
40
41 #define AMD_BOOTLOC_BUG
42 #define FORCE_WORD_WRITE 0
43
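/* How many times do_write_oneword() re-issues the program sequence when
 * the post-write verify (chip_good()) fails, before giving up with -EIO. */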
44 #define MAX_WORD_RETRIES 3
45
46 #define SST49LF004B 0x0060
47 #define SST49LF040B 0x0050
48 #define SST49LF008A 0x005a
49 #define AT49BV6416 0x00d6
50
51 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
52 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
53 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
55 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
56 static void cfi_amdstd_sync (struct mtd_info *);
57 static int cfi_amdstd_suspend (struct mtd_info *);
58 static void cfi_amdstd_resume (struct mtd_info *);
59 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
60 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
61
62 static void cfi_amdstd_destroy(struct mtd_info *);
63
64 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
65 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
66
67 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
68 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
69 #include "fwh_lock.h"
70
71 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
72 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
73
74 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
75 .probe = NULL, /* Not usable directly */
76 .destroy = cfi_amdstd_destroy,
77 .name = "cfi_cmdset_0002",
78 .module = THIS_MODULE
79 };
80
81
82 /* #define DEBUG_CFI_FEATURES */
83
84
85 #ifdef DEBUG_CFI_FEATURES
86 static void cfi_tell_features(struct cfi_pri_amdstd *extp)
87 {
88 const char* erase_suspend[3] = {
89 "Not supported", "Read only", "Read/write"
90 };
91 const char* top_bottom[6] = {
92 "No WP", "8x8KiB sectors at top & bottom, no WP",
93 "Bottom boot", "Top boot",
94 "Uniform, Bottom WP", "Uniform, Top WP"
95 };
96
97 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
98 printk(" Address sensitive unlock: %s\n",
99 (extp->SiliconRevision & 1) ? "Not required" : "Required");
100
101 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
102 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
103 else
104 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
105
106 if (extp->BlkProt == 0)
107 printk(" Block protection: Not supported\n");
108 else
109 printk(" Block protection: %d sectors per group\n", extp->BlkProt);
110
111
112 printk(" Temporary block unprotect: %s\n",
113 extp->TmpBlkUnprotect ? "Supported" : "Not supported");
114 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
115 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
116 printk(" Burst mode: %s\n",
117 extp->BurstMode ? "Supported" : "Not supported");
118 if (extp->PageMode == 0)
119 printk(" Page mode: Not supported\n");
120 else
121 printk(" Page mode: %d word page\n", extp->PageMode << 2);
122
123 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
124 extp->VppMin >> 4, extp->VppMin & 0xf);
125 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
126 extp->VppMax >> 4, extp->VppMax & 0xf);
127
128 if (extp->TopBottom < ARRAY_SIZE(top_bottom))
129 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
130 else
131 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
132 }
133 #endif
134
135 #ifdef AMD_BOOTLOC_BUG
136 /* Wheee. Bring me the head of someone at AMD. */
137 static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
138 {
139 struct map_info *map = mtd->priv;
140 struct cfi_private *cfi = map->fldrv_priv;
141 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
142 __u8 major = extp->MajorVersion;
143 __u8 minor = extp->MinorVersion;
144
145 if (((major << 8) | minor) < 0x3131) {
146 /* CFI version 1.0 => don't trust bootloc */
147
148 DEBUG(MTD_DEBUG_LEVEL1,
149 "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
150 map->name, cfi->mfr, cfi->id);
151
152 /* AFAICS all 29LV400 with a bottom boot block have a device ID
153 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
154 * These were badly detected as they have the 0x80 bit set
155 * so treat them as a special case.
156 */
157 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
158
159 /* Macronix added CFI to their 2nd generation
160 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
161 * Fujitsu, Spansion, EON, ESI and older Macronix)
162 * has CFI.
163 *
164 * Therefore also check the manufacturer.
165 * This reduces the risk of false detection due to
166 * the 8-bit device ID.
167 */
168 (cfi->mfr == CFI_MFR_MACRONIX)) {
169 DEBUG(MTD_DEBUG_LEVEL1,
170 "%s: Macronix MX29LV400C with bottom boot block"
171 " detected\n", map->name);
172 extp->TopBottom = 2; /* bottom boot */
173 } else
174 if (cfi->id & 0x80) {
175 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
176 extp->TopBottom = 3; /* top boot */
177 } else {
178 extp->TopBottom = 2; /* bottom boot */
179 }
180
181 DEBUG(MTD_DEBUG_LEVEL1,
182 "%s: AMD CFI PRI V%c.%c has no boot block field;"
183 " deduced %s from Device ID\n", map->name, major, minor,
184 extp->TopBottom == 2 ? "bottom" : "top");
185 }
186 }
187 #endif
188
189 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
190 {
191 struct map_info *map = mtd->priv;
192 struct cfi_private *cfi = map->fldrv_priv;
193 if (cfi->cfiq->BufWriteTimeoutTyp) {
194 DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
195 mtd->write = cfi_amdstd_write_buffers;
196 }
197 }
198
199 /* Atmel chips don't use the same PRI format as AMD chips */
200 static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
201 {
202 struct map_info *map = mtd->priv;
203 struct cfi_private *cfi = map->fldrv_priv;
204 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
205 struct cfi_pri_atmel atmel_pri;
206
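	/* Save a copy of the Atmel PRI, then clear everything in the generic
	 * AMD PRI past its 5-byte header (the "PRI" signature plus the
	 * major/minor version bytes) before translating the Atmel fields. */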
207 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
208 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
209
210 if (atmel_pri.Features & 0x02)
211 extp->EraseSuspend = 2;
212
213 /* Some chips got it backwards... */
214 if (cfi->id == AT49BV6416) {
215 if (atmel_pri.BottomBoot)
216 extp->TopBottom = 3;
217 else
218 extp->TopBottom = 2;
219 } else {
220 if (atmel_pri.BottomBoot)
221 extp->TopBottom = 2;
222 else
223 extp->TopBottom = 3;
224 }
225
226 /* burst write mode not supported */
227 cfi->cfiq->BufWriteTimeoutTyp = 0;
228 cfi->cfiq->BufWriteTimeoutMax = 0;
229 }
230
231 static void fixup_use_secsi(struct mtd_info *mtd, void *param)
232 {
233 /* Setup for chips with a secsi area */
234 mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
235 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
236 }
237
238 static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
239 {
240 struct map_info *map = mtd->priv;
241 struct cfi_private *cfi = map->fldrv_priv;
242 if ((cfi->cfiq->NumEraseRegions == 1) &&
243 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
244 mtd->erase = cfi_amdstd_erase_chip;
245 }
246
247 }
248
249 /*
250 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
251 * locked by default.
252 */
253 static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
254 {
255 mtd->lock = cfi_atmel_lock;
256 mtd->unlock = cfi_atmel_unlock;
257 mtd->flags |= MTD_POWERUP_LOCK;
258 }
259
260 static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
261 {
262 struct map_info *map = mtd->priv;
263 struct cfi_private *cfi = map->fldrv_priv;
264
265 /*
266 * These flashes report two separate eraseblock regions based on the
267 * sector_erase-size and block_erase-size, although they both operate on the
268 * same memory. This is not allowed according to CFI, so we just pick the
269 * sector_erase-size.
270 */
271 cfi->cfiq->NumEraseRegions = 1;
272 }
273
274 static void fixup_sst39vf(struct mtd_info *mtd, void *param)
275 {
276 struct map_info *map = mtd->priv;
277 struct cfi_private *cfi = map->fldrv_priv;
278
279 fixup_old_sst_eraseregion(mtd);
280
281 cfi->addr_unlock1 = 0x5555;
282 cfi->addr_unlock2 = 0x2AAA;
283 }
284
285 static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
286 {
287 struct map_info *map = mtd->priv;
288 struct cfi_private *cfi = map->fldrv_priv;
289
290 fixup_old_sst_eraseregion(mtd);
291
292 cfi->addr_unlock1 = 0x555;
293 cfi->addr_unlock2 = 0x2AA;
294 }
295
296 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param)
297 {
298 struct map_info *map = mtd->priv;
299 struct cfi_private *cfi = map->fldrv_priv;
300
301 fixup_sst39vf_rev_b(mtd, param);
302
303 /*
304 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
305 * it should report a size of 8KBytes (0x0020*256).
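 * (EraseRegionInfo encoding: bits 31-16 hold the sector size divided by
 * 256, bits 15-0 hold the number of sectors minus one, so 0x002003ff
 * means 0x3ff+1 = 1024 sectors of 0x20*256 = 8KiB each.)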
306 */
307 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
308 pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
309 }
310
311 static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
312 {
313 struct map_info *map = mtd->priv;
314 struct cfi_private *cfi = map->fldrv_priv;
315
316 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
317 cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
318 pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
319 }
320 }
321
322 static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
323 {
324 struct map_info *map = mtd->priv;
325 struct cfi_private *cfi = map->fldrv_priv;
326
327 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
328 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
329 pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
330 }
331 }
332
333 /* Used to fix CFI-Tables of chips without Extended Query Tables */
334 static struct cfi_fixup cfi_nopri_fixup_table[] = {
335 { CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, /* SST39VF1602 */
336 { CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, /* SST39VF1601 */
337 { CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, /* SST39VF3202 */
338 { CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, /* SST39VF3201 */
339 { CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3202B */
340 { CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3201B */
341 { CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6402B */
342 { CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6401B */
343 { 0, 0, NULL, NULL }
344 };
345
346 static struct cfi_fixup cfi_fixup_table[] = {
347 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
348 #ifdef AMD_BOOTLOC_BUG
349 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
350 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
351 #endif
352 { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
353 { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
354 { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
355 { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
356 { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
357 { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
358 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
359 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
360 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
361 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
362 { CFI_MFR_SST, 0x536A, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6402 */
363 { CFI_MFR_SST, 0x536B, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6401 */
364 { CFI_MFR_SST, 0x536C, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6404 */
365 { CFI_MFR_SST, 0x536D, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6403 */
366 #if !FORCE_WORD_WRITE
367 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
368 #endif
369 { 0, 0, NULL, NULL }
370 };
371 static struct cfi_fixup jedec_fixup_table[] = {
372 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
373 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
374 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
375 { 0, 0, NULL, NULL }
376 };
377
378 static struct cfi_fixup fixup_table[] = {
379 /* The CFI vendor IDs and the JEDEC vendor IDs appear
380 * to be common. It seems likely that the device IDs
381 * are as well. This table picks out all cases where
382 * we know that to be the case.
383 */
384 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
385 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
386 { 0, 0, NULL, NULL }
387 };
388
389
390 static void cfi_fixup_major_minor(struct cfi_private *cfi,
391 struct cfi_pri_amdstd *extp)
392 {
393 if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
394 extp->MajorVersion == '0')
395 extp->MajorVersion = '1';
396 /*
397 * SST 38VF640x chips report major=0xFF / minor=0xFF.
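 * The (cfi->id >> 4) == 0x0536 test below matches the whole 0x536x
 * device-ID family handled in cfi_fixup_table (38VF6401..38VF6404).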
398 */
399 if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
400 extp->MajorVersion = '1';
401 extp->MinorVersion = '0';
402 }
403 }
404
405 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
406 {
407 struct cfi_private *cfi = map->fldrv_priv;
408 struct mtd_info *mtd;
409 int i;
410
411 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
412 if (!mtd) {
413 printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
414 return NULL;
415 }
416 mtd->priv = map;
417 mtd->type = MTD_NORFLASH;
418
419 /* Fill in the default mtd operations */
420 mtd->erase = cfi_amdstd_erase_varsize;
421 mtd->write = cfi_amdstd_write_words;
422 mtd->read = cfi_amdstd_read;
423 mtd->sync = cfi_amdstd_sync;
424 mtd->suspend = cfi_amdstd_suspend;
425 mtd->resume = cfi_amdstd_resume;
426 mtd->flags = MTD_CAP_NORFLASH;
427 mtd->name = map->name;
428 mtd->writesize = 1;
429
430 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
431
432 if (cfi->cfi_mode==CFI_MODE_CFI){
433 unsigned char bootloc;
434 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
435 struct cfi_pri_amdstd *extp;
436
437 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
438 if (extp) {
439 /*
440 * It's a real CFI chip, not one for which the probe
441 * routine faked a CFI structure.
442 */
443 cfi_fixup_major_minor(cfi, extp);
444
445 /*
446 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
447 * see: http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_r20.pdf, page 19
448 * http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_100_20011201.pdf
449 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
450 */
451 if (extp->MajorVersion != '1' ||
452 (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
453 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
454 "version %c.%c (%#02x/%#02x).\n",
455 extp->MajorVersion, extp->MinorVersion,
456 extp->MajorVersion, extp->MinorVersion);
457 kfree(extp);
458 kfree(mtd);
459 return NULL;
460 }
461
462 printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
463 extp->MajorVersion, extp->MinorVersion);
464
465 /* Install our own private info structure */
466 cfi->cmdset_priv = extp;
467
468 /* Apply cfi device specific fixups */
469 cfi_fixup(mtd, cfi_fixup_table);
470
471 #ifdef DEBUG_CFI_FEATURES
472 /* Tell the user about it in lots of lovely detail */
473 cfi_tell_features(extp);
474 #endif
475
476 bootloc = extp->TopBottom;
477 if ((bootloc < 2) || (bootloc > 5)) {
478 printk(KERN_WARNING "%s: CFI contains unrecognised boot "
479 "bank location (%d). Assuming bottom.\n",
480 map->name, bootloc);
481 bootloc = 2;
482 }
483
484 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
485 printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
486
487 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
488 int j = (cfi->cfiq->NumEraseRegions-1)-i;
489 __u32 swap;
490
491 swap = cfi->cfiq->EraseRegionInfo[i];
492 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
493 cfi->cfiq->EraseRegionInfo[j] = swap;
494 }
495 }
496 /* Set the default CFI lock/unlock addresses */
497 cfi->addr_unlock1 = 0x555;
498 cfi->addr_unlock2 = 0x2aa;
499 }
500 cfi_fixup(mtd, cfi_nopri_fixup_table);
501
502 if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
503 kfree(mtd);
504 return NULL;
505 }
506
507 } /* CFI mode */
508 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
509 /* Apply jedec specific fixups */
510 cfi_fixup(mtd, jedec_fixup_table);
511 }
512 /* Apply generic fixups */
513 cfi_fixup(mtd, fixup_table);
514
515 for (i=0; i< cfi->numchips; i++) {
516 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
517 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
518 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
519 cfi->chips[i].ref_point_counter = 0;
520 init_waitqueue_head(&(cfi->chips[i].wq));
521 }
522
523 map->fldrv = &cfi_amdstd_chipdrv;
524
525 return cfi_amdstd_setup(mtd);
526 }
527 struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
528 struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
529 EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
530 EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
531 EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
532
533 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
534 {
535 struct map_info *map = mtd->priv;
536 struct cfi_private *cfi = map->fldrv_priv;
537 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
538 unsigned long offset = 0;
539 int i,j;
540
541 printk(KERN_NOTICE "number of %s chips: %d\n",
542 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
543 /* Select the correct geometry setup */
544 mtd->size = devsize * cfi->numchips;
545
546 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
547 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
548 * mtd->numeraseregions, GFP_KERNEL);
549 if (!mtd->eraseregions) {
550 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
551 goto setup_err;
552 }
553
554 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
555 unsigned long ernum, ersize;
556 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
557 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
558
559 if (mtd->erasesize < ersize) {
560 mtd->erasesize = ersize;
561 }
562 for (j=0; j<cfi->numchips; j++) {
563 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
564 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
565 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
566 }
567 offset += (ersize * ernum);
568 }
569 if (offset != devsize) {
570 /* Argh */
571 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
572 goto setup_err;
573 }
574
575 __module_get(THIS_MODULE);
576 register_reboot_notifier(&mtd->reboot_notifier);
577 return mtd;
578
579 setup_err:
580 kfree(mtd->eraseregions);
581 kfree(mtd);
582 kfree(cfi->cmdset_priv);
583 kfree(cfi->cfiq);
584 return NULL;
585 }
586
587 /*
588 * Return true if the chip is ready.
589 *
590 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
591 * non-suspended sector) and is indicated by no toggle bits toggling.
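 *
 * This is the AMD DQ6 toggle-bit scheme: while a program or erase is in
 * progress, successive reads of the same location return different data,
 * so two identical reads mean the chip is back in (or still in) array mode.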
592 *
593 * Note that anything more complicated than checking if no bits are toggling
594 * (including checking DQ5 for an error status) is tricky to get working
595 * correctly and is therefore not done (particularly with interleaved chips
596 * as each chip must be checked independently of the others).
597 */
598 static int __xipram chip_ready(struct map_info *map, unsigned long addr)
599 {
600 map_word d, t;
601
602 d = map_read(map, addr);
603 t = map_read(map, addr);
604
605 return map_word_equal(map, d, t);
606 }
607
608 /*
609 * Return true if the chip is ready and has the correct value.
610 *
611 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
612 * non-suspended sector) and it is indicated by no bits toggling.
613 *
614 * Errors are indicated by bits held at the wrong value, or by bits that
615 * keep toggling.
616 *
617 * Note that anything more complicated than checking if no bits are toggling
618 * (including checking DQ5 for an error status) is tricky to get working
619 * correctly and is therefore not done (particularly with interleaved chips
620 * as each chip must be checked independently of the others).
621 *
622 */
623 static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
624 {
625 map_word oldd, curd;
626
627 oldd = map_read(map, addr);
628 curd = map_read(map, addr);
629
630 return map_word_equal(map, oldd, curd) &&
631 map_word_equal(map, curd, expected);
632 }
633
634 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
635 {
636 DECLARE_WAITQUEUE(wait, current);
637 struct cfi_private *cfi = map->fldrv_priv;
638 unsigned long timeo;
639 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
640
641 resettime:
642 timeo = jiffies + HZ;
643 retry:
644 switch (chip->state) {
645
646 case FL_STATUS:
647 for (;;) {
648 if (chip_ready(map, adr))
649 break;
650
651 if (time_after(jiffies, timeo)) {
652 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
653 return -EIO;
654 }
655 mutex_unlock(&chip->mutex);
656 cfi_udelay(1);
657 mutex_lock(&chip->mutex);
658 /* Someone else might have been playing with it. */
659 goto retry;
660 }
661
662 case FL_READY:
663 case FL_CFI_QUERY:
664 case FL_JEDEC_QUERY:
665 return 0;
666
667 case FL_ERASING:
668 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
669 !(mode == FL_READY || mode == FL_POINT ||
670 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
671 goto sleep;
672
673 /* We could check to see if we're trying to access the sector
674 * that is currently being erased. However, no user will try
675 * anything like that so we just wait for the timeout. */
676
677 /* Erase suspend */
678 /* It's harmless to issue the Erase-Suspend and Erase-Resume
679 * commands when the erase algorithm isn't in progress. */
680 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
681 chip->oldstate = FL_ERASING;
682 chip->state = FL_ERASE_SUSPENDING;
683 chip->erase_suspended = 1;
684 for (;;) {
685 if (chip_ready(map, adr))
686 break;
687
688 if (time_after(jiffies, timeo)) {
689 /* Should have suspended the erase by now.
690 * Send an Erase-Resume command as either
691 * there was an error (so leave the erase
692 * routine to recover from it) or we are trying to
693 * use the erase-in-progress sector. */
694 map_write(map, CMD(0x30), chip->in_progress_block_addr);
695 chip->state = FL_ERASING;
696 chip->oldstate = FL_READY;
697 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
698 return -EIO;
699 }
700
701 mutex_unlock(&chip->mutex);
702 cfi_udelay(1);
703 mutex_lock(&chip->mutex);
704 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
705 So we can just loop here. */
706 }
707 chip->state = FL_READY;
708 return 0;
709
710 case FL_XIP_WHILE_ERASING:
711 if (mode != FL_READY && mode != FL_POINT &&
712 (!cfip || !(cfip->EraseSuspend&2)))
713 goto sleep;
714 chip->oldstate = chip->state;
715 chip->state = FL_READY;
716 return 0;
717
718 case FL_SHUTDOWN:
719 /* The machine is rebooting */
720 return -EIO;
721
722 case FL_POINT:
723 /* Only if there's no operation suspended... */
724 if (mode == FL_READY && chip->oldstate == FL_READY)
725 return 0;
726
727 default:
728 sleep:
729 set_current_state(TASK_UNINTERRUPTIBLE);
730 add_wait_queue(&chip->wq, &wait);
731 mutex_unlock(&chip->mutex);
732 schedule();
733 remove_wait_queue(&chip->wq, &wait);
734 mutex_lock(&chip->mutex);
735 goto resettime;
736 }
737 }
738
739
740 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
741 {
742 struct cfi_private *cfi = map->fldrv_priv;
743
744 switch(chip->oldstate) {
745 case FL_ERASING:
746 chip->state = chip->oldstate;
747 map_write(map, CMD(0x30), chip->in_progress_block_addr);
748 chip->oldstate = FL_READY;
749 chip->state = FL_ERASING;
750 break;
751
752 case FL_XIP_WHILE_ERASING:
753 chip->state = chip->oldstate;
754 chip->oldstate = FL_READY;
755 break;
756
757 case FL_READY:
758 case FL_STATUS:
759 /* We should really make set_vpp() count, rather than doing this */
760 DISABLE_VPP(map);
761 break;
762 default:
763 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
764 }
765 wake_up(&chip->wq);
766 }
767
768 #ifdef CONFIG_MTD_XIP
769
770 /*
771 * No interrupt whatsoever can be serviced while the flash isn't in array
772 * mode. This is ensured by the xip_disable() and xip_enable() functions
773 * enclosing any code path where the flash is known not to be in array mode.
774 * And within a XIP disabled code path, only functions marked with __xipram
775 * may be called and nothing else (it's a good thing to inspect generated
776 * assembly to make sure inline functions were actually inlined and that gcc
777 * didn't emit calls to its own support functions). Configuring MTD CFI
778 * support for a single buswidth and a single interleave is also recommended.
779 */
780
781 static void xip_disable(struct map_info *map, struct flchip *chip,
782 unsigned long adr)
783 {
784 /* TODO: chips with no XIP use should ignore and return */
785 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
786 local_irq_disable();
787 }
788
789 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
790 unsigned long adr)
791 {
792 struct cfi_private *cfi = map->fldrv_priv;
793
794 if (chip->state != FL_POINT && chip->state != FL_READY) {
795 map_write(map, CMD(0xf0), adr);
796 chip->state = FL_READY;
797 }
798 (void) map_read(map, adr);
799 xip_iprefetch();
800 local_irq_enable();
801 }
802
803 /*
804 * When a delay is required for the flash operation to complete, the
805 * xip_udelay() function polls for both the given timeout and pending
806 * (but still masked) hardware interrupts. Whenever an interrupt is
807 * pending, the flash erase operation is suspended, array mode is
808 * restored and interrupts are unmasked. Task scheduling might also
809 * happen at that point. The CPU eventually returns from the interrupt
810 * or the call to schedule() and the suspended flash operation is
811 * resumed for the remainder of the delay period.
812 *
813 * Warning: this function _will_ fool interrupt latency tracing tools.
814 */
815
816 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
817 unsigned long adr, int usec)
818 {
819 struct cfi_private *cfi = map->fldrv_priv;
820 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
821 map_word status, OK = CMD(0x80);
822 unsigned long suspended, start = xip_currtime();
823 flstate_t oldstate;
824
825 do {
826 cpu_relax();
827 if (xip_irqpending() && extp &&
828 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
829 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
830 /*
831 * Let's suspend the erase operation when supported.
832 * Note that we currently don't try to suspend
833 * interleaved chips if there is already another
834 * operation suspended (imagine what happens
835 * when one chip was already done with the current
836 * operation while another chip suspended it, then
837 * we resume the whole thing at once). Yes, it
838 * can happen!
839 */
840 map_write(map, CMD(0xb0), adr);
841 usec -= xip_elapsed_since(start);
842 suspended = xip_currtime();
843 do {
844 if (xip_elapsed_since(suspended) > 100000) {
845 /*
846 * The chip doesn't want to suspend
847 * after waiting for 100 msecs.
848 * This is a critical error but there
849 * is not much we can do here.
850 */
851 return;
852 }
853 status = map_read(map, adr);
854 } while (!map_word_andequal(map, status, OK, OK));
855
856 /* Suspend succeeded */
857 oldstate = chip->state;
858 if (!map_word_bitsset(map, status, CMD(0x40)))
859 break;
860 chip->state = FL_XIP_WHILE_ERASING;
861 chip->erase_suspended = 1;
862 map_write(map, CMD(0xf0), adr);
863 (void) map_read(map, adr);
864 xip_iprefetch();
865 local_irq_enable();
866 mutex_unlock(&chip->mutex);
867 xip_iprefetch();
868 cond_resched();
869
870 /*
871 * We're back. However someone else might have
872 * decided to go write to the chip if we are in
873 * a suspended erase state. If so let's wait
874 * until it's done.
875 */
876 mutex_lock(&chip->mutex);
877 while (chip->state != FL_XIP_WHILE_ERASING) {
878 DECLARE_WAITQUEUE(wait, current);
879 set_current_state(TASK_UNINTERRUPTIBLE);
880 add_wait_queue(&chip->wq, &wait);
881 mutex_unlock(&chip->mutex);
882 schedule();
883 remove_wait_queue(&chip->wq, &wait);
884 mutex_lock(&chip->mutex);
885 }
886 /* Disallow XIP again */
887 local_irq_disable();
888
889 /* Resume the write or erase operation */
890 map_write(map, CMD(0x30), adr);
891 chip->state = oldstate;
892 start = xip_currtime();
893 } else if (usec >= 1000000/HZ) {
894 /*
895 * Try to save on CPU power when waiting delay
896 * is at least a system timer tick period.
897 * No need to be extremely accurate here.
898 */
899 xip_cpu_idle();
900 }
901 status = map_read(map, adr);
902 } while (!map_word_andequal(map, status, OK, OK)
903 && xip_elapsed_since(start) < usec);
904 }
905
906 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
907
908 /*
909 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
910 * the flash is actively programming or erasing since we have to poll for
911 * the operation to complete anyway. We can't do that in a generic way with
912 * a XIP setup so do it before the actual flash operation in this case
913 * and stub it out from INVALIDATE_CACHE_UDELAY.
914 */
915 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
916 INVALIDATE_CACHED_RANGE(map, from, size)
917
918 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
919 UDELAY(map, chip, adr, usec)
920
921 /*
922 * Extra notes:
923 *
924 * Activating this XIP support changes the way the code works a bit. For
925 * example the code to suspend the current process when concurrent access
926 * happens is never executed because xip_udelay() will always return with the
927 * same chip state as it was entered with. This is why there is no care for
928 * the presence of add_wait_queue() or schedule() calls from within a couple of
929 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
930 * The queueing and scheduling are always happening within xip_udelay().
931 *
932 * Similarly, get_chip() and put_chip() just happen to always be executed
933 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
934 * is in array mode, therefore never executing many cases therein and not
935 * causing any problem with XIP.
936 */
937
938 #else
939
940 #define xip_disable(map, chip, adr)
941 #define xip_enable(map, chip, adr)
942 #define XIP_INVAL_CACHED_RANGE(x...)
943
944 #define UDELAY(map, chip, adr, usec) \
945 do { \
946 mutex_unlock(&chip->mutex); \
947 cfi_udelay(usec); \
948 mutex_lock(&chip->mutex); \
949 } while (0)
950
951 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
952 do { \
953 mutex_unlock(&chip->mutex); \
954 INVALIDATE_CACHED_RANGE(map, adr, len); \
955 cfi_udelay(usec); \
956 mutex_lock(&chip->mutex); \
957 } while (0)
958
959 #endif
960
961 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
962 {
963 unsigned long cmd_addr;
964 struct cfi_private *cfi = map->fldrv_priv;
965 int ret;
966
967 adr += chip->start;
968
969 /* Ensure cmd read/writes are aligned. */
970 cmd_addr = adr & ~(map_bankwidth(map)-1);
971
972 mutex_lock(&chip->mutex);
973 ret = get_chip(map, chip, cmd_addr, FL_READY);
974 if (ret) {
975 mutex_unlock(&chip->mutex);
976 return ret;
977 }
978
979 if (chip->state != FL_POINT && chip->state != FL_READY) {
980 map_write(map, CMD(0xf0), cmd_addr);
981 chip->state = FL_READY;
982 }
983
984 map_copy_from(map, buf, adr, len);
985
986 put_chip(map, chip, cmd_addr);
987
988 mutex_unlock(&chip->mutex);
989 return 0;
990 }
991
992
993 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
994 {
995 struct map_info *map = mtd->priv;
996 struct cfi_private *cfi = map->fldrv_priv;
997 unsigned long ofs;
998 int chipnum;
999 int ret = 0;
1000
1001 /* ofs: offset within the first chip at which the first read should start */
1002
1003 chipnum = (from >> cfi->chipshift);
1004 ofs = from - (chipnum << cfi->chipshift);
1005
1006
1007 *retlen = 0;
1008
1009 while (len) {
1010 unsigned long thislen;
1011
1012 if (chipnum >= cfi->numchips)
1013 break;
1014
1015 if ((len + ofs -1) >> cfi->chipshift)
1016 thislen = (1<<cfi->chipshift) - ofs;
1017 else
1018 thislen = len;
1019
1020 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1021 if (ret)
1022 break;
1023
1024 *retlen += thislen;
1025 len -= thislen;
1026 buf += thislen;
1027
1028 ofs = 0;
1029 chipnum++;
1030 }
1031 return ret;
1032 }
1033
1034
1035 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1036 {
1037 DECLARE_WAITQUEUE(wait, current);
1038 unsigned long timeo = jiffies + HZ;
1039 struct cfi_private *cfi = map->fldrv_priv;
1040
1041 retry:
1042 mutex_lock(&chip->mutex);
1043
1044 if (chip->state != FL_READY){
1045 set_current_state(TASK_UNINTERRUPTIBLE);
1046 add_wait_queue(&chip->wq, &wait);
1047
1048 mutex_unlock(&chip->mutex);
1049
1050 schedule();
1051 remove_wait_queue(&chip->wq, &wait);
1052 timeo = jiffies + HZ;
1053
1054 goto retry;
1055 }
1056
1057 adr += chip->start;
1058
1059 chip->state = FL_READY;
1060
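	/* Enter the SecSi (Secured Silicon) region: unlock cycles followed by
	 * 0x88, so the copy below reads from the protection sector; the
	 * AA/55/90 plus 0x00 writes afterwards return to the normal array. */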
1061 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1062 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1063 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1064
1065 map_copy_from(map, buf, adr, len);
1066
1067 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1068 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1069 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1070 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1071
1072 wake_up(&chip->wq);
1073 mutex_unlock(&chip->mutex);
1074
1075 return 0;
1076 }
1077
1078 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1079 {
1080 struct map_info *map = mtd->priv;
1081 struct cfi_private *cfi = map->fldrv_priv;
1082 unsigned long ofs;
1083 int chipnum;
1084 int ret = 0;
1085
1086
1087 /* ofs: offset within the first chip at which the first read should start */
1088
1089 /* 8 secsi bytes per chip */
1090 chipnum=from>>3;
1091 ofs=from & 7;
1092
1093
1094 *retlen = 0;
1095
1096 while (len) {
1097 unsigned long thislen;
1098
1099 if (chipnum >= cfi->numchips)
1100 break;
1101
1102 if ((len + ofs -1) >> 3)
1103 thislen = (1<<3) - ofs;
1104 else
1105 thislen = len;
1106
1107 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1108 if (ret)
1109 break;
1110
1111 *retlen += thislen;
1112 len -= thislen;
1113 buf += thislen;
1114
1115 ofs = 0;
1116 chipnum++;
1117 }
1118 return ret;
1119 }
1120
1121
1122 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
1123 {
1124 struct cfi_private *cfi = map->fldrv_priv;
1125 unsigned long timeo = jiffies + HZ;
1126 /*
1127 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1128 * have a max write time of a few hundred usecs). However, we should
1129 * use the maximum timeout value given by the chip at probe time
1130 * instead. Unfortunately, struct flchip does not have a field for
1131 * the maximum timeout, only for the typical one, which can be far too
1132 * short depending on the conditions. The ' + 1' is to avoid having a
1133 * timeout of 0 jiffies if HZ is smaller than 1000.
1134 */
1135 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1136 int ret = 0;
1137 map_word oldd;
1138 int retry_cnt = 0;
1139
1140 adr += chip->start;
1141
1142 mutex_lock(&chip->mutex);
1143 ret = get_chip(map, chip, adr, FL_WRITING);
1144 if (ret) {
1145 mutex_unlock(&chip->mutex);
1146 return ret;
1147 }
1148
1149 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1150 __func__, adr, datum.x[0] );
1151
1152 /*
1153 * Check for a NOP for the case when the datum to write is already
1154 * present - it saves time and works around buggy chips that corrupt
1155 * data at other locations when 0xff is written to a location that
1156 * already contains 0xff.
1157 */
1158 oldd = map_read(map, adr);
1159 if (map_word_equal(map, oldd, datum)) {
1160 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
1161 __func__);
1162 goto op_done;
1163 }
1164
1165 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1166 ENABLE_VPP(map);
1167 xip_disable(map, chip, adr);
1168 retry:
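	/* Standard AMD/Fujitsu word-program sequence: two unlock cycles
	 * (0xAA, 0x55), the 0xA0 Program command, then the datum written
	 * directly to the destination address. */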
1169 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1170 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1171 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1172 map_write(map, datum, adr);
1173 chip->state = FL_WRITING;
1174
1175 INVALIDATE_CACHE_UDELAY(map, chip,
1176 adr, map_bankwidth(map),
1177 chip->word_write_time);
1178
1179 /* See comment above for timeout value. */
1180 timeo = jiffies + uWriteTimeout;
1181 for (;;) {
1182 if (chip->state != FL_WRITING) {
1183 /* Someone's suspended the write. Sleep */
1184 DECLARE_WAITQUEUE(wait, current);
1185
1186 set_current_state(TASK_UNINTERRUPTIBLE);
1187 add_wait_queue(&chip->wq, &wait);
1188 mutex_unlock(&chip->mutex);
1189 schedule();
1190 remove_wait_queue(&chip->wq, &wait);
1191 timeo = jiffies + (HZ / 2); /* FIXME */
1192 mutex_lock(&chip->mutex);
1193 continue;
1194 }
1195
1196 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
1197 xip_enable(map, chip, adr);
1198 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1199 xip_disable(map, chip, adr);
1200 break;
1201 }
1202
1203 if (chip_ready(map, adr))
1204 break;
1205
1206 /* Latency issues. Drop the lock, wait a while and retry */
1207 UDELAY(map, chip, adr, 1);
1208 }
1209 /* Did we succeed? */
1210 if (!chip_good(map, adr, datum)) {
1211 /* reset on all failures. */
1212 map_write( map, CMD(0xF0), chip->start );
1213 /* FIXME - should have reset delay before continuing */
1214
1215 if (++retry_cnt <= MAX_WORD_RETRIES)
1216 goto retry;
1217
1218 ret = -EIO;
1219 }
1220 xip_enable(map, chip, adr);
1221 op_done:
1222 chip->state = FL_READY;
1223 put_chip(map, chip, adr);
1224 mutex_unlock(&chip->mutex);
1225
1226 return ret;
1227 }
1228
1229
1230 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1231 size_t *retlen, const u_char *buf)
1232 {
1233 struct map_info *map = mtd->priv;
1234 struct cfi_private *cfi = map->fldrv_priv;
1235 int ret = 0;
1236 int chipnum;
1237 unsigned long ofs, chipstart;
1238 DECLARE_WAITQUEUE(wait, current);
1239
1240 *retlen = 0;
1241 if (!len)
1242 return 0;
1243
1244 chipnum = to >> cfi->chipshift;
1245 ofs = to - (chipnum << cfi->chipshift);
1246 chipstart = cfi->chips[chipnum].start;
1247
1248 /* If it's not bus-aligned, do the first byte write */
1249 if (ofs & (map_bankwidth(map)-1)) {
1250 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1251 int i = ofs - bus_ofs;
1252 int n = 0;
1253 map_word tmp_buf;
1254
1255 retry:
1256 mutex_lock(&cfi->chips[chipnum].mutex);
1257
1258 if (cfi->chips[chipnum].state != FL_READY) {
1259 set_current_state(TASK_UNINTERRUPTIBLE);
1260 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1261
1262 mutex_unlock(&cfi->chips[chipnum].mutex);
1263
1264 schedule();
1265 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1266 goto retry;
1267 }
1268
1269 /* Load 'tmp_buf' with old contents of flash */
1270 tmp_buf = map_read(map, bus_ofs+chipstart);
1271
1272 mutex_unlock(&cfi->chips[chipnum].mutex);
1273
1274 /* Number of bytes to copy from buffer */
1275 n = min_t(int, len, map_bankwidth(map)-i);
1276
1277 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1278
1279 ret = do_write_oneword(map, &cfi->chips[chipnum],
1280 bus_ofs, tmp_buf);
1281 if (ret)
1282 return ret;
1283
1284 ofs += n;
1285 buf += n;
1286 (*retlen) += n;
1287 len -= n;
1288
1289 if (ofs >> cfi->chipshift) {
1290 chipnum ++;
1291 ofs = 0;
1292 if (chipnum == cfi->numchips)
1293 return 0;
1294 }
1295 }
1296
1297 /* We are now aligned, write as much as possible */
1298 while(len >= map_bankwidth(map)) {
1299 map_word datum;
1300
1301 datum = map_word_load(map, buf);
1302
1303 ret = do_write_oneword(map, &cfi->chips[chipnum],
1304 ofs, datum);
1305 if (ret)
1306 return ret;
1307
1308 ofs += map_bankwidth(map);
1309 buf += map_bankwidth(map);
1310 (*retlen) += map_bankwidth(map);
1311 len -= map_bankwidth(map);
1312
1313 if (ofs >> cfi->chipshift) {
1314 chipnum ++;
1315 ofs = 0;
1316 if (chipnum == cfi->numchips)
1317 return 0;
1318 chipstart = cfi->chips[chipnum].start;
1319 }
1320 }
1321
1322 /* Write the trailing bytes if any */
1323 if (len & (map_bankwidth(map)-1)) {
1324 map_word tmp_buf;
1325
1326 retry1:
1327 mutex_lock(&cfi->chips[chipnum].mutex);
1328
1329 if (cfi->chips[chipnum].state != FL_READY) {
1330 set_current_state(TASK_UNINTERRUPTIBLE);
1331 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1332
1333 mutex_unlock(&cfi->chips[chipnum].mutex);
1334
1335 schedule();
1336 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1337 goto retry1;
1338 }
1339
1340 tmp_buf = map_read(map, ofs + chipstart);
1341
1342 mutex_unlock(&cfi->chips[chipnum].mutex);
1343
1344 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1345
1346 ret = do_write_oneword(map, &cfi->chips[chipnum],
1347 ofs, tmp_buf);
1348 if (ret)
1349 return ret;
1350
1351 (*retlen) += len;
1352 }
1353
1354 return 0;
1355 }
1356
1357
1358 /*
1359 * FIXME: interleaved mode not tested, and probably not supported!
1360 */
1361 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1362 unsigned long adr, const u_char *buf,
1363 int len)
1364 {
1365 struct cfi_private *cfi = map->fldrv_priv;
1366 unsigned long timeo = jiffies + HZ;
1367 /* see comments in do_write_oneword() regarding uWriteTimeout. */
1368 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1369 int ret = -EIO;
1370 unsigned long cmd_adr;
1371 int z, words;
1372 map_word datum;
1373
1374 adr += chip->start;
1375 cmd_adr = adr;
1376
1377 mutex_lock(&chip->mutex);
1378 ret = get_chip(map, chip, adr, FL_WRITING);
1379 if (ret) {
1380 mutex_unlock(&chip->mutex);
1381 return ret;
1382 }
1383
1384 datum = map_word_load(map, buf);
1385
1386 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1387 __func__, adr, datum.x[0] );
1388
1389 XIP_INVAL_CACHED_RANGE(map, adr, len);
1390 ENABLE_VPP(map);
1391 xip_disable(map, chip, cmd_adr);
1392
1393 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1394 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1395
1396 /* Write Buffer Load */
1397 map_write(map, CMD(0x25), cmd_adr);
1398
1399 chip->state = FL_WRITING_TO_BUFFER;
1400
1401 /* Write length of data to come */
1402 words = len / map_bankwidth(map);
1403 map_write(map, CMD(words - 1), cmd_adr);
1404 /* Write data */
1405 z = 0;
1406 while(z < words * map_bankwidth(map)) {
1407 datum = map_word_load(map, buf);
1408 map_write(map, datum, adr + z);
1409
1410 z += map_bankwidth(map);
1411 buf += map_bankwidth(map);
1412 }
1413 z -= map_bankwidth(map);
1414
1415 adr += z;
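	/* 'adr' now points at the last word loaded into the write buffer;
	 * completion is polled there via chip_ready() below. */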
1416
1417 /* Write Buffer Program Confirm: GO GO GO */
1418 map_write(map, CMD(0x29), cmd_adr);
1419 chip->state = FL_WRITING;
1420
1421 INVALIDATE_CACHE_UDELAY(map, chip,
1422 adr, map_bankwidth(map),
1423 chip->word_write_time);
1424
1425 timeo = jiffies + uWriteTimeout;
1426
1427 for (;;) {
1428 if (chip->state != FL_WRITING) {
1429 /* Someone's suspended the write. Sleep */
1430 DECLARE_WAITQUEUE(wait, current);
1431
1432 set_current_state(TASK_UNINTERRUPTIBLE);
1433 add_wait_queue(&chip->wq, &wait);
1434 mutex_unlock(&chip->mutex);
1435 schedule();
1436 remove_wait_queue(&chip->wq, &wait);
1437 timeo = jiffies + (HZ / 2); /* FIXME */
1438 mutex_lock(&chip->mutex);
1439 continue;
1440 }
1441
1442 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1443 break;
1444
1445 if (chip_ready(map, adr)) {
1446 xip_enable(map, chip, adr);
1447 goto op_done;
1448 }
1449
1450 /* Latency issues. Drop the lock, wait a while and retry */
1451 UDELAY(map, chip, adr, 1);
1452 }
1453
1454 /* reset on all failures. */
1455 map_write( map, CMD(0xF0), chip->start );
1456 xip_enable(map, chip, adr);
1457 /* FIXME - should have reset delay before continuing */
1458
1459 printk(KERN_WARNING "MTD %s(): software timeout\n",
1460 __func__ );
1461
1462 ret = -EIO;
1463 op_done:
1464 chip->state = FL_READY;
1465 put_chip(map, chip, adr);
1466 mutex_unlock(&chip->mutex);
1467
1468 return ret;
1469 }
1470
1471
1472 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1473 size_t *retlen, const u_char *buf)
1474 {
1475 struct map_info *map = mtd->priv;
1476 struct cfi_private *cfi = map->fldrv_priv;
1477 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1478 int ret = 0;
1479 int chipnum;
1480 unsigned long ofs;
1481
1482 *retlen = 0;
1483 if (!len)
1484 return 0;
1485
1486 chipnum = to >> cfi->chipshift;
1487 ofs = to - (chipnum << cfi->chipshift);
1488
1489 /* If it's not bus-aligned, do the first word write */
1490 if (ofs & (map_bankwidth(map)-1)) {
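		/* (-ofs) & (bankwidth - 1) gives the number of bytes up to
		 * the next bus-width boundary. */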
1491 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1492 if (local_len > len)
1493 local_len = len;
1494 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1495 local_len, retlen, buf);
1496 if (ret)
1497 return ret;
1498 ofs += local_len;
1499 buf += local_len;
1500 len -= local_len;
1501
1502 if (ofs >> cfi->chipshift) {
1503 chipnum ++;
1504 ofs = 0;
1505 if (chipnum == cfi->numchips)
1506 return 0;
1507 }
1508 }
1509
1510 /* Write buffer is worth it only if more than one word to write... */
1511 while (len >= map_bankwidth(map) * 2) {
1512 /* We must not cross write block boundaries */
1513 int size = wbufsize - (ofs & (wbufsize-1));
1514
1515 if (size > len)
1516 size = len;
1517 if (size % map_bankwidth(map))
1518 size -= size % map_bankwidth(map);
1519
1520 ret = do_write_buffer(map, &cfi->chips[chipnum],
1521 ofs, buf, size);
1522 if (ret)
1523 return ret;
1524
1525 ofs += size;
1526 buf += size;
1527 (*retlen) += size;
1528 len -= size;
1529
1530 if (ofs >> cfi->chipshift) {
1531 chipnum ++;
1532 ofs = 0;
1533 if (chipnum == cfi->numchips)
1534 return 0;
1535 }
1536 }
1537
1538 if (len) {
1539 size_t retlen_dregs = 0;
1540
1541 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1542 len, &retlen_dregs, buf);
1543
1544 *retlen += retlen_dregs;
1545 return ret;
1546 }
1547
1548 return 0;
1549 }
1550
1551
1552 /*
1553 * Handle devices with one erase region, that only implement
1554 * the chip erase command.
1555 */
1556 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1557 {
1558 struct cfi_private *cfi = map->fldrv_priv;
1559 unsigned long timeo = jiffies + HZ;
1560 unsigned long int adr;
1561 DECLARE_WAITQUEUE(wait, current);
1562 int ret = 0;
1563
1564 adr = cfi->addr_unlock1;
1565
1566 mutex_lock(&chip->mutex);
1567 ret = get_chip(map, chip, adr, FL_WRITING);
1568 if (ret) {
1569 mutex_unlock(&chip->mutex);
1570 return ret;
1571 }
1572
1573 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1574 __func__, chip->start );
1575
1576 XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1577 ENABLE_VPP(map);
1578 xip_disable(map, chip, adr);
1579
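	/* Six-cycle AMD chip-erase sequence: AA/55/80 (erase setup)
	 * followed by AA/55/10 (Chip Erase). */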
1580 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1581 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1582 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1583 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1584 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1585 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1586
1587 chip->state = FL_ERASING;
1588 chip->erase_suspended = 0;
1589 chip->in_progress_block_addr = adr;
1590
1591 INVALIDATE_CACHE_UDELAY(map, chip,
1592 adr, map->size,
1593 chip->erase_time*500);
1594
1595 timeo = jiffies + (HZ*20);
1596
1597 for (;;) {
1598 if (chip->state != FL_ERASING) {
1599 /* Someone's suspended the erase. Sleep */
1600 set_current_state(TASK_UNINTERRUPTIBLE);
1601 add_wait_queue(&chip->wq, &wait);
1602 mutex_unlock(&chip->mutex);
1603 schedule();
1604 remove_wait_queue(&chip->wq, &wait);
1605 mutex_lock(&chip->mutex);
1606 continue;
1607 }
1608 if (chip->erase_suspended) {
1609 /* This erase was suspended and resumed.
1610 Adjust the timeout */
1611 timeo = jiffies + (HZ*20); /* FIXME */
1612 chip->erase_suspended = 0;
1613 }
1614
1615 if (chip_ready(map, adr))
1616 break;
1617
1618 if (time_after(jiffies, timeo)) {
1619 printk(KERN_WARNING "MTD %s(): software timeout\n",
1620 __func__ );
1621 break;
1622 }
1623
1624 /* Latency issues. Drop the lock, wait a while and retry */
1625 UDELAY(map, chip, adr, 1000000/HZ);
1626 }
1627 /* Did we succeed? */
1628 if (!chip_good(map, adr, map_word_ff(map))) {
1629 /* reset on all failures. */
1630 map_write( map, CMD(0xF0), chip->start );
1631 /* FIXME - should have reset delay before continuing */
1632
1633 ret = -EIO;
1634 }
1635
1636 chip->state = FL_READY;
1637 xip_enable(map, chip, adr);
1638 put_chip(map, chip, adr);
1639 mutex_unlock(&chip->mutex);
1640
1641 return ret;
1642 }
1643
1644
1645 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1646 {
1647 struct cfi_private *cfi = map->fldrv_priv;
1648 unsigned long timeo = jiffies + HZ;
1649 DECLARE_WAITQUEUE(wait, current);
1650 int ret = 0;
1651
1652 adr += chip->start;
1653
1654 mutex_lock(&chip->mutex);
1655 ret = get_chip(map, chip, adr, FL_ERASING);
1656 if (ret) {
1657 mutex_unlock(&chip->mutex);
1658 return ret;
1659 }
1660
1661 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1662 __func__, adr );
1663
1664 XIP_INVAL_CACHED_RANGE(map, adr, len);
1665 ENABLE_VPP(map);
1666 xip_disable(map, chip, adr);
1667
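	/* Sector-erase sequence: AA/55/80 (erase setup), AA/55, then 0x30
	 * written to the address of the sector being erased. */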
1668 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1669 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1670 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1671 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1672 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1673 map_write(map, CMD(0x30), adr);
1674
1675 chip->state = FL_ERASING;
1676 chip->erase_suspended = 0;
1677 chip->in_progress_block_addr = adr;
1678
1679 INVALIDATE_CACHE_UDELAY(map, chip,
1680 adr, len,
1681 chip->erase_time*500);
1682
1683 timeo = jiffies + (HZ*20);
1684
1685 for (;;) {
1686 if (chip->state != FL_ERASING) {
1687 /* Someone's suspended the erase. Sleep */
1688 set_current_state(TASK_UNINTERRUPTIBLE);
1689 add_wait_queue(&chip->wq, &wait);
1690 mutex_unlock(&chip->mutex);
1691 schedule();
1692 remove_wait_queue(&chip->wq, &wait);
1693 mutex_lock(&chip->mutex);
1694 continue;
1695 }
1696 if (chip->erase_suspended) {
1697 /* This erase was suspended and resumed.
1698 Adjust the timeout */
1699 timeo = jiffies + (HZ*20); /* FIXME */
1700 chip->erase_suspended = 0;
1701 }
1702
1703 if (chip_ready(map, adr)) {
1704 xip_enable(map, chip, adr);
1705 break;
1706 }
1707
1708 if (time_after(jiffies, timeo)) {
1709 xip_enable(map, chip, adr);
1710 printk(KERN_WARNING "MTD %s(): software timeout\n",
1711 __func__ );
1712 break;
1713 }
1714
1715 /* Latency issues. Drop the lock, wait a while and retry */
1716 UDELAY(map, chip, adr, 1000000/HZ);
1717 }
1718 /* Did we succeed? */
1719 if (!chip_good(map, adr, map_word_ff(map))) {
1720 /* reset on all failures. */
1721 map_write( map, CMD(0xF0), chip->start );
1722 /* FIXME - should have reset delay before continuing */
1723
1724 ret = -EIO;
1725 }
1726
1727 chip->state = FL_READY;
1728 put_chip(map, chip, adr);
1729 mutex_unlock(&chip->mutex);
1730 return ret;
1731 }
1732
1733
1734 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1735 {
1736 unsigned long ofs, len;
1737 int ret;
1738
1739 ofs = instr->addr;
1740 len = instr->len;
1741
1742 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1743 if (ret)
1744 return ret;
1745
1746 instr->state = MTD_ERASE_DONE;
1747 mtd_erase_callback(instr);
1748
1749 return 0;
1750 }
1751
1752
1753 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1754 {
1755 struct map_info *map = mtd->priv;
1756 struct cfi_private *cfi = map->fldrv_priv;
1757 int ret = 0;
1758
1759 if (instr->addr != 0)
1760 return -EINVAL;
1761
1762 if (instr->len != mtd->size)
1763 return -EINVAL;
1764
1765 ret = do_erase_chip(map, &cfi->chips[0]);
1766 if (ret)
1767 return ret;
1768
1769 instr->state = MTD_ERASE_DONE;
1770 mtd_erase_callback(instr);
1771
1772 return 0;
1773 }
1774
1775 static int do_atmel_lock(struct map_info *map, struct flchip *chip,
1776 unsigned long adr, int len, void *thunk)
1777 {
1778 struct cfi_private *cfi = map->fldrv_priv;
1779 int ret;
1780
1781 mutex_lock(&chip->mutex);
1782 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
1783 if (ret)
1784 goto out_unlock;
1785 chip->state = FL_LOCKING;
1786
1787 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
1788 __func__, adr, len);
1789
1790 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1791 cfi->device_type, NULL);
1792 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1793 cfi->device_type, NULL);
1794 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
1795 cfi->device_type, NULL);
1796 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1797 cfi->device_type, NULL);
1798 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1799 cfi->device_type, NULL);
1800 map_write(map, CMD(0x40), chip->start + adr);
1801
1802 chip->state = FL_READY;
1803 put_chip(map, chip, adr + chip->start);
1804 ret = 0;
1805
1806 out_unlock:
1807 mutex_unlock(&chip->mutex);
1808 return ret;
1809 }
1810
1811 static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
1812 unsigned long adr, int len, void *thunk)
1813 {
1814 struct cfi_private *cfi = map->fldrv_priv;
1815 int ret;
1816
1817 mutex_lock(&chip->mutex);
1818 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
1819 if (ret)
1820 goto out_unlock;
1821 chip->state = FL_UNLOCKING;
1822
1823 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
1824 __func__, adr, len);
1825
1826 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1827 cfi->device_type, NULL);
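	/* A single unlock cycle followed by 0x70 at the sector address;
	 * this unlock path is only wired up for Atmel's AT49BV6416 (see
	 * fixup_use_atmel_lock), so the command set here is Atmel-specific. */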
1828 map_write(map, CMD(0x70), adr);
1829
1830 chip->state = FL_READY;
1831 put_chip(map, chip, adr + chip->start);
1832 ret = 0;
1833
1834 out_unlock:
1835 mutex_unlock(&chip->mutex);
1836 return ret;
1837 }
1838
1839 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1840 {
1841 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
1842 }
1843
1844 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1845 {
1846 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
1847 }
1848
1849
1850 static void cfi_amdstd_sync (struct mtd_info *mtd)
1851 {
1852 struct map_info *map = mtd->priv;
1853 struct cfi_private *cfi = map->fldrv_priv;
1854 int i;
1855 struct flchip *chip;
1856 int ret = 0;
1857 DECLARE_WAITQUEUE(wait, current);
1858
1859 for (i=0; !ret && i<cfi->numchips; i++) {
1860 chip = &cfi->chips[i];
1861
1862 retry:
1863 mutex_lock(&chip->mutex);
1864
1865 switch(chip->state) {
1866 case FL_READY:
1867 case FL_STATUS:
1868 case FL_CFI_QUERY:
1869 case FL_JEDEC_QUERY:
1870 chip->oldstate = chip->state;
1871 chip->state = FL_SYNCING;
1872 /* No need to wake_up() on this state change -
1873 * as the whole point is that nobody can do anything
1874 * with the chip now anyway.
1875 */
1876 case FL_SYNCING:
1877 mutex_unlock(&chip->mutex);
1878 break;
1879
1880 default:
1881 /* Not an idle state */
1882 set_current_state(TASK_UNINTERRUPTIBLE);
1883 add_wait_queue(&chip->wq, &wait);
1884
1885 mutex_unlock(&chip->mutex);
1886
1887 schedule();
1888
1889 remove_wait_queue(&chip->wq, &wait);
1890
1891 goto retry;
1892 }
1893 }
1894
1895 /* Unlock the chips again */
1896
1897 for (i--; i >=0; i--) {
1898 chip = &cfi->chips[i];
1899
1900 mutex_lock(&chip->mutex);
1901
1902 if (chip->state == FL_SYNCING) {
1903 chip->state = chip->oldstate;
1904 wake_up(&chip->wq);
1905 }
1906 mutex_unlock(&chip->mutex);
1907 }
1908 }
1909
1910
1911 static int cfi_amdstd_suspend(struct mtd_info *mtd)
1912 {
1913 struct map_info *map = mtd->priv;
1914 struct cfi_private *cfi = map->fldrv_priv;
1915 int i;
1916 struct flchip *chip;
1917 int ret = 0;
1918
1919 for (i=0; !ret && i<cfi->numchips; i++) {
1920 chip = &cfi->chips[i];
1921
1922 mutex_lock(&chip->mutex);
1923
1924 switch(chip->state) {
1925 case FL_READY:
1926 case FL_STATUS:
1927 case FL_CFI_QUERY:
1928 case FL_JEDEC_QUERY:
1929 chip->oldstate = chip->state;
1930 chip->state = FL_PM_SUSPENDED;
1931 /* No need to wake_up() on this state change -
1932 * as the whole point is that nobody can do anything
1933 * with the chip now anyway.
1934 */
1935 case FL_PM_SUSPENDED:
1936 break;
1937
1938 default:
1939 ret = -EAGAIN;
1940 break;
1941 }
1942 mutex_unlock(&chip->mutex);
1943 }
1944
1945 /* Unlock the chips again */
1946
1947 if (ret) {
1948 for (i--; i >=0; i--) {
1949 chip = &cfi->chips[i];
1950
1951 mutex_lock(&chip->mutex);
1952
1953 if (chip->state == FL_PM_SUSPENDED) {
1954 chip->state = chip->oldstate;
1955 wake_up(&chip->wq);
1956 }
1957 mutex_unlock(&chip->mutex);
1958 }
1959 }
1960
1961 return ret;
1962 }
1963
1964
1965 static void cfi_amdstd_resume(struct mtd_info *mtd)
1966 {
1967 struct map_info *map = mtd->priv;
1968 struct cfi_private *cfi = map->fldrv_priv;
1969 int i;
1970 struct flchip *chip;
1971
1972 for (i=0; i<cfi->numchips; i++) {
1973
1974 chip = &cfi->chips[i];
1975
1976 mutex_lock(&chip->mutex);
1977
1978 if (chip->state == FL_PM_SUSPENDED) {
1979 chip->state = FL_READY;
1980 map_write(map, CMD(0xF0), chip->start);
1981 wake_up(&chip->wq);
1982 }
1983 else
1984 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
1985
1986 mutex_unlock(&chip->mutex);
1987 }
1988 }
1989
1990
1991 /*
1992 * Ensure that the flash device is put back into read array mode before
1993 * unloading the driver or rebooting. On some systems, rebooting while
1994 * the flash is in query/program/erase mode will prevent the CPU from
1995 * fetching the bootloader code, requiring a hard reset or power cycle.
1996 */
1997 static int cfi_amdstd_reset(struct mtd_info *mtd)
1998 {
1999 struct map_info *map = mtd->priv;
2000 struct cfi_private *cfi = map->fldrv_priv;
2001 int i, ret;
2002 struct flchip *chip;
2003
2004 for (i = 0; i < cfi->numchips; i++) {
2005
2006 chip = &cfi->chips[i];
2007
2008 mutex_lock(&chip->mutex);
2009
2010 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2011 if (!ret) {
2012 map_write(map, CMD(0xF0), chip->start);
2013 chip->state = FL_SHUTDOWN;
2014 put_chip(map, chip, chip->start);
2015 }
2016
2017 mutex_unlock(&chip->mutex);
2018 }
2019
2020 return 0;
2021 }
2022
2023
2024 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
2025 void *v)
2026 {
2027 struct mtd_info *mtd;
2028
2029 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2030 cfi_amdstd_reset(mtd);
2031 return NOTIFY_DONE;
2032 }
2033
2034
2035 static void cfi_amdstd_destroy(struct mtd_info *mtd)
2036 {
2037 struct map_info *map = mtd->priv;
2038 struct cfi_private *cfi = map->fldrv_priv;
2039
2040 cfi_amdstd_reset(mtd);
2041 unregister_reboot_notifier(&mtd->reboot_notifier);
2042 kfree(cfi->cmdset_priv);
2043 kfree(cfi->cfiq);
2044 kfree(cfi);
2045 kfree(mtd->eraseregions);
2046 }
2047
2048 MODULE_LICENSE("GPL");
2049 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
2050 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
2051 MODULE_ALIAS("cfi_cmdset_0006");
2052 MODULE_ALIAS("cfi_cmdset_0701");