[MTD] cfi_cmdset_0001: Fix compiler warnings
drivers/mtd/chips/cfi_cmdset_0001.c
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.171 2005/03/19 22:39:49 gleixner Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F

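/*
 * Intel/Sharp command and status codes used throughout this driver
 * (per the CFI 0x0001 command set and Intel StrataFlash datasheets):
 *
 *	0xff  Read Array		0x70  Read Status Register
 *	0x50  Clear Status Register	0x40  Word Program
 *	0xe8  Write to Buffer		0x20  Block Erase
 *	0xd0  Confirm / Resume		0xb0  Program/Erase Suspend
 *	0xc0  Protection (OTP) Program
 *
 * Status register bits tested below: bit 7 (0x80) write state machine
 * ready, bit 6 (0x40) erase suspended, bit 2 (0x04) program suspended,
 * bit 1 (0x02) attempted write to a locked block.
 */
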
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
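
/*
 * fwh_lock.h supplies fixup_use_fwh_lock: Firmware Hub parts such as
 * the Intel 82802 use their own block locking scheme, hooked up from
 * the JEDEC fixup table below.
 */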
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

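/*
 * The M28W320CB needs its second erase region descriptor patched: the
 * low 16 bits of EraseRegionInfo encode (number of blocks - 1), so the
 * 0x3e below forces 63 blocks.
 */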
static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

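/*
 * On a directly-mapped (linear) flash we can offer point/unpoint, which
 * hand the caller a pointer straight into the chip's address space
 * instead of copying data out through a buffer.
 */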
static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks up all the cases where we
	 * know that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};

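/*
 * Read and byteswap the Intel PRI extended query table.  Its full size
 * is not known up front: a version 1.3 table is followed by protection
 * register info, burst read info, and one region descriptor per
 * hardware-partition region.  Whenever the variable part turns out to
 * extend beyond what was fetched so far, the buffer is freed and the
 * read retried with the larger size, capped at 4096 bytes.
 */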
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 6;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
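
/*
 * Typical use from a map driver (a sketch, not code from this file):
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		add_mtd_device(mtd);
 *
 * cfi_probe reads the CFI query data and the chip driver layer then
 * dispatches to cfi_cmdset_0001() when the primary vendor command set
 * ID is 0x0001.
 */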
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

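	/*
	 * The CFI query gives typical program/erase times as log2 values:
	 * 2^n microseconds for word and buffer writes, 2^n milliseconds
	 * for a block erase.
	 */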
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

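	/*
	 * Each EraseRegionInfo word encodes, per the CFI spec, the number
	 * of blocks minus one in bits 0-15 and the block size divided by
	 * 256 in bits 16-31; hence (info >> 8) & ~0xff recovers the block
	 * size in bytes.
	 */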
	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += 6;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);
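		/*
		 * (__ffs(numparts) acts as log2(numparts) here: with a
		 * power-of-two chip size and equal-size partitions,
		 * numparts is necessarily a power of two itself.)
		 */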

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

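/*
 * get_chip() is called with chip->mutex already held.  It drives the
 * per-chip state machine: it waits, or suspends a running erase, until
 * the chip can service the operation requested in 'mode'.  put_chip()
 * undoes this, resuming any suspended operation and waking up waiters.
 */
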
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
				       status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
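		/* Fall through */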

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
725 default:
726 sleep:
727 set_current_state(TASK_UNINTERRUPTIBLE);
728 add_wait_queue(&chip->wq, &wait);
729 spin_unlock(chip->mutex);
730 schedule();
731 remove_wait_queue(&chip->wq, &wait);
732 spin_lock(chip->mutex);
733 goto resettime;
734 }
735}
736
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Also configuring MTD CFI
 * support for a single buswidth and a single interleave is recommended.
 * Note that not only IRQs are disabled but the preemption count is also
 * increased to prevent other locking primitives (namely spin_unlock) from
 * decrementing the preempt count to zero and scheduling the CPU away while
 * not in array mode.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	preempt_disable();
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
	local_irq_enable();
	preempt_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			preempt_enable();
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			preempt_disable();
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				preempt_enable();
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				preempt_disable();
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case.
 */
#undef INVALIDATE_CACHED_RANGE
#define INVALIDATE_CACHED_RANGE(x...)
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
	do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no concern
 * about the presence of add_wait_queue() or schedule() calls from within a
 * couple of xip_disable()'d areas of code, like in do_erase_oneblock for
 * example.  The queueing and scheduling are always happening within
 * xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)

#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)

#define XIP_INVAL_CACHED_RANGE(x...)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
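	/* 0x40 programs a word in the main array; 0xc0 programs the
	   protection (OTP) register instead. */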
	switch (mode) {
	case FL_WRITING:   write_cmd = CMD(0x40); break;
	case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
	default: return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
	UDELAY(map, chip, adr, chip->word_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		z++;
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}
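	/* Adapt the per-word delay estimate: no extra polling rounds
	   means we waited longer than necessary, while more than one
	   extra round means the estimate was too short. */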
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

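	/*
	 * Partial words below are padded with 0xff: NOR programming can
	 * only clear bits, so the 0xff lanes leave the neighbouring
	 * bytes unchanged.
	 */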
	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

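	/* MaxBufWriteSize is the log2 of the per-chip write buffer size
	   in bytes; scaling by the interleave gives its size in this map. */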
	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);
