/* This version ported to the Linux-MTD system by dwmw2@infradead.org
 *
 * Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 * - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
 *
 * Based on:
 */
/*======================================================================

    A Flash Translation Layer memory card driver

    This driver implements a disk-like block device driver with an
    apparent block size of 512 bytes for flash memory cards.

    ftl_cs.c 1.62 2000/02/01 00:59:04

    The contents of this file are subject to the Mozilla Public
    License Version 1.1 (the "License"); you may not use this file
    except in compliance with the License. You may obtain a copy of
    the License at http://www.mozilla.org/MPL/

    Software distributed under the License is distributed on an "AS
    IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
    implied. See the License for the specific language governing
    rights and limitations under the License.

    The initial developer of the original code is David A. Hinds
    <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
    are Copyright © 1999 David A. Hinds. All Rights Reserved.

    Alternatively, the contents of this file may be used under the
    terms of the GNU General Public License version 2 (the "GPL"), in
    which case the provisions of the GPL are applicable instead of the
    above. If you wish to allow the use of your version of this file
    only under the terms of the GPL and not to allow others to use
    your version of this file under the MPL, indicate your decision
    by deleting the provisions above and replace them with the notice
    and other provisions required by the GPL. If you do not delete
    the provisions above, a recipient may use your version of this
    file under either the MPL or the GPL.

    LEGAL NOTE: The FTL format is patented by M-Systems. They have
    granted a license for its use with PCMCIA devices:

     "M-Systems grants a royalty-free, non-exclusive license under
      any presently existing M-Systems intellectual property rights
      necessary for the design and development of FTL-compatible
      drivers, file systems and utilities using the data formats with
      PCMCIA PC Cards as described in the PCMCIA Flash Translation
      Layer (FTL) Specification."

    Use of the FTL format for non-PCMCIA applications may be an
    infringement of these patents. For additional information,
    contact M-Systems directly. M-Systems has since been acquired by SanDisk.

======================================================================*/
#include <linux/mtd/blktrans.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
/*#define PSYCHO_DEBUG */

#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/vmalloc.h>
#include <linux/blkpg.h>
#include <asm/uaccess.h>

#include <linux/mtd/ftl.h>

/*====================================================================*/

/* Parameters that can be set with 'insmod' */
static int shuffle_freq = 50;
module_param(shuffle_freq, int, 0);

/*====================================================================*/

/* Major device # for FTL device */
#ifndef FTL_MAJOR
#define FTL_MAJOR 44
#endif


/*====================================================================*/

/* Maximum number of separate memory devices we'll allow */
#define MAX_DEV 4

/* Maximum number of regions per device */
#define MAX_REGION 4

/* Maximum number of partitions in an FTL region */
#define PART_BITS 4

/* Maximum number of outstanding erase requests per socket */
#define MAX_ERASE 8

/* Sector size -- shouldn't need to change */
#define SECTOR_SIZE 512


/* Each memory region corresponds to a minor device */
typedef struct partition_t {
    struct mtd_blktrans_dev mbd;
    uint32_t state;
    uint32_t *VirtualBlockMap;
    uint32_t FreeTotal;
    struct eun_info_t {
        uint32_t Offset;
        uint32_t EraseCount;
        uint32_t Free;
        uint32_t Deleted;
    } *EUNInfo;
    struct xfer_info_t {
        uint32_t Offset;
        uint32_t EraseCount;
        uint16_t state;
    } *XferInfo;
    uint16_t bam_index;
    uint32_t *bam_cache;
    uint16_t DataUnits;
    uint32_t BlocksPerUnit;
    erase_unit_header_t header;
} partition_t;
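
/* Roughly how the pieces above fit together: VirtualBlockMap[] maps a
   virtual block number to a "logical address", i.e. the erase unit
   index shifted up by EraseUnitSize plus the block's byte offset
   within that unit. EUNInfo[] and XferInfo[] track the physical
   offset and erase count of each data unit and transfer unit, and
   bam_cache holds the Block Allocation Map of the single unit named
   by bam_index. For example, with 0x2000-byte erase units a logical
   address of 0x6200 means byte offset 0x200 (block 1) of erase unit 3. */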

/* Partition state flags */
#define FTL_FORMATTED 0x01

/* Transfer unit states */
#define XFER_UNKNOWN 0x00
#define XFER_ERASING 0x01
#define XFER_ERASED 0x02
#define XFER_PREPARED 0x03
#define XFER_FAILED 0x04
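
/* A transfer unit normally moves through XFER_UNKNOWN -> XFER_ERASING ->
   XFER_ERASED -> XFER_PREPARED as reclaim_block() drives it through
   erase_xfer() and prepare_xfer() below; XFER_FAILED marks a unit whose
   erase or prepare step went wrong. */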

/*====================================================================*/


static void ftl_erase_callback(struct erase_info *done);


/*======================================================================

    Scan_header() checks to see if a memory region contains an FTL
    partition. build_maps() reads all the erase unit headers, builds
    the erase unit map, and then builds the virtual page map.

======================================================================*/

static int scan_header(partition_t *part)
{
    erase_unit_header_t header;
    loff_t offset, max_offset;
    size_t ret;
    int err;
    part->header.FormattedSize = 0;
    max_offset = (0x100000<part->mbd.mtd->size)?0x100000:part->mbd.mtd->size;
    /* Search first megabyte for a valid FTL header */
    for (offset = 0;
         (offset + sizeof(header)) < max_offset;
         offset += part->mbd.mtd->erasesize ? : 0x2000) {

        err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret,
                       (unsigned char *)&header);

        if (err)
            return err;

        if (strcmp(header.DataOrgTuple+3, "FTL100") == 0) break;
    }

    if (offset == max_offset) {
        printk(KERN_NOTICE "ftl_cs: FTL header not found.\n");
        return -ENOENT;
    }
    if (header.BlockSize != 9 ||
        (header.EraseUnitSize < 10) || (header.EraseUnitSize > 31) ||
        (header.NumTransferUnits >= le16_to_cpu(header.NumEraseUnits))) {
        printk(KERN_NOTICE "ftl_cs: FTL header corrupt!\n");
        return -1;
    }
    if ((1 << header.EraseUnitSize) != part->mbd.mtd->erasesize) {
        printk(KERN_NOTICE "ftl: FTL EraseUnitSize %x != MTD erasesize %x\n",
               1 << header.EraseUnitSize,part->mbd.mtd->erasesize);
        return -1;
    }
    part->header = header;
    return 0;
}

static int build_maps(partition_t *part)
{
    erase_unit_header_t header;
    uint16_t xvalid, xtrans, i;
    unsigned blocks, j;
    int hdr_ok, ret = -1;
    size_t retval;
    loff_t offset;

    /* Set up erase unit maps */
    part->DataUnits = le16_to_cpu(part->header.NumEraseUnits) -
        part->header.NumTransferUnits;
    part->EUNInfo = kmalloc(part->DataUnits * sizeof(struct eun_info_t),
                            GFP_KERNEL);
    if (!part->EUNInfo)
        goto out;
    for (i = 0; i < part->DataUnits; i++)
        part->EUNInfo[i].Offset = 0xffffffff;
    part->XferInfo =
        kmalloc(part->header.NumTransferUnits * sizeof(struct xfer_info_t),
                GFP_KERNEL);
    if (!part->XferInfo)
        goto out_EUNInfo;

    xvalid = xtrans = 0;
    for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
        offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
                  << part->header.EraseUnitSize);
        ret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval,
                       (unsigned char *)&header);

        if (ret)
            goto out_XferInfo;

        ret = -1;
        /* Is this a transfer partition? */
        hdr_ok = (strcmp(header.DataOrgTuple+3, "FTL100") == 0);
        if (hdr_ok && (le16_to_cpu(header.LogicalEUN) < part->DataUnits) &&
            (part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset == 0xffffffff)) {
            part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset = offset;
            part->EUNInfo[le16_to_cpu(header.LogicalEUN)].EraseCount =
                le32_to_cpu(header.EraseCount);
            xvalid++;
        } else {
            if (xtrans == part->header.NumTransferUnits) {
                printk(KERN_NOTICE "ftl_cs: format error: too many "
                       "transfer units!\n");
                goto out_XferInfo;
            }
            if (hdr_ok && (le16_to_cpu(header.LogicalEUN) == 0xffff)) {
                part->XferInfo[xtrans].state = XFER_PREPARED;
                part->XferInfo[xtrans].EraseCount = le32_to_cpu(header.EraseCount);
            } else {
                part->XferInfo[xtrans].state = XFER_UNKNOWN;
                /* Pick anything reasonable for the erase count */
                part->XferInfo[xtrans].EraseCount =
                    le32_to_cpu(part->header.EraseCount);
            }
            part->XferInfo[xtrans].Offset = offset;
            xtrans++;
        }
    }
    /* Check for format trouble */
    header = part->header;
    if ((xtrans != header.NumTransferUnits) ||
        (xvalid+xtrans != le16_to_cpu(header.NumEraseUnits))) {
        printk(KERN_NOTICE "ftl_cs: format error: erase units "
               "don't add up!\n");
        goto out_XferInfo;
    }

    /* Set up virtual page map */
    blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
    part->VirtualBlockMap = vmalloc(blocks * sizeof(uint32_t));
    if (!part->VirtualBlockMap)
        goto out_XferInfo;

    memset(part->VirtualBlockMap, 0xff, blocks * sizeof(uint32_t));
    part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize;

    part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(uint32_t),
                              GFP_KERNEL);
    if (!part->bam_cache)
        goto out_VirtualBlockMap;

    part->bam_index = 0xffff;
    part->FreeTotal = 0;

    for (i = 0; i < part->DataUnits; i++) {
        part->EUNInfo[i].Free = 0;
        part->EUNInfo[i].Deleted = 0;
        offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);

        ret = mtd_read(part->mbd.mtd, offset,
                       part->BlocksPerUnit * sizeof(uint32_t), &retval,
                       (unsigned char *)part->bam_cache);

        if (ret)
            goto out_bam_cache;

        for (j = 0; j < part->BlocksPerUnit; j++) {
            if (BLOCK_FREE(le32_to_cpu(part->bam_cache[j]))) {
                part->EUNInfo[i].Free++;
                part->FreeTotal++;
            } else if ((BLOCK_TYPE(le32_to_cpu(part->bam_cache[j])) == BLOCK_DATA) &&
                       (BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j])) < blocks))
                part->VirtualBlockMap[BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j]))] =
                    (i << header.EraseUnitSize) + (j << header.BlockSize);
            else if (BLOCK_DELETED(le32_to_cpu(part->bam_cache[j])))
                part->EUNInfo[i].Deleted++;
        }
    }

    ret = 0;
    goto out;

out_bam_cache:
    kfree(part->bam_cache);
out_VirtualBlockMap:
    vfree(part->VirtualBlockMap);
out_XferInfo:
    kfree(part->XferInfo);
out_EUNInfo:
    kfree(part->EUNInfo);
out:
    return ret;
} /* build_maps */

/*======================================================================

    Erase_xfer() schedules an asynchronous erase operation for a
    transfer unit.

======================================================================*/

static int erase_xfer(partition_t *part,
                      uint16_t xfernum)
{
    int ret;
    struct xfer_info_t *xfer;
    struct erase_info *erase;

    xfer = &part->XferInfo[xfernum];
    pr_debug("ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
    xfer->state = XFER_ERASING;

    /* Is there a free erase slot? Always in MTD. */


    erase=kmalloc(sizeof(struct erase_info), GFP_KERNEL);
    if (!erase)
        return -ENOMEM;

    erase->mtd = part->mbd.mtd;
    erase->callback = ftl_erase_callback;
    erase->addr = xfer->Offset;
    erase->len = 1 << part->header.EraseUnitSize;
    erase->priv = (u_long)part;
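    /* The erase completes asynchronously: ftl_erase_callback() uses
       erase->priv and erase->addr to find this transfer unit again and
       marks it XFER_ERASED (or XFER_FAILED) once the MTD layer is done. */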

    ret = mtd_erase(part->mbd.mtd, erase);

    if (!ret)
        xfer->EraseCount++;
    else
        kfree(erase);

    return ret;
} /* erase_xfer */

/*======================================================================

    Prepare_xfer() takes a freshly erased transfer unit and gives
    it an appropriate header.

======================================================================*/

static void ftl_erase_callback(struct erase_info *erase)
{
    partition_t *part;
    struct xfer_info_t *xfer;
    int i;

    /* Look up the transfer unit */
    part = (partition_t *)(erase->priv);

    for (i = 0; i < part->header.NumTransferUnits; i++)
        if (part->XferInfo[i].Offset == erase->addr) break;

    if (i == part->header.NumTransferUnits) {
        printk(KERN_NOTICE "ftl_cs: internal error: "
               "erase lookup failed!\n");
        return;
    }

    xfer = &part->XferInfo[i];
    if (erase->state == MTD_ERASE_DONE)
        xfer->state = XFER_ERASED;
    else {
        xfer->state = XFER_FAILED;
        printk(KERN_NOTICE "ftl_cs: erase failed: state = %d\n",
               erase->state);
    }

    kfree(erase);

} /* ftl_erase_callback */

static int prepare_xfer(partition_t *part, int i)
{
    erase_unit_header_t header;
    struct xfer_info_t *xfer;
    int nbam, ret;
    uint32_t ctl;
    size_t retlen;
    loff_t offset;

    xfer = &part->XferInfo[i];
    xfer->state = XFER_FAILED;

    pr_debug("ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);

    /* Write the transfer unit header */
    header = part->header;
    header.LogicalEUN = cpu_to_le16(0xffff);
    header.EraseCount = cpu_to_le32(xfer->EraseCount);

    ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen,
                    (u_char *)&header);

    if (ret) {
        return ret;
    }

    /* Write the BAM stub */
    nbam = (part->BlocksPerUnit * sizeof(uint32_t) +
            le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE;
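    /* nbam is, in effect, the number of 512-byte blocks covered by the
       control area at the start of the unit (everything up to and
       including the BAM); the loop below tags each of them BLOCK_CONTROL
       in the new BAM so they are never handed out as data blocks. */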

    offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset);
    ctl = cpu_to_le32(BLOCK_CONTROL);

    for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {

        ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
                        (u_char *)&ctl);

        if (ret)
            return ret;
    }
    xfer->state = XFER_PREPARED;
    return 0;

} /* prepare_xfer */

/*======================================================================

    Copy_erase_unit() takes a full erase block and a transfer unit,
    copies everything to the transfer unit, then swaps the block
    pointers.

    All data blocks are copied to the corresponding blocks in the
    target unit, so the virtual block map does not need to be
    updated.

======================================================================*/

static int copy_erase_unit(partition_t *part, uint16_t srcunit,
                           uint16_t xferunit)
{
    u_char buf[SECTOR_SIZE];
    struct eun_info_t *eun;
    struct xfer_info_t *xfer;
    uint32_t src, dest, free, i;
    uint16_t unit;
    int ret;
    size_t retlen;
    loff_t offset;
    uint16_t srcunitswap = cpu_to_le16(srcunit);

    eun = &part->EUNInfo[srcunit];
    xfer = &part->XferInfo[xferunit];
    pr_debug("ftl_cs: copying block 0x%x to 0x%x\n",
             eun->Offset, xfer->Offset);


    /* Read current BAM */
    if (part->bam_index != srcunit) {

        offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);

        ret = mtd_read(part->mbd.mtd, offset,
                       part->BlocksPerUnit * sizeof(uint32_t), &retlen,
                       (u_char *)(part->bam_cache));

        /* mark the cache bad, in case we get an error later */
        part->bam_index = 0xffff;

        if (ret) {
            printk( KERN_WARNING "ftl: Failed to read BAM cache in copy_erase_unit()!\n");
            return ret;
        }
    }

    /* Write the LogicalEUN for the transfer unit */
    xfer->state = XFER_UNKNOWN;
    offset = xfer->Offset + 20; /* Bad! 20 is the hard-coded offset of LogicalEUN in the unit header */
    unit = cpu_to_le16(0x7fff);

    ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen,
                    (u_char *)&unit);

    if (ret) {
        printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
        return ret;
    }

    /* Copy all data blocks from source unit to transfer unit */
    src = eun->Offset; dest = xfer->Offset;

    free = 0;
    ret = 0;
    for (i = 0; i < part->BlocksPerUnit; i++) {
        switch (BLOCK_TYPE(le32_to_cpu(part->bam_cache[i]))) {
        case BLOCK_CONTROL:
            /* This gets updated later */
            break;
        case BLOCK_DATA:
        case BLOCK_REPLACEMENT:
            ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen,
                           (u_char *)buf);
            if (ret) {
                printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
                return ret;
            }


            ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen,
                            (u_char *)buf);
            if (ret) {
                printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
                return ret;
            }

            break;
        default:
            /* All other blocks must be free */
            part->bam_cache[i] = cpu_to_le32(0xffffffff);
            free++;
            break;
        }
        src += SECTOR_SIZE;
        dest += SECTOR_SIZE;
    }

    /* Write the BAM to the transfer unit */
    ret = mtd_write(part->mbd.mtd,
                    xfer->Offset + le32_to_cpu(part->header.BAMOffset),
                    part->BlocksPerUnit * sizeof(int32_t),
                    &retlen,
                    (u_char *)part->bam_cache);
    if (ret) {
        printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
        return ret;
    }


    /* All clear? Then update the LogicalEUN again */
    ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
                    &retlen, (u_char *)&srcunitswap);

    if (ret) {
        printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
        return ret;
    }


    /* Update the maps and usage stats*/
    swap(xfer->EraseCount, eun->EraseCount);
    swap(xfer->Offset, eun->Offset);
    part->FreeTotal -= eun->Free;
    part->FreeTotal += free;
    eun->Free = free;
    eun->Deleted = 0;

    /* Now, the cache should be valid for the new block */
    part->bam_index = srcunit;

    return 0;
} /* copy_erase_unit */

/*======================================================================

    reclaim_block() picks a full erase unit and a transfer unit and
    then calls copy_erase_unit() to copy one to the other. Then, it
    schedules an erase on the expired block.

    What's a good way to decide which transfer unit and which erase
    unit to use? Beats me. My way is to always pick the transfer
    unit with the fewest erases, and usually pick the data unit with
    the most deleted blocks. But with a small probability, pick the
    oldest data unit instead. This means that we generally postpone
    the next reclamation as long as possible, but shuffle static
    stuff around a bit for wear leveling.

======================================================================*/

static int reclaim_block(partition_t *part)
{
    uint16_t i, eun, xfer;
    uint32_t best;
    int queued, ret;

    pr_debug("ftl_cs: reclaiming space...\n");
    pr_debug("NumTransferUnits == %x\n", part->header.NumTransferUnits);
    /* Pick the least erased transfer unit */
    best = 0xffffffff; xfer = 0xffff;
    do {
        queued = 0;
        for (i = 0; i < part->header.NumTransferUnits; i++) {
            int n=0;
            if (part->XferInfo[i].state == XFER_UNKNOWN) {
                pr_debug("XferInfo[%d].state == XFER_UNKNOWN\n",i);
                n=1;
                erase_xfer(part, i);
            }
            if (part->XferInfo[i].state == XFER_ERASING) {
                pr_debug("XferInfo[%d].state == XFER_ERASING\n",i);
                n=1;
                queued = 1;
            }
            else if (part->XferInfo[i].state == XFER_ERASED) {
                pr_debug("XferInfo[%d].state == XFER_ERASED\n",i);
                n=1;
                prepare_xfer(part, i);
            }
            if (part->XferInfo[i].state == XFER_PREPARED) {
                pr_debug("XferInfo[%d].state == XFER_PREPARED\n",i);
                n=1;
                if (part->XferInfo[i].EraseCount <= best) {
                    best = part->XferInfo[i].EraseCount;
                    xfer = i;
                }
            }
            if (!n)
                pr_debug("XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);

        }
        if (xfer == 0xffff) {
            if (queued) {
                pr_debug("ftl_cs: waiting for transfer "
                         "unit to be prepared...\n");
                mtd_sync(part->mbd.mtd);
            } else {
                static int ne = 0;
                if (++ne < 5)
                    printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
                           "suitable transfer units!\n");
                else
                    pr_debug("ftl_cs: reclaim failed: no "
                             "suitable transfer units!\n");

                return -EIO;
            }
        }
    } while (xfer == 0xffff);

    eun = 0;
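    /* Choose the data unit to reclaim: roughly one time in shuffle_freq
       (keyed off jiffies), recycle the least-erased unit so that static
       data gets shuffled around for wear leveling; otherwise take the
       unit with the most deleted blocks, which frees the most space. */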
    if ((jiffies % shuffle_freq) == 0) {
        pr_debug("ftl_cs: recycling freshest block...\n");
        best = 0xffffffff;
        for (i = 0; i < part->DataUnits; i++)
            if (part->EUNInfo[i].EraseCount <= best) {
                best = part->EUNInfo[i].EraseCount;
                eun = i;
            }
    } else {
        best = 0;
        for (i = 0; i < part->DataUnits; i++)
            if (part->EUNInfo[i].Deleted >= best) {
                best = part->EUNInfo[i].Deleted;
                eun = i;
            }
        if (best == 0) {
            static int ne = 0;
            if (++ne < 5)
                printk(KERN_NOTICE "ftl_cs: reclaim failed: "
                       "no free blocks!\n");
            else
                pr_debug("ftl_cs: reclaim failed: "
                         "no free blocks!\n");

            return -EIO;
        }
    }
    ret = copy_erase_unit(part, eun, xfer);
    if (!ret)
        erase_xfer(part, xfer);
    else
        printk(KERN_NOTICE "ftl_cs: copy_erase_unit failed!\n");
    return ret;
} /* reclaim_block */

/*======================================================================

    Find_free() searches for a free block. If necessary, it updates
    the BAM cache for the erase unit containing the free block. It
    returns the block index -- the erase unit is just the currently
    cached unit. If there are no free blocks, it returns 0 -- this
    is never a valid data block because it contains the header.

======================================================================*/

#ifdef PSYCHO_DEBUG
static void dump_lists(partition_t *part)
{
    int i;
    printk(KERN_DEBUG "ftl_cs: Free total = %d\n", part->FreeTotal);
    for (i = 0; i < part->DataUnits; i++)
        printk(KERN_DEBUG "ftl_cs: unit %d: %d phys, %d free, "
               "%d deleted\n", i,
               part->EUNInfo[i].Offset >> part->header.EraseUnitSize,
               part->EUNInfo[i].Free, part->EUNInfo[i].Deleted);
}
#endif

static uint32_t find_free(partition_t *part)
{
    uint16_t stop, eun;
    uint32_t blk;
    size_t retlen;
    int ret;

    /* Find an erase unit with some free space */
    stop = (part->bam_index == 0xffff) ? 0 : part->bam_index;
    eun = stop;
    do {
        if (part->EUNInfo[eun].Free != 0) break;
        /* Wrap around at end of table */
        if (++eun == part->DataUnits) eun = 0;
    } while (eun != stop);

    if (part->EUNInfo[eun].Free == 0)
        return 0;

    /* Is this unit's BAM cached? */
    if (eun != part->bam_index) {
        /* Invalidate cache */
        part->bam_index = 0xffff;

        ret = mtd_read(part->mbd.mtd,
                       part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
                       part->BlocksPerUnit * sizeof(uint32_t),
                       &retlen,
                       (u_char *)(part->bam_cache));

        if (ret) {
            printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
            return 0;
        }
        part->bam_index = eun;
    }

    /* Find a free block */
    for (blk = 0; blk < part->BlocksPerUnit; blk++)
        if (BLOCK_FREE(le32_to_cpu(part->bam_cache[blk]))) break;
    if (blk == part->BlocksPerUnit) {
#ifdef PSYCHO_DEBUG
        static int ne = 0;
        if (++ne == 1)
            dump_lists(part);
#endif
        printk(KERN_NOTICE "ftl_cs: bad free list!\n");
        return 0;
    }
    pr_debug("ftl_cs: found free block at %d in %d\n", blk, eun);
    return blk;

} /* find_free */


/*======================================================================

    Read a series of sectors from an FTL partition.

======================================================================*/

static int ftl_read(partition_t *part, caddr_t buffer,
                    u_long sector, u_long nblocks)
{
    uint32_t log_addr, bsize;
    u_long i;
    int ret;
    size_t offset, retlen;

    pr_debug("ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
             part, sector, nblocks);
    if (!(part->state & FTL_FORMATTED)) {
        printk(KERN_NOTICE "ftl_cs: bad partition\n");
        return -EIO;
    }
    bsize = 1 << part->header.EraseUnitSize;

    for (i = 0; i < nblocks; i++) {
        if (((sector+i) * SECTOR_SIZE) >= le32_to_cpu(part->header.FormattedSize)) {
            printk(KERN_NOTICE "ftl_cs: bad read offset\n");
            return -EIO;
        }
        log_addr = part->VirtualBlockMap[sector+i];
        if (log_addr == 0xffffffff)
            memset(buffer, 0, SECTOR_SIZE);
        else {
            offset = (part->EUNInfo[log_addr / bsize].Offset
                      + (log_addr % bsize));
            ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
                           (u_char *)buffer);

            if (ret) {
                printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
                return ret;
            }
        }
        buffer += SECTOR_SIZE;
    }
    return 0;
} /* ftl_read */

/*======================================================================

    Write a series of sectors to an FTL partition

======================================================================*/

static int set_bam_entry(partition_t *part, uint32_t log_addr,
                         uint32_t virt_addr)
{
    uint32_t bsize, blk, le_virt_addr;
#ifdef PSYCHO_DEBUG
    uint32_t old_addr;
#endif
    uint16_t eun;
    int ret;
    size_t retlen, offset;

    pr_debug("ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
             part, log_addr, virt_addr);
    bsize = 1 << part->header.EraseUnitSize;
    eun = log_addr / bsize;
    blk = (log_addr % bsize) / SECTOR_SIZE;
    offset = (part->EUNInfo[eun].Offset + blk * sizeof(uint32_t) +
              le32_to_cpu(part->header.BAMOffset));
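    /* The BAM entry for block 'blk' of unit 'eun' sits at BAMOffset +
       blk * sizeof(uint32_t) inside that unit, so retagging one block
       is a single 4-byte write. */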

#ifdef PSYCHO_DEBUG
    ret = mtd_read(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
                   (u_char *)&old_addr);
    if (ret) {
        printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
        return ret;
    }
    old_addr = le32_to_cpu(old_addr);

    if (((virt_addr == 0xfffffffe) && !BLOCK_FREE(old_addr)) ||
        ((virt_addr == 0) && (BLOCK_TYPE(old_addr) != BLOCK_DATA)) ||
        (!BLOCK_DELETED(virt_addr) && (old_addr != 0xfffffffe))) {
        static int ne = 0;
        if (++ne < 5) {
            printk(KERN_NOTICE "ftl_cs: set_bam_entry() inconsistency!\n");
            printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, old = 0x%x"
                   ", new = 0x%x\n", log_addr, old_addr, virt_addr);
        }
        return -EIO;
    }
#endif
    le_virt_addr = cpu_to_le32(virt_addr);
    if (part->bam_index == eun) {
#ifdef PSYCHO_DEBUG
        if (le32_to_cpu(part->bam_cache[blk]) != old_addr) {
            static int ne = 0;
            if (++ne < 5) {
                printk(KERN_NOTICE "ftl_cs: set_bam_entry() "
                       "inconsistency!\n");
                printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, cache"
                       " = 0x%x\n",
                       le32_to_cpu(part->bam_cache[blk]), old_addr);
            }
            return -EIO;
        }
#endif
        part->bam_cache[blk] = le_virt_addr;
    }
    ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
                    (u_char *)&le_virt_addr);

    if (ret) {
        printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n");
        printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, new = 0x%x\n",
               log_addr, virt_addr);
    }
    return ret;
} /* set_bam_entry */

static int ftl_write(partition_t *part, caddr_t buffer,
                     u_long sector, u_long nblocks)
{
    uint32_t bsize, log_addr, virt_addr, old_addr, blk;
    u_long i;
    int ret;
    size_t retlen, offset;

    pr_debug("ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
             part, sector, nblocks);
    if (!(part->state & FTL_FORMATTED)) {
        printk(KERN_NOTICE "ftl_cs: bad partition\n");
        return -EIO;
    }
    /* See if we need to reclaim space, before we start */
    while (part->FreeTotal < nblocks) {
        ret = reclaim_block(part);
        if (ret)
            return ret;
    }

    bsize = 1 << part->header.EraseUnitSize;

    virt_addr = sector * SECTOR_SIZE | BLOCK_DATA;
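    /* A BAM entry for a data block holds, in effect, the block's virtual
       byte address ORed with a type tag (BLOCK_DATA here); the tag fits in
       the low bits because addresses are 512-byte aligned. 0xfffffffe
       below marks an entry that is allocated but not yet valid. */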
    for (i = 0; i < nblocks; i++) {
        if (virt_addr >= le32_to_cpu(part->header.FormattedSize)) {
            printk(KERN_NOTICE "ftl_cs: bad write offset\n");
            return -EIO;
        }

        /* Grab a free block */
        blk = find_free(part);
        if (blk == 0) {
            static int ne = 0;
            if (++ne < 5)
                printk(KERN_NOTICE "ftl_cs: internal error: "
                       "no free blocks!\n");
            return -ENOSPC;
        }

        /* Tag the BAM entry, and write the new block */
        log_addr = part->bam_index * bsize + blk * SECTOR_SIZE;
        part->EUNInfo[part->bam_index].Free--;
        part->FreeTotal--;
        if (set_bam_entry(part, log_addr, 0xfffffffe))
            return -EIO;
        part->EUNInfo[part->bam_index].Deleted++;
        offset = (part->EUNInfo[part->bam_index].Offset +
                  blk * SECTOR_SIZE);
        ret = mtd_write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, buffer);

        if (ret) {
            printk(KERN_NOTICE "ftl_cs: block write failed!\n");
            printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, virt_addr"
                   " = 0x%x, Offset = 0x%zx\n", log_addr, virt_addr,
                   offset);
            return -EIO;
        }

        /* Only delete the old entry when the new entry is ready */
        old_addr = part->VirtualBlockMap[sector+i];
        if (old_addr != 0xffffffff) {
            part->VirtualBlockMap[sector+i] = 0xffffffff;
            part->EUNInfo[old_addr/bsize].Deleted++;
            if (set_bam_entry(part, old_addr, 0))
                return -EIO;
        }

        /* Finally, set up the new pointers */
        if (set_bam_entry(part, log_addr, virt_addr))
            return -EIO;
        part->VirtualBlockMap[sector+i] = log_addr;
        part->EUNInfo[part->bam_index].Deleted--;

        buffer += SECTOR_SIZE;
        virt_addr += SECTOR_SIZE;
    }
    return 0;
} /* ftl_write */

static int ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
    partition_t *part = (void *)dev;
    u_long sect;

    /* Sort of arbitrary: round size down to 4KiB boundary */
    sect = le32_to_cpu(part->header.FormattedSize)/SECTOR_SIZE;

    geo->heads = 1;
    geo->sectors = 8;
    geo->cylinders = sect >> 3;
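    /* e.g. a 10000-sector partition reports 1 head, 8 sectors per track
       and 1250 cylinders: 1 * 8 * 1250 = 10000 sectors, i.e. the size
       rounded down to a multiple of 8 sectors (4 KiB). */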

    return 0;
}

static int ftl_readsect(struct mtd_blktrans_dev *dev,
                        unsigned long block, char *buf)
{
    return ftl_read((void *)dev, buf, block, 1);
}

static int ftl_writesect(struct mtd_blktrans_dev *dev,
                         unsigned long block, char *buf)
{
    return ftl_write((void *)dev, buf, block, 1);
}

static int ftl_discardsect(struct mtd_blktrans_dev *dev,
                           unsigned long sector, unsigned nr_sects)
{
    partition_t *part = (void *)dev;
    uint32_t bsize = 1 << part->header.EraseUnitSize;

    pr_debug("FTL erase sector %ld for %d sectors\n",
             sector, nr_sects);

    while (nr_sects) {
        uint32_t old_addr = part->VirtualBlockMap[sector];
        if (old_addr != 0xffffffff) {
            part->VirtualBlockMap[sector] = 0xffffffff;
            part->EUNInfo[old_addr/bsize].Deleted++;
            if (set_bam_entry(part, old_addr, 0))
                return -EIO;
        }
        nr_sects--;
        sector++;
    }

    return 0;
}
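
/* Note that discarded sectors are only unmapped and marked deleted in the
   BAM here; the space is actually recovered later, when reclaim_block()
   copies the unit elsewhere and erases it. */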
/*====================================================================*/

static void ftl_freepart(partition_t *part)
{
    vfree(part->VirtualBlockMap);
    part->VirtualBlockMap = NULL;
    kfree(part->EUNInfo);
    part->EUNInfo = NULL;
    kfree(part->XferInfo);
    part->XferInfo = NULL;
    kfree(part->bam_cache);
    part->bam_cache = NULL;
} /* ftl_freepart */

static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
    partition_t *partition;

    partition = kzalloc(sizeof(partition_t), GFP_KERNEL);

    if (!partition) {
        printk(KERN_WARNING "No memory to scan for FTL on %s\n",
               mtd->name);
        return;
    }

    partition->mbd.mtd = mtd;

    if ((scan_header(partition) == 0) &&
        (build_maps(partition) == 0)) {

        partition->state = FTL_FORMATTED;
#ifdef PCMCIA_DEBUG
        printk(KERN_INFO "ftl_cs: opening %d KiB FTL partition\n",
               le32_to_cpu(partition->header.FormattedSize) >> 10);
#endif
        partition->mbd.size = le32_to_cpu(partition->header.FormattedSize) >> 9;
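        /* mbd.size is counted in 512-byte sectors (the blksize used by
           this driver), hence the shift by 9 */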

        partition->mbd.tr = tr;
        partition->mbd.devnum = -1;
        if (!add_mtd_blktrans_dev((void *)partition))
            return;
    }

    kfree(partition);
}

static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
    del_mtd_blktrans_dev(dev);
    ftl_freepart((partition_t *)dev);
}

static struct mtd_blktrans_ops ftl_tr = {
    .name = "ftl",
    .major = FTL_MAJOR,
    .part_bits = PART_BITS,
    .blksize = SECTOR_SIZE,
    .readsect = ftl_readsect,
    .writesect = ftl_writesect,
    .discard = ftl_discardsect,
    .getgeo = ftl_getgeo,
    .add_mtd = ftl_add_mtd,
    .remove_dev = ftl_remove_dev,
    .owner = THIS_MODULE,
};

static int __init init_ftl(void)
{
    return register_mtd_blktrans(&ftl_tr);
}

static void __exit cleanup_ftl(void)
{
    deregister_mtd_blktrans(&ftl_tr);
}

module_init(init_ftl);
module_exit(cleanup_ftl);


MODULE_LICENSE("Dual MPL/GPL");
MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
MODULE_DESCRIPTION("Support code for Flash Translation Layer, used on PCMCIA devices");