/*
 * Copyright (c) 2012 Linutronix GmbH
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */
#include <linux/crc32.h>
#include "ubi.h"
/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
	       sizeof(struct ubi_fm_hdr) +
	       sizeof(struct ubi_fm_scan_pool) +
	       sizeof(struct ubi_fm_scan_pool) +
	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
	       (sizeof(struct ubi_fm_eba) +
	       (ubi->peb_count * sizeof(__be32))) +
	       sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}
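
/*
 * The sum above mirrors the on-flash layout of a fastmap: super block,
 * header, two scan pools (user and WL), one ubi_fm_ec entry per PEB,
 * the EBA table (one __be32 per PEB) and, in the worst case, one
 * volume header per possible volume. The result is rounded up to a
 * multiple of the LEB size because a fastmap occupies whole LEBs.
 */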
/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;

out:
	return new;
}
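
/*
 * UBI_COMPAT_DELETE is what keeps a fastmap-enabled image usable on
 * kernels without fastmap support: such an implementation deletes the
 * unknown internal volume during attach and falls back to a full scan.
 */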
/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new PEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->ec = ec;
	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}
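
/*
 * An "aeb" (struct ubi_ainf_peb) describes one PEB during attach. Its
 * union member u lets the same object live either on a plain list
 * (u.list, as here) or in a per-volume RB-tree (u.rb) later on.
 */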
/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		goto out;

	av->highest_lnum = av->leb_count = 0;
	av->vol_id = vol_id;
	av->used_ebs = used_ebs;
	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	av->root = RB_ROOT;

	dbg_bld("found volume (ID %i)", vol_id);

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);

out:
	return av;
}
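
/*
 * The walk above only decides on which side of the tree the new node
 * goes; it presumably never sees an equal vol_id, since a valid
 * fastmap carries exactly one volume header per volume.
 */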
/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the to be assigned SEB
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	p = &av->root.rb_node;
	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}
/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = kmem_cache_alloc(ai->aeb_slab_cache,
						  GFP_KERNEL);
			if (!victim)
				return -ENOMEM;

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}
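
/*
 * ubi_compare_lebs() decides which of two copies of a LEB holds the
 * valid data: bit 0 of the result is set if the second PEB (here: the
 * pool copy) is newer, bit 1 signals that bit-flips were seen, and a
 * negative value indicates an error.
 */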
/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	struct ubi_ainf_volume *av, *tmp_av = NULL;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
	int found = 0;

	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
	    be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	while (*p) {
		parent = *p;
		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
			p = &(*p)->rb_left;
		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
			p = &(*p)->rb_right;
		else {
			found = 1;
			break;
		}
	}

	if (found)
		av = tmp_av;
	else {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}
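
/*
 * PEBs belonging to the two internal fastmap volumes carry no user
 * data; if one shows up in a pool it is simply dropped here rather
 * than being attached to a volume.
 */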
/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
		av = rb_entry(node, struct ubi_ainf_volume, rb);

		for (node2 = rb_first(&av->root); node2;
		     node2 = rb_next(node2)) {
			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				kmem_cache_free(ai->aeb_slab_cache, aeb);
				return;
			}
		}
	}
}
/**
 * scan_pool - scans a pool for changed (i.e. no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		kfree(ech);
		return -ENOMEM;
	}

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);

			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);
			if (err == UBI_IO_FF_BITFLIPS)
				add_aeb(ai, free, pnum, ec, 1);
			else
				add_aeb(ai, free, pnum, ec, 0);
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						   GFP_KERNEL);
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->ec = be64_to_cpu(ech->ec);
			new_aeb->pnum = pnum;
			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
	return ret;
}
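
/*
 * A pool is the set of PEBs the WL subsystem may hand out between two
 * fastmap writes without updating the on-flash fastmap. Everything in
 * it is therefore re-examined at attach time: still-erased PEBs go to
 * @free, written ones are merged into the attach info via
 * process_pool_aeb().
 */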
/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}
/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl1->size);
	wl_pool_size = be16_to_cpu(fmpl2->size);
	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (!av)
			goto fail_bad;

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

	ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}

	return ret;
}
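
/*
 * The parser above walks the fastmap image strictly in its on-flash
 * order: super block, header, user pool, WL pool, the free/used/scrub/
 * erase EC lists and finally one volume header plus EBA table per
 * volume, validating a magic number and the running offset at each
 * step.
 */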
/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @fm_anchor: The fastmap starts at this PEB
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	u32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	mutex_lock(&ubi->fm_mutex);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
				"err: %i)", i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;

	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
out:
	mutex_unlock(&ubi->fm_mutex);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}
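
/*
 * The caller locates @fm_anchor by probing only the first PEBs of the
 * device (up to UBI_FM_MAX_START of them); keeping the anchor inside
 * that window is what makes attaching by fastmap fast in the first
 * place.
 */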
/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the to be written fastmap
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct rb_node *node;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++)
		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++)
		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);

	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) {
		list_for_each_entry(wl_e, &ubi->pq[i], u.list) {
			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			fec->ec = cpu_to_be32(wl_e->ec);

			used_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++)
			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
				   new_fm->e[i]->pnum, ubi->leb_start,
				   ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
out:
	return ret;
}
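
/*
 * Write ordering matters here: all VID headers are written before any
 * fastmap payload, and data_crc is computed over the whole raw buffer
 * while the CRC field itself is still zero, matching the check in
 * ubi_scan_fastmap().
 */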
/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}
/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 * @fm: the fastmap to be destroyed
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi,
			      struct ubi_fastmap_layout *fm)
{
	int ret;
	struct ubi_vid_hdr *vh;

	ret = erase_block(ubi, fm->e[0]->pnum);
	if (ret < 0)
		return ret;

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vh)
		return -ENOMEM;

	/* deleting the current fastmap SB is not enough, an old SB may exist,
	 * so create a (corrupted) SB such that fastmap will find it and fall
	 * back to scanning mode in any case */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);

	return ret;
}
/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	mutex_lock(&ubi->fm_mutex);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		mutex_unlock(&ubi->fm_mutex);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		mutex_unlock(&ubi->fm_mutex);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		mutex_unlock(&ubi->fm_mutex);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;

	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e && !old_fm) {
			ubi_err(ubi, "could not get any free erase block");

			for (j = 1; j < i; j++)
				ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);

			ret = -ENOSPC;
			goto err;
		} else if (!tmp_e && old_fm) {
			ret = erase_block(ubi, old_fm->e[i]->pnum);
			if (ret < 0) {
				for (j = 1; j < i; j++)
					ubi_wl_put_fm_peb(ubi, new_fm->e[j],
							  j, 0);

				ubi_err(ubi, "could not erase old fastmap PEB");
				goto err;
			}

			new_fm->e[i] = old_fm->e[i];
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm)
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++)
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
				goto err;
			}

			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);

			new_fm->e[0] = tmp_e;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++)
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);

			ret = -ENOSPC;
			goto err;
		}

		new_fm->e[0] = tmp_e;
	}

	down_write(&ubi->work_sem);
	down_write(&ubi->fm_sem);
	ret = ubi_write_fastmap(ubi, new_fm);
	up_write(&ubi->fm_sem);
	up_write(&ubi->work_sem);

	if (ret)
		goto err;

out_unlock:
	mutex_unlock(&ubi->fm_mutex);
	kfree(old_fm);
	return ret;

err:
	kfree(new_fm);

	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = 0;
	if (old_fm) {
		ret = invalidate_fastmap(ubi, old_fm);
		if (ret < 0)
			ubi_err(ubi, "Unable to invalidate current fastmap!");
		else if (ret)
			ret = 0;
	}
	goto out_unlock;
}