Merge branch 'master' of /home/tglx/work/mtd/git/linux-2.6.git/
[deliverable/linux.git] / fs / jffs2 / nodelist.c
CommitLineData
1da177e4
LT
1/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2001-2003 Red Hat, Inc.
5 *
6 * Created by David Woodhouse <dwmw2@infradead.org>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
e4fef661 10 * $Id: nodelist.c,v 1.98 2005/07/10 15:15:32 dedekind Exp $
1da177e4
LT
11 *
12 */
13
14#include <linux/kernel.h>
15#include <linux/sched.h>
16#include <linux/fs.h>
17#include <linux/mtd/mtd.h>
18#include <linux/rbtree.h>
19#include <linux/crc32.h>
20#include <linux/slab.h>
21#include <linux/pagemap.h>
22#include "nodelist.h"
23
24void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list)
25{
26 struct jffs2_full_dirent **prev = list;
27 D1(printk(KERN_DEBUG "jffs2_add_fd_to_list( %p, %p (->%p))\n", new, list, *list));
28
29 while ((*prev) && (*prev)->nhash <= new->nhash) {
30 if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
31 /* Duplicate. Free one */
32 if (new->version < (*prev)->version) {
33 D1(printk(KERN_DEBUG "Eep! Marking new dirent node obsolete\n"));
34 D1(printk(KERN_DEBUG "New dirent is \"%s\"->ino #%u. Old is \"%s\"->ino #%u\n", new->name, new->ino, (*prev)->name, (*prev)->ino));
35 jffs2_mark_node_obsolete(c, new->raw);
36 jffs2_free_full_dirent(new);
37 } else {
38 D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) obsolete\n", (*prev)->ino));
39 new->next = (*prev)->next;
40 jffs2_mark_node_obsolete(c, ((*prev)->raw));
41 jffs2_free_full_dirent(*prev);
42 *prev = new;
43 }
44 goto out;
45 }
46 prev = &((*prev)->next);
47 }
48 new->next = *prev;
49 *prev = new;
50
51 out:
52 D2(while(*list) {
53 printk(KERN_DEBUG "Dirent \"%s\" (hash 0x%08x, ino #%u\n", (*list)->name, (*list)->nhash, (*list)->ino);
54 list = &(*list)->next;
55 });
56}
57
e4fef661
AB
58/*
59 * Put a new tmp_dnode_info into the temporaty RB-tree, keeping the list in
60 * order of increasing version.
61 */
62static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
1da177e4 63{
9dee7503
DW
64 struct rb_node **p = &list->rb_node;
65 struct rb_node * parent = NULL;
66 struct jffs2_tmp_dnode_info *this;
67
68 while (*p) {
69 parent = *p;
70 this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
71
6430a8de
AB
72 /* There may actually be a collision here, but it doesn't
73 actually matter. As long as the two nodes with the same
74 version are together, it's all fine. */
9dee7503
DW
75 if (tn->version < this->version)
76 p = &(*p)->rb_left;
9dee7503
DW
77 else
78 p = &(*p)->rb_right;
79 }
80
81 rb_link_node(&tn->rb, parent, p);
82 rb_insert_color(&tn->rb, list);
1da177e4
LT
83}
84
9dee7503 85static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
1da177e4 86{
9dee7503
DW
87 struct rb_node *this;
88 struct jffs2_tmp_dnode_info *tn;
89
90 this = list->rb_node;
91
92 /* Now at bottom of tree */
93 while (this) {
94 if (this->rb_left)
95 this = this->rb_left;
96 else if (this->rb_right)
97 this = this->rb_right;
98 else {
99 tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
100 jffs2_free_full_dnode(tn->fn);
101 jffs2_free_tmp_dnode_info(tn);
102
103 this = this->rb_parent;
104 if (!this)
105 break;
1da177e4 106
9dee7503
DW
107 if (this->rb_left == &tn->rb)
108 this->rb_left = NULL;
109 else if (this->rb_right == &tn->rb)
110 this->rb_right = NULL;
111 else BUG();
112 }
1da177e4 113 }
9dee7503 114 list->rb_node = NULL;
1da177e4
LT
115}
116
117static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
118{
119 struct jffs2_full_dirent *next;
120
121 while (fd) {
122 next = fd->next;
123 jffs2_free_full_dirent(fd);
124 fd = next;
125 }
126}
127
128/* Returns first valid node after 'ref'. May return 'ref' */
129static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
130{
131 while (ref && ref->next_in_ino) {
132 if (!ref_obsolete(ref))
133 return ref;
134 D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)));
135 ref = ref->next_in_ino;
136 }
137 return NULL;
138}
139
140/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
141 with this ino, returning the former in order of version */
142
143int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
9dee7503 144 struct rb_root *tnp, struct jffs2_full_dirent **fdp,
1da177e4
LT
145 uint32_t *highest_version, uint32_t *latest_mctime,
146 uint32_t *mctime_ver)
147{
148 struct jffs2_raw_node_ref *ref, *valid_ref;
9dee7503
DW
149 struct jffs2_tmp_dnode_info *tn;
150 struct rb_root ret_tn = RB_ROOT;
1da177e4
LT
151 struct jffs2_full_dirent *fd, *ret_fd = NULL;
152 union jffs2_node_union node;
153 size_t retlen;
154 int err;
155
156 *mctime_ver = 0;
157
158 D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino));
159
160 spin_lock(&c->erase_completion_lock);
161
162 valid_ref = jffs2_first_valid_node(f->inocache->nodes);
163
8fabed4a 164 if (!valid_ref && (f->inocache->ino != 1))
1da177e4
LT
165 printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino);
166
167 while (valid_ref) {
168 /* We can hold a pointer to a non-obsolete node without the spinlock,
169 but _obsolete_ nodes may disappear at any time, if the block
170 they're in gets erased. So if we mark 'ref' obsolete while we're
171 not holding the lock, it can go away immediately. For that reason,
172 we find the next valid node first, before processing 'ref'.
173 */
174 ref = valid_ref;
175 valid_ref = jffs2_first_valid_node(ref->next_in_ino);
176 spin_unlock(&c->erase_completion_lock);
177
178 cond_resched();
179
180 /* FIXME: point() */
181 err = jffs2_flash_read(c, (ref_offset(ref)),
182 min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)),
183 &retlen, (void *)&node);
184 if (err) {
185 printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref));
186 goto free_out;
187 }
188
189
190 /* Check we've managed to read at least the common node header */
191 if (retlen < min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node.u))) {
192 printk(KERN_WARNING "short read in get_inode_nodes()\n");
193 err = -EIO;
194 goto free_out;
195 }
196
197 switch (je16_to_cpu(node.u.nodetype)) {
198 case JFFS2_NODETYPE_DIRENT:
199 D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref)));
200 if (ref_flags(ref) == REF_UNCHECKED) {
201 printk(KERN_WARNING "BUG: Dirent node at 0x%08x never got checked? How?\n", ref_offset(ref));
202 BUG();
203 }
204 if (retlen < sizeof(node.d)) {
205 printk(KERN_WARNING "short read in get_inode_nodes()\n");
206 err = -EIO;
207 goto free_out;
208 }
209 /* sanity check */
210 if (PAD((node.d.nsize + sizeof (node.d))) != PAD(je32_to_cpu (node.d.totlen))) {
211 printk(KERN_NOTICE "jffs2_get_inode_nodes(): Illegal nsize in node at 0x%08x: nsize 0x%02x, totlen %04x\n",
212 ref_offset(ref), node.d.nsize, je32_to_cpu(node.d.totlen));
213 jffs2_mark_node_obsolete(c, ref);
214 spin_lock(&c->erase_completion_lock);
215 continue;
216 }
217 if (je32_to_cpu(node.d.version) > *highest_version)
218 *highest_version = je32_to_cpu(node.d.version);
219 if (ref_obsolete(ref)) {
220 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
221 printk(KERN_ERR "Dirent node at 0x%08x became obsolete while we weren't looking\n",
222 ref_offset(ref));
223 BUG();
224 }
225
226 fd = jffs2_alloc_full_dirent(node.d.nsize+1);
227 if (!fd) {
228 err = -ENOMEM;
229 goto free_out;
230 }
231 fd->raw = ref;
232 fd->version = je32_to_cpu(node.d.version);
233 fd->ino = je32_to_cpu(node.d.ino);
234 fd->type = node.d.type;
235
236 /* Pick out the mctime of the latest dirent */
237 if(fd->version > *mctime_ver) {
238 *mctime_ver = fd->version;
239 *latest_mctime = je32_to_cpu(node.d.mctime);
240 }
241
242 /* memcpy as much of the name as possible from the raw
243 dirent we've already read from the flash
244 */
245 if (retlen > sizeof(struct jffs2_raw_dirent))
246 memcpy(&fd->name[0], &node.d.name[0], min_t(uint32_t, node.d.nsize, (retlen-sizeof(struct jffs2_raw_dirent))));
247
248 /* Do we need to copy any more of the name directly
249 from the flash?
250 */
251 if (node.d.nsize + sizeof(struct jffs2_raw_dirent) > retlen) {
252 /* FIXME: point() */
253 int already = retlen - sizeof(struct jffs2_raw_dirent);
254
255 err = jffs2_flash_read(c, (ref_offset(ref)) + retlen,
256 node.d.nsize - already, &retlen, &fd->name[already]);
257 if (!err && retlen != node.d.nsize - already)
258 err = -EIO;
259
260 if (err) {
261 printk(KERN_WARNING "Read remainder of name in jffs2_get_inode_nodes(): error %d\n", err);
262 jffs2_free_full_dirent(fd);
263 goto free_out;
264 }
265 }
266 fd->nhash = full_name_hash(fd->name, node.d.nsize);
267 fd->next = NULL;
268 fd->name[node.d.nsize] = '\0';
269 /* Wheee. We now have a complete jffs2_full_dirent structure, with
270 the name in it and everything. Link it into the list
271 */
272 D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino));
273 jffs2_add_fd_to_list(c, fd, &ret_fd);
274 break;
275
276 case JFFS2_NODETYPE_INODE:
277 D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref)));
278 if (retlen < sizeof(node.i)) {
279 printk(KERN_WARNING "read too short for dnode\n");
280 err = -EIO;
281 goto free_out;
282 }
283 if (je32_to_cpu(node.i.version) > *highest_version)
284 *highest_version = je32_to_cpu(node.i.version);
285 D1(printk(KERN_DEBUG "version %d, highest_version now %d\n", je32_to_cpu(node.i.version), *highest_version));
286
287 if (ref_obsolete(ref)) {
288 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
289 printk(KERN_ERR "Inode node at 0x%08x became obsolete while we weren't looking\n",
290 ref_offset(ref));
291 BUG();
292 }
293
294 /* If we've never checked the CRCs on this node, check them now. */
295 if (ref_flags(ref) == REF_UNCHECKED) {
296 uint32_t crc, len;
297 struct jffs2_eraseblock *jeb;
298
299 crc = crc32(0, &node, sizeof(node.i)-8);
300 if (crc != je32_to_cpu(node.i.node_crc)) {
301 printk(KERN_NOTICE "jffs2_get_inode_nodes(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
302 ref_offset(ref), je32_to_cpu(node.i.node_crc), crc);
303 jffs2_mark_node_obsolete(c, ref);
304 spin_lock(&c->erase_completion_lock);
305 continue;
306 }
307
308 /* sanity checks */
309 if ( je32_to_cpu(node.i.offset) > je32_to_cpu(node.i.isize) ||
310 PAD(je32_to_cpu(node.i.csize) + sizeof (node.i)) != PAD(je32_to_cpu(node.i.totlen))) {
311 printk(KERN_NOTICE "jffs2_get_inode_nodes(): Inode corrupted at 0x%08x, totlen %d, #ino %d, version %d, isize %d, csize %d, dsize %d \n",
312 ref_offset(ref), je32_to_cpu(node.i.totlen), je32_to_cpu(node.i.ino),
313 je32_to_cpu(node.i.version), je32_to_cpu(node.i.isize),
314 je32_to_cpu(node.i.csize), je32_to_cpu(node.i.dsize));
315 jffs2_mark_node_obsolete(c, ref);
316 spin_lock(&c->erase_completion_lock);
317 continue;
318 }
319
320 if (node.i.compr != JFFS2_COMPR_ZERO && je32_to_cpu(node.i.csize)) {
321 unsigned char *buf=NULL;
322 uint32_t pointed = 0;
323#ifndef __ECOS
324 if (c->mtd->point) {
325 err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize),
326 &retlen, &buf);
327 if (!err && retlen < je32_to_cpu(node.i.csize)) {
328 D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen));
329 c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize));
330 } else if (err){
331 D1(printk(KERN_DEBUG "MTD point failed %d\n", err));
332 } else
333 pointed = 1; /* succefully pointed to device */
334 }
335#endif
336 if(!pointed){
337 buf = kmalloc(je32_to_cpu(node.i.csize), GFP_KERNEL);
338 if (!buf)
339 return -ENOMEM;
340
341 err = jffs2_flash_read(c, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize),
342 &retlen, buf);
343 if (!err && retlen != je32_to_cpu(node.i.csize))
344 err = -EIO;
345 if (err) {
346 kfree(buf);
347 return err;
348 }
349 }
350 crc = crc32(0, buf, je32_to_cpu(node.i.csize));
351 if(!pointed)
352 kfree(buf);
353#ifndef __ECOS
354 else
355 c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize));
356#endif
357
358 if (crc != je32_to_cpu(node.i.data_crc)) {
359 printk(KERN_NOTICE "jffs2_get_inode_nodes(): Data CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
360 ref_offset(ref), je32_to_cpu(node.i.data_crc), crc);
361 jffs2_mark_node_obsolete(c, ref);
362 spin_lock(&c->erase_completion_lock);
363 continue;
364 }
365
366 }
367
368 /* Mark the node as having been checked and fix the accounting accordingly */
369 spin_lock(&c->erase_completion_lock);
370 jeb = &c->blocks[ref->flash_offset / c->sector_size];
371 len = ref_totlen(c, jeb, ref);
372
373 jeb->used_size += len;
374 jeb->unchecked_size -= len;
375 c->used_size += len;
376 c->unchecked_size -= len;
377
378 /* If node covers at least a whole page, or if it starts at the
379 beginning of a page and runs to the end of the file, or if
380 it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
381
382 If it's actually overlapped, it'll get made NORMAL (or OBSOLETE)
383 when the overlapping node(s) get added to the tree anyway.
384 */
385 if ((je32_to_cpu(node.i.dsize) >= PAGE_CACHE_SIZE) ||
386 ( ((je32_to_cpu(node.i.offset)&(PAGE_CACHE_SIZE-1))==0) &&
387 (je32_to_cpu(node.i.dsize)+je32_to_cpu(node.i.offset) == je32_to_cpu(node.i.isize)))) {
388 D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_PRISTINE\n", ref_offset(ref)));
389 ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
390 } else {
391 D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_NORMAL\n", ref_offset(ref)));
392 ref->flash_offset = ref_offset(ref) | REF_NORMAL;
393 }
394 spin_unlock(&c->erase_completion_lock);
395 }
396
397 tn = jffs2_alloc_tmp_dnode_info();
398 if (!tn) {
399 D1(printk(KERN_DEBUG "alloc tn failed\n"));
400 err = -ENOMEM;
401 goto free_out;
402 }
403
404 tn->fn = jffs2_alloc_full_dnode();
405 if (!tn->fn) {
406 D1(printk(KERN_DEBUG "alloc fn failed\n"));
407 err = -ENOMEM;
408 jffs2_free_tmp_dnode_info(tn);
409 goto free_out;
410 }
411 tn->version = je32_to_cpu(node.i.version);
412 tn->fn->ofs = je32_to_cpu(node.i.offset);
413 /* There was a bug where we wrote hole nodes out with
414 csize/dsize swapped. Deal with it */
415 if (node.i.compr == JFFS2_COMPR_ZERO && !je32_to_cpu(node.i.dsize) && je32_to_cpu(node.i.csize))
416 tn->fn->size = je32_to_cpu(node.i.csize);
417 else // normal case...
418 tn->fn->size = je32_to_cpu(node.i.dsize);
419 tn->fn->raw = ref;
420 D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %04x, dsize %04x\n",
421 ref_offset(ref), je32_to_cpu(node.i.version),
422 je32_to_cpu(node.i.offset), je32_to_cpu(node.i.dsize)));
e4fef661 423 jffs2_add_tn_to_tree(tn, &ret_tn);
1da177e4
LT
424 break;
425
426 default:
427 if (ref_flags(ref) == REF_UNCHECKED) {
428 struct jffs2_eraseblock *jeb;
429 uint32_t len;
430
431 printk(KERN_ERR "Eep. Unknown node type %04x at %08x was marked REF_UNCHECKED\n",
432 je16_to_cpu(node.u.nodetype), ref_offset(ref));
433
434 /* Mark the node as having been checked and fix the accounting accordingly */
435 spin_lock(&c->erase_completion_lock);
436 jeb = &c->blocks[ref->flash_offset / c->sector_size];
437 len = ref_totlen(c, jeb, ref);
438
439 jeb->used_size += len;
440 jeb->unchecked_size -= len;
441 c->used_size += len;
442 c->unchecked_size -= len;
443
444 mark_ref_normal(ref);
445 spin_unlock(&c->erase_completion_lock);
446 }
447 node.u.nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(node.u.nodetype));
448 if (crc32(0, &node, sizeof(struct jffs2_unknown_node)-4) != je32_to_cpu(node.u.hdr_crc)) {
449 /* Hmmm. This should have been caught at scan time. */
450 printk(KERN_ERR "Node header CRC failed at %08x. But it must have been OK earlier.\n",
451 ref_offset(ref));
452 printk(KERN_ERR "Node was: { %04x, %04x, %08x, %08x }\n",
453 je16_to_cpu(node.u.magic), je16_to_cpu(node.u.nodetype), je32_to_cpu(node.u.totlen),
454 je32_to_cpu(node.u.hdr_crc));
455 jffs2_mark_node_obsolete(c, ref);
456 } else switch(je16_to_cpu(node.u.nodetype) & JFFS2_COMPAT_MASK) {
457 case JFFS2_FEATURE_INCOMPAT:
458 printk(KERN_NOTICE "Unknown INCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
459 /* EEP */
460 BUG();
461 break;
462 case JFFS2_FEATURE_ROCOMPAT:
463 printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
464 if (!(c->flags & JFFS2_SB_FLAG_RO))
465 BUG();
466 break;
467 case JFFS2_FEATURE_RWCOMPAT_COPY:
468 printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
469 break;
470 case JFFS2_FEATURE_RWCOMPAT_DELETE:
471 printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
472 jffs2_mark_node_obsolete(c, ref);
473 break;
474 }
475
476 }
477 spin_lock(&c->erase_completion_lock);
478
479 }
480 spin_unlock(&c->erase_completion_lock);
481 *tnp = ret_tn;
482 *fdp = ret_fd;
483
484 return 0;
485
486 free_out:
9dee7503 487 jffs2_free_tmp_dnode_info_list(&ret_tn);
1da177e4
LT
488 jffs2_free_full_dirent_list(ret_fd);
489 return err;
490}
491
/* Set the state of an inode cache entry under inocache_lock and wake any
   waiters on inocache_wq (e.g. code waiting for a state transition). */
void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state)
{
	spin_lock(&c->inocache_lock);
	ic->state = state;
	wake_up(&c->inocache_wq);
	spin_unlock(&c->inocache_lock);
}
499
500/* During mount, this needs no locking. During normal operation, its
501 callers want to do other stuff while still holding the inocache_lock.
502 Rather than introducing special case get_ino_cache functions or
503 callbacks, we just let the caller do the locking itself. */
504
505struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
506{
507 struct jffs2_inode_cache *ret;
508
509 D2(printk(KERN_DEBUG "jffs2_get_ino_cache(): ino %u\n", ino));
510
511 ret = c->inocache_list[ino % INOCACHE_HASHSIZE];
512 while (ret && ret->ino < ino) {
513 ret = ret->next;
514 }
515
516 if (ret && ret->ino != ino)
517 ret = NULL;
518
519 D2(printk(KERN_DEBUG "jffs2_get_ino_cache found %p for ino %u\n", ret, ino));
520 return ret;
521}
522
523void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new)
524{
525 struct jffs2_inode_cache **prev;
7d27c814 526
1da177e4 527 spin_lock(&c->inocache_lock);
7d27c814
TG
528 if (!new->ino)
529 new->ino = ++c->highest_ino;
530
531 D2(printk(KERN_DEBUG "jffs2_add_ino_cache: Add %p (ino #%u)\n", new, new->ino));
532
1da177e4
LT
533 prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE];
534
535 while ((*prev) && (*prev)->ino < new->ino) {
536 prev = &(*prev)->next;
537 }
538 new->next = *prev;
539 *prev = new;
540
541 spin_unlock(&c->inocache_lock);
542}
543
544void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
545{
546 struct jffs2_inode_cache **prev;
67e345d1 547 D1(printk(KERN_DEBUG "jffs2_del_ino_cache: Del %p (ino #%u)\n", old, old->ino));
1da177e4
LT
548 spin_lock(&c->inocache_lock);
549
550 prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE];
551
552 while ((*prev) && (*prev)->ino < old->ino) {
553 prev = &(*prev)->next;
554 }
555 if ((*prev) == old) {
556 *prev = old->next;
557 }
558
67e345d1
DW
559 /* Free it now unless it's in READING or CLEARING state, which
560 are the transitions upon read_inode() and clear_inode(). The
561 rest of the time we know nobody else is looking at it, and
562 if it's held by read_inode() or clear_inode() they'll free it
563 for themselves. */
564 if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING)
565 jffs2_free_inode_cache(old);
566
1da177e4
LT
567 spin_unlock(&c->inocache_lock);
568}
569
570void jffs2_free_ino_caches(struct jffs2_sb_info *c)
571{
572 int i;
573 struct jffs2_inode_cache *this, *next;
574
575 for (i=0; i<INOCACHE_HASHSIZE; i++) {
576 this = c->inocache_list[i];
577 while (this) {
578 next = this->next;
1da177e4
LT
579 jffs2_free_inode_cache(this);
580 this = next;
581 }
582 c->inocache_list[i] = NULL;
583 }
584}
585
586void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
587{
588 int i;
589 struct jffs2_raw_node_ref *this, *next;
590
591 for (i=0; i<c->nr_blocks; i++) {
592 this = c->blocks[i].first_node;
593 while(this) {
594 next = this->next_phys;
595 jffs2_free_raw_node_ref(this);
596 this = next;
597 }
598 c->blocks[i].first_node = c->blocks[i].last_node = NULL;
599 }
600}
601
602struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
603{
604 /* The common case in lookup is that there will be a node
605 which precisely matches. So we go looking for that first */
606 struct rb_node *next;
607 struct jffs2_node_frag *prev = NULL;
608 struct jffs2_node_frag *frag = NULL;
609
610 D2(printk(KERN_DEBUG "jffs2_lookup_node_frag(%p, %d)\n", fragtree, offset));
611
612 next = fragtree->rb_node;
613
614 while(next) {
615 frag = rb_entry(next, struct jffs2_node_frag, rb);
616
617 D2(printk(KERN_DEBUG "Considering frag %d-%d (%p). left %p, right %p\n",
618 frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right));
619 if (frag->ofs + frag->size <= offset) {
620 D2(printk(KERN_DEBUG "Going right from frag %d-%d, before the region we care about\n",
621 frag->ofs, frag->ofs+frag->size));
622 /* Remember the closest smaller match on the way down */
623 if (!prev || frag->ofs > prev->ofs)
624 prev = frag;
625 next = frag->rb.rb_right;
626 } else if (frag->ofs > offset) {
627 D2(printk(KERN_DEBUG "Going left from frag %d-%d, after the region we care about\n",
628 frag->ofs, frag->ofs+frag->size));
629 next = frag->rb.rb_left;
630 } else {
631 D2(printk(KERN_DEBUG "Returning frag %d,%d, matched\n",
632 frag->ofs, frag->ofs+frag->size));
633 return frag;
634 }
635 }
636
637 /* Exact match not found. Go back up looking at each parent,
638 and return the closest smaller one */
639
640 if (prev)
641 D2(printk(KERN_DEBUG "No match. Returning frag %d,%d, closest previous\n",
642 prev->ofs, prev->ofs+prev->size));
643 else
644 D2(printk(KERN_DEBUG "Returning NULL, empty fragtree\n"));
645
646 return prev;
647}
648
649/* Pass 'c' argument to indicate that nodes should be marked obsolete as
650 they're killed. */
651void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
652{
653 struct jffs2_node_frag *frag;
654 struct jffs2_node_frag *parent;
655
656 if (!root->rb_node)
657 return;
658
659 frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb));
660
661 while(frag) {
662 if (frag->rb.rb_left) {
663 D2(printk(KERN_DEBUG "Going left from frag (%p) %d-%d\n",
664 frag, frag->ofs, frag->ofs+frag->size));
665 frag = frag_left(frag);
666 continue;
667 }
668 if (frag->rb.rb_right) {
669 D2(printk(KERN_DEBUG "Going right from frag (%p) %d-%d\n",
670 frag, frag->ofs, frag->ofs+frag->size));
671 frag = frag_right(frag);
672 continue;
673 }
674
675 D2(printk(KERN_DEBUG "jffs2_kill_fragtree: frag at 0x%x-0x%x: node %p, frags %d--\n",
676 frag->ofs, frag->ofs+frag->size, frag->node,
677 frag->node?frag->node->frags:0));
678
679 if (frag->node && !(--frag->node->frags)) {
680 /* Not a hole, and it's the final remaining frag
681 of this node. Free the node */
682 if (c)
683 jffs2_mark_node_obsolete(c, frag->node->raw);
684
685 jffs2_free_full_dnode(frag->node);
686 }
687 parent = frag_parent(frag);
688 if (parent) {
689 if (frag_left(parent) == frag)
690 parent->rb.rb_left = NULL;
691 else
692 parent->rb.rb_right = NULL;
693 }
694
695 jffs2_free_node_frag(frag);
696 frag = parent;
697
698 cond_resched();
699 }
700}
701
702void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base)
703{
704 struct rb_node *parent = &base->rb;
705 struct rb_node **link = &parent;
706
707 D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag,
708 newfrag->ofs, newfrag->ofs+newfrag->size, base));
709
710 while (*link) {
711 parent = *link;
712 base = rb_entry(parent, struct jffs2_node_frag, rb);
713
714 D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs));
715 if (newfrag->ofs > base->ofs)
716 link = &base->rb.rb_right;
717 else if (newfrag->ofs < base->ofs)
718 link = &base->rb.rb_left;
719 else {
720 printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
721 BUG();
722 }
723 }
724
725 rb_link_node(&newfrag->rb, &base->rb, link);
726}
This page took 0.068032 seconds and 5 git commands to generate.