/*
 * fs/f2fs/node.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* start node id of a node block dedicated to the given node id */
#define START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define NAT_BLOCK_OFFSET(start_nid) ((start_nid) / NAT_ENTRY_PER_BLOCK)
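/*
 * Worked example (assuming the default 4KB block size, where
 * NAT_ENTRY_PER_BLOCK = 4096 / sizeof(struct f2fs_nat_entry) = 455):
 * START_NID(1000) == 910, i.e. nid 1000 shares a NAT block with nids
 * 910..1364, and NAT_BLOCK_OFFSET(910) == 2 places that block third
 * in the NAT area.
 */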

/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES	4

#define DEF_RA_NID_PAGES	4	/* # of nid pages to read ahead */

/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE		128

/* control the memory footprint threshold (10MB per 1GB ram) */
#define DEF_RAM_THRESHOLD	10

/* vector size for gang look-up from nat cache that consists of radix tree */
#define NATVEC_SIZE	64
#define SETVEC_SIZE	32

/* return value for read_node_page */
#define LOCKED_PAGE	1

/* For flag in struct node_info */
enum {
	IS_CHECKPOINTED,	/* is it checkpointed before? */
	HAS_FSYNCED_INODE,	/* is the inode fsynced before? */
	HAS_LAST_FSYNC,		/* has the latest node fsync mark? */
	IS_DIRTY,		/* is this nat entry dirty? */
};

/*
 * For node information
 */
struct node_info {
	nid_t nid;		/* node id */
	nid_t ino;		/* inode number of the node's owner */
	block_t	blk_addr;	/* block address of the node */
	unsigned char version;	/* version of the node */
	unsigned char flag;	/* for node information bits */
};

struct nat_entry {
	struct list_head list;	/* for clean or dirty nat list */
	struct node_info ni;	/* in-memory node information */
};

#define nat_get_nid(nat)		((nat)->ni.nid)
#define nat_set_nid(nat, n)		((nat)->ni.nid = (n))
#define nat_get_blkaddr(nat)		((nat)->ni.blk_addr)
#define nat_set_blkaddr(nat, b)		((nat)->ni.blk_addr = (b))
#define nat_get_ino(nat)		((nat)->ni.ino)
#define nat_set_ino(nat, i)		((nat)->ni.ino = (i))
#define nat_get_version(nat)		((nat)->ni.version)
#define nat_set_version(nat, v)		((nat)->ni.version = (v))

#define inc_node_version(version)	(++(version))

static inline void copy_node_info(struct node_info *dst,
						struct node_info *src)
{
	dst->nid = src->nid;
	dst->ino = src->ino;
	dst->blk_addr = src->blk_addr;
	dst->version = src->version;
	/* should not copy flag here */
}

static inline void set_nat_flag(struct nat_entry *ne,
				unsigned int type, bool set)
{
	unsigned char mask = 0x01 << type;

	if (set)
		ne->ni.flag |= mask;
	else
		ne->ni.flag &= ~mask;
}

static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
	unsigned char mask = 0x01 << type;

	return ne->ni.flag & mask;
}
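
/*
 * Each value of the flag enum above is a bit index into the 8-bit
 * ni.flag field; e.g. get_nat_flag(ne, IS_CHECKPOINTED) tests bit 0.
 */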

static inline void nat_reset_flag(struct nat_entry *ne)
{
	/* these states can be set only after checkpoint was done */
	set_nat_flag(ne, IS_CHECKPOINTED, true);
	set_nat_flag(ne, HAS_FSYNCED_INODE, false);
	set_nat_flag(ne, HAS_LAST_FSYNC, true);
}

static inline void node_info_from_raw_nat(struct node_info *ni,
						struct f2fs_nat_entry *raw_ne)
{
	ni->ino = le32_to_cpu(raw_ne->ino);
	ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
	ni->version = raw_ne->version;
}

static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
						struct node_info *ni)
{
	raw_ne->ino = cpu_to_le32(ni->ino);
	raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
	raw_ne->version = ni->version;
}
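
/*
 * Note that the on-disk f2fs_nat_entry carries no nid field: an
 * entry's nid is implied by its position inside its NAT block, so
 * callers of node_info_from_raw_nat() must fill in ni->nid themselves.
 */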

enum mem_type {
	FREE_NIDS,	/* indicates the free nid list */
	NAT_ENTRIES,	/* indicates the cached nat entry */
	DIRTY_DENTS,	/* indicates dirty dentry pages */
	INO_ENTRIES,	/* indicates inode entries */
	EXTENT_CACHE,	/* indicates extent cache */
	BASE_CHECK,	/* check kernel status */
};

struct nat_entry_set {
	struct list_head set_list;	/* link with other nat sets */
	struct list_head entry_list;	/* link with dirty nat entries */
	nid_t set;			/* set number */
	unsigned int entry_cnt;		/* the # of nat entries in set */
};
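
/*
 * A nat_entry_set groups the dirty nat entries belonging to one NAT
 * block (identified by @set), so that they can be flushed with a
 * single NAT page update at checkpoint time.
 */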

/*
 * For free nid management
 */
enum nid_state {
	NID_NEW,	/* newly added to free nid list */
	NID_ALLOC	/* it is allocated */
};

struct free_nid {
	struct list_head list;	/* for free node id list */
	nid_t nid;		/* node id */
	int state;		/* in use or not: NID_NEW or NID_ALLOC */
};

static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *fnid;

	spin_lock(&nm_i->free_nid_list_lock);
	if (nm_i->fcnt <= 0) {
		spin_unlock(&nm_i->free_nid_list_lock);
		return;
	}
	fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list);
	*nid = fnid->nid;
	spin_unlock(&nm_i->free_nid_list_lock);
}
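
/*
 * Note that next_free_nid() leaves *nid untouched when no free nids
 * are cached, so the caller is expected to pre-initialize *nid (the
 * checkpoint path, for instance, passes in 0).
 */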

/*
 * inline functions
 */
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}

static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	block_off = NAT_BLOCK_OFFSET(start);
	seg_off = block_off >> sbi->log_blocks_per_seg;

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
		(seg_off << sbi->log_blocks_per_seg << 1) +
		(block_off & (sbi->blocks_per_seg - 1)));

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += sbi->blocks_per_seg;

	return block_addr;
}
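
/*
 * The NAT area keeps two copies of every NAT block in adjacent
 * segment pairs, and nm_i->nat_bitmap records which copy of each
 * block is currently valid. Worked example (assuming the default 512
 * blocks per segment, i.e. log_blocks_per_seg == 9): block_off 700
 * lands in seg_off 1, so its base address is
 * nat_blkaddr + (1 << 9 << 1) + (700 & 511) = nat_blkaddr + 1212,
 * plus another 512 blocks when the bitmap selects the second copy.
 */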

static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	block_addr -= nm_i->nat_blkaddr;
	if ((block_addr >> sbi->log_blocks_per_seg) % 2)
		block_addr -= sbi->blocks_per_seg;
	else
		block_addr += sbi->blocks_per_seg;

	return block_addr + nm_i->nat_blkaddr;
}
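
/*
 * next_nat_addr() maps a NAT block address onto the same block in the
 * mirror copy: a segment at an odd offset within the NAT area belongs
 * to the second copy, so the address moves one segment back, otherwise
 * one segment forward.
 */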

static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
	unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

	f2fs_change_bit(block_off, nm_i->nat_bitmap);
}

static inline void fill_node_footer(struct page *page, nid_t nid,
				nid_t ino, unsigned int ofs, bool reset)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int old_flag = 0;

	if (reset)
		memset(rn, 0, sizeof(*rn));
	else
		old_flag = le32_to_cpu(rn->footer.flag);

	rn->footer.nid = cpu_to_le32(nid);
	rn->footer.ino = cpu_to_le32(ino);

	/* should keep the old flag bits, such as the cold bit */
	rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
					(old_flag & OFFSET_BIT_MASK));
}
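
/*
 * Layout of footer.flag: the bits below OFFSET_BIT_SHIFT hold the
 * cold/fsync/dentry marks (OFFSET_BIT_MASK covers exactly those bits),
 * while the remaining high bits hold the node's offset within its
 * inode's node tree, as returned by ofs_of_node() below.
 */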

static inline void copy_node_footer(struct page *dst, struct page *src)
{
	struct f2fs_node *src_rn = F2FS_NODE(src);
	struct f2fs_node *dst_rn = F2FS_NODE(dst);

	memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}

static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	struct f2fs_node *rn = F2FS_NODE(page);

	rn->footer.cp_ver = ckpt->checkpoint_ver;
	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}

static inline nid_t ino_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);

	return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);

	return le32_to_cpu(rn->footer.nid);
}

static inline unsigned int ofs_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	return flag >> OFFSET_BIT_SHIFT;
}

static inline unsigned long long cpver_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);

	return le64_to_cpu(rn->footer.cp_ver);
}

static inline block_t next_blkaddr_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);

	return le32_to_cpu(rn->footer.next_blkaddr);
}

/*
 * f2fs assigns the following node offsets described as (num).
 * N = NIDS_PER_BLOCK
 *
 * Inode block (0)
 *     |- direct node (1)
 *     |- direct node (2)
 *     |- indirect node (3)
 *     |            `- direct node (4 => 4 + N - 1)
 *     |- indirect node (4 + N)
 *     |            `- direct node (5 + N => 5 + 2N - 1)
 *     `- double indirect node (5 + 2N)
 *                  `- indirect node (6 + 2N)
 *                        `- direct node
 *                  ......
 *                  `- indirect node ((6 + 2N) + x(N + 1))
 *                        `- direct node
 *                  ......
 *                  `- indirect node ((6 + 2N) + (N - 1)(N + 1))
 *                        `- direct node
 */
static inline bool IS_DNODE(struct page *node_page)
{
	unsigned int ofs = ofs_of_node(node_page);

	if (f2fs_has_xattr_block(ofs))
		return false;

	if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
			ofs == 5 + 2 * NIDS_PER_BLOCK)
		return false;
	if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
		ofs -= 6 + 2 * NIDS_PER_BLOCK;
		if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
			return false;
	}
	return true;
}
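
/*
 * Worked example (assuming the default 4KB block size, so that
 * N = NIDS_PER_BLOCK = 1018): offsets 3, 1022 (4 + N) and 2041
 * (5 + 2N) are the indirect and double indirect roots, and from
 * 2042 (6 + 2N) onward every (N + 1)-th offset is again an indirect
 * node; every other offset is a dnode holding data block addresses.
 */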

static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	f2fs_wait_on_page_writeback(p, NODE);

	if (i)
		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
	else
		rn->in.nid[off] = cpu_to_le32(nid);
	return set_page_dirty(p);
}

static inline nid_t get_nid(struct page *p, int off, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	if (i)
		return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
	return le32_to_cpu(rn->in.nid[off]);
}
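
/*
 * For both helpers above, @i selects the page type: true means @p is
 * an inode page whose nids live in i_nid[] (indexed relative to
 * NODE_DIR1_BLOCK), false means @p is an indirect node page indexed
 * directly by @off.
 */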

/*
 * Coldness identification:
 * - Mark cold files in f2fs_inode_info
 * - Mark cold node blocks in their node footer
 * - Mark cold data pages in page cache
 */
static inline int is_cold_data(struct page *page)
{
	return PageChecked(page);
}

static inline void set_cold_data(struct page *page)
{
	SetPageChecked(page);
}

static inline void clear_cold_data(struct page *page)
{
	ClearPageChecked(page);
}
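
/*
 * Cold data pages are tracked with the generic PG_checked page flag,
 * which f2fs repurposes here since it has no other use for it.
 */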

static inline int is_node(struct page *page, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);

	return le32_to_cpu(rn->footer.flag) & (1 << type);
}

#define is_cold_node(page)	is_node(page, COLD_BIT_SHIFT)
#define is_fsync_dnode(page)	is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page)	is_node(page, DENT_BIT_SHIFT)

static inline void set_cold_node(struct inode *inode, struct page *page)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (S_ISDIR(inode->i_mode))
		flag &= ~(0x1 << COLD_BIT_SHIFT);
	else
		flag |= (0x1 << COLD_BIT_SHIFT);
	rn->footer.flag = cpu_to_le32(flag);
}

static inline void set_mark(struct page *page, int mark, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (mark)
		flag |= (0x1 << type);
	else
		flag &= ~(0x1 << type);
	rn->footer.flag = cpu_to_le32(flag);
}
#define set_dentry_mark(page, mark)	set_mark(page, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(page, mark)	set_mark(page, mark, FSYNC_BIT_SHIFT)