Diffstat (limited to 'fs/f2fs/node.h')
 -rw-r--r--   fs/f2fs/node.h   353
 1 files changed, 353 insertions, 0 deletions
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
new file mode 100644
index 000000000000..afdb130f782e
--- /dev/null
+++ b/fs/f2fs/node.h
@@ -0,0 +1,353 @@
/*
 * fs/f2fs/node.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* start node id of a node block dedicated to the given node id */
#define START_NID(nid) ((nid / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)

/* # of pages to perform readahead before building free nids */
#define FREE_NID_PAGES 4

/* maximum # of free node ids to produce during build_free_nids */
#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)
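As a worked example of the two mapping macros and of MAX_FREE_NIDS, the standalone sketch below assumes NAT_ENTRY_PER_BLOCK is 455 (one 4KB NAT block of packed 9-byte struct f2fs_nat_entry records); the real constant comes from the on-disk format header, so the number is an assumption for illustration only.

        /* Standalone sketch; NAT_ENTRY_PER_BLOCK = 455 is assumed, not taken
         * from the f2fs headers. */
        #include <stdio.h>

        #define NAT_ENTRY_PER_BLOCK     455     /* assumed: 4096 / 9-byte raw entry */
        #define FREE_NID_PAGES          4
        #define START_NID(nid) ((nid / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)
        #define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)
        #define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

        int main(void)
        {
                unsigned int nid = 1000;

                /* 1000 / 455 = 2, so nid 1000 lives in NAT block 2, whose first
                 * nid is 2 * 455 = 910; one build pass caches at most
                 * 4 * 455 = 1820 free nids. */
                printf("START_NID(%u) = %u\n", nid, START_NID(nid));
                printf("NAT_BLOCK_OFFSET(%u) = %u\n", nid, NAT_BLOCK_OFFSET(nid));
                printf("MAX_FREE_NIDS = %d\n", MAX_FREE_NIDS);
                return 0;
        }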

/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE		128

/* maximum cached nat entries to manage memory footprint */
#define NM_WOUT_THRESHOLD	(64 * NAT_ENTRY_PER_BLOCK)

/* vector size for gang look-up from nat cache that consists of radix tree */
#define NATVEC_SIZE	64
/*
 * For node information
 */
struct node_info {
        nid_t nid;              /* node id */
        nid_t ino;              /* inode number of the node's owner */
        block_t blk_addr;       /* block address of the node */
        unsigned char version;  /* version of the node */
};

struct nat_entry {
        struct list_head list;  /* for clean or dirty nat list */
        bool checkpointed;      /* whether it is checkpointed or not */
        struct node_info ni;    /* in-memory node information */
};

#define nat_get_nid(nat)		(nat->ni.nid)
#define nat_set_nid(nat, n)		(nat->ni.nid = n)
#define nat_get_blkaddr(nat)		(nat->ni.blk_addr)
#define nat_set_blkaddr(nat, b)		(nat->ni.blk_addr = b)
#define nat_get_ino(nat)		(nat->ni.ino)
#define nat_set_ino(nat, i)		(nat->ni.ino = i)
#define nat_get_version(nat)		(nat->ni.version)
#define nat_set_version(nat, v)		(nat->ni.version = v)

#define __set_nat_cache_dirty(nm_i, ne)					\
        list_move_tail(&ne->list, &nm_i->dirty_nat_entries);
#define __clear_nat_cache_dirty(nm_i, ne)				\
        list_move_tail(&ne->list, &nm_i->nat_entries);
#define inc_node_version(version)	(++version)

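These accessors are typically combined when a node block is relocated: the cached entry gets the new block address, the version is bumped, and the entry moves to the dirty list so the next checkpoint writes it back. The helper below is only an illustrative sketch of that pattern; the name __example_update_nat_cache is mine, locking is omitted, and the nat_entries/dirty_nat_entries lists are the ones the macros above already reference through struct f2fs_nm_info.

        /* Illustrative only: retarget a cached NAT entry and mark it dirty
         * using the macros above. Callers are assumed to hold the NAT lock. */
        static inline void __example_update_nat_cache(struct f2fs_nm_info *nm_i,
                                struct nat_entry *ne, block_t new_blkaddr)
        {
                unsigned char version = nat_get_version(ne);

                nat_set_blkaddr(ne, new_blkaddr);
                nat_set_version(ne, inc_node_version(version));
                __set_nat_cache_dirty(nm_i, ne);
        }
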
static inline void node_info_from_raw_nat(struct node_info *ni,
                                                struct f2fs_nat_entry *raw_ne)
{
        ni->ino = le32_to_cpu(raw_ne->ino);
        ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
        ni->version = raw_ne->version;
}
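For the write-back direction, an inverse helper would pack the in-memory fields back into the on-disk layout. The sketch below is hypothetical (this file only defines the read direction) but mirrors the endianness handling above.

        /* Hypothetical inverse of node_info_from_raw_nat(); not part of this file. */
        static inline void example_raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
                                                struct node_info *ni)
        {
                raw_ne->ino = cpu_to_le32(ni->ino);
                raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
                raw_ne->version = ni->version;
        }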

/*
 * For free nid management
 */
enum nid_state {
        NID_NEW,        /* newly added to free nid list */
        NID_ALLOC       /* it is allocated */
};

struct free_nid {
        struct list_head list;  /* for free node id list */
        nid_t nid;              /* node id */
        int state;              /* in use or not: NID_NEW or NID_ALLOC */
};

static inline int next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *fnid;

        /* check the count under the lock so an empty list is never dereferenced */
        spin_lock(&nm_i->free_nid_list_lock);
        if (nm_i->fcnt <= 0) {
                spin_unlock(&nm_i->free_nid_list_lock);
                return -1;
        }
        fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list);
        *nid = fnid->nid;
        spin_unlock(&nm_i->free_nid_list_lock);
        return 0;
}
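A caller only peeks at the head of the free nid list here; claiming the nid happens elsewhere in node.c. The fragment below is a hypothetical usage sketch showing how the -1 return is meant to be handled.

        /* Hypothetical caller; example_peek_free_nid is not an f2fs function. */
        static void example_peek_free_nid(struct f2fs_sb_info *sbi)
        {
                nid_t nid;

                if (next_free_nid(sbi, &nid) < 0)
                        return; /* no free nids are cached at the moment */

                /* nid now holds the first candidate; another thread may still
                 * take it before it is actually allocated. */
        }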

/*
 * inline functions
 */
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}

static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        pgoff_t block_off;
        pgoff_t block_addr;
        int seg_off;

        block_off = NAT_BLOCK_OFFSET(start);
        seg_off = block_off >> sbi->log_blocks_per_seg;

        block_addr = (pgoff_t)(nm_i->nat_blkaddr +
                (seg_off << sbi->log_blocks_per_seg << 1) +
                (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

        if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
                block_addr += sbi->blocks_per_seg;

        return block_addr;
}

static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
                                                pgoff_t block_addr)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);

        block_addr -= nm_i->nat_blkaddr;
        if ((block_addr >> sbi->log_blocks_per_seg) % 2)
                block_addr -= sbi->blocks_per_seg;
        else
                block_addr += sbi->blocks_per_seg;

        return block_addr + nm_i->nat_blkaddr;
}

static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
        unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

        if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
                f2fs_clear_bit(block_off, nm_i->nat_bitmap);
        else
                f2fs_set_bit(block_off, nm_i->nat_bitmap);
}
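The three helpers above implement a two-copy NAT: NAT segments are laid out in interleaved pairs, so every NAT block has a twin exactly one segment away, and the per-block bitmap records which twin is currently valid. current_nat_addr() returns the valid copy, next_nat_addr() returns the shadow copy that the next checkpoint will write, and set_to_next_nat() flips the bit once that write is committed. The standalone worked example below uses an assumed geometry (512 blocks per segment, NAT area starting at block 4096); real values come from the superblock.

        /* Worked example of the two-copy NAT addressing; all numbers assumed. */
        #include <stdio.h>

        int main(void)
        {
                unsigned long nat_blkaddr = 4096;       /* assumed NAT start block */
                unsigned int log_blocks_per_seg = 9;    /* 512 blocks per segment */
                unsigned long blocks_per_seg = 1 << log_blocks_per_seg;
                unsigned long block_off = 700;          /* NAT block index */
                unsigned long seg_off = block_off >> log_blocks_per_seg;    /* 1 */

                /* copy #0: NAT segments come in pairs, so skip two segments per
                 * NAT segment index, then add the offset inside the segment */
                unsigned long copy0 = nat_blkaddr +
                        (seg_off << log_blocks_per_seg << 1) +
                        (block_off & (blocks_per_seg - 1));     /* 4096+1024+188 */
                unsigned long copy1 = copy0 + blocks_per_seg;   /* the mirror copy */

                printf("copy0 = %lu, copy1 = %lu\n", copy0, copy1);
                /* current_nat_addr() picks copy0 or copy1 from the bitmap bit;
                 * next_nat_addr() returns the other one, and set_to_next_nat()
                 * flips the bit after a checkpoint. */
                return 0;
        }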

static inline void fill_node_footer(struct page *page, nid_t nid,
                                nid_t ino, unsigned int ofs, bool reset)
{
        void *kaddr = page_address(page);
        struct f2fs_node *rn = (struct f2fs_node *)kaddr;
        if (reset)
                memset(rn, 0, sizeof(*rn));
        rn->footer.nid = cpu_to_le32(nid);
        rn->footer.ino = cpu_to_le32(ino);
        rn->footer.flag = cpu_to_le32(ofs << OFFSET_BIT_SHIFT);
}

static inline void copy_node_footer(struct page *dst, struct page *src)
{
        void *src_addr = page_address(src);
        void *dst_addr = page_address(dst);
        struct f2fs_node *src_rn = (struct f2fs_node *)src_addr;
        struct f2fs_node *dst_rn = (struct f2fs_node *)dst_addr;
        memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}

static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
        struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        void *kaddr = page_address(page);
        struct f2fs_node *rn = (struct f2fs_node *)kaddr;
        rn->footer.cp_ver = ckpt->checkpoint_ver;
        rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}

static inline nid_t ino_of_node(struct page *node_page)
{
        void *kaddr = page_address(node_page);
        struct f2fs_node *rn = (struct f2fs_node *)kaddr;
        return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(struct page *node_page)
{
        void *kaddr = page_address(node_page);
        struct f2fs_node *rn = (struct f2fs_node *)kaddr;
        return le32_to_cpu(rn->footer.nid);
}

static inline unsigned int ofs_of_node(struct page *node_page)
{
        void *kaddr = page_address(node_page);
        struct f2fs_node *rn = (struct f2fs_node *)kaddr;
        unsigned flag = le32_to_cpu(rn->footer.flag);
        return flag >> OFFSET_BIT_SHIFT;
}

static inline unsigned long long cpver_of_node(struct page *node_page)
{
        void *kaddr = page_address(node_page);
        struct f2fs_node *rn = (struct f2fs_node *)kaddr;
        return le64_to_cpu(rn->footer.cp_ver);
}

static inline block_t next_blkaddr_of_node(struct page *node_page)
{
        void *kaddr = page_address(node_page);
        struct f2fs_node *rn = (struct f2fs_node *)kaddr;
        return le32_to_cpu(rn->footer.next_blkaddr);
}
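The footer fields above let recovery and cleaning classify a node page without consulting the NAT. As one illustrative combination (the helper name is hypothetical; the real predicate lives elsewhere in f2fs), an inode block can be recognized because it is the node that owns itself:

        /* Hypothetical example: an inode block's nid equals its ino. */
        static inline bool example_is_inode_block(struct page *node_page)
        {
                return nid_of_node(node_page) == ino_of_node(node_page);
        }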

/*
 * f2fs assigns the following node offsets described as (num).
 * N = NIDS_PER_BLOCK
 *
 * Inode block (0)
 *     |- direct node (1)
 *     |- direct node (2)
 *     |- indirect node (3)
 *     |            `- direct node (4 => 4 + N - 1)
 *     |- indirect node (4 + N)
 *     |            `- direct node (5 + N => 5 + 2N - 1)
 *     `- double indirect node (5 + 2N)
 *                  `- indirect node (6 + 2N)
 *                                `- direct node (x(N + 1))
 */
static inline bool IS_DNODE(struct page *node_page)
{
        unsigned int ofs = ofs_of_node(node_page);
        if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
                        ofs == 5 + 2 * NIDS_PER_BLOCK)
                return false;
        if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
                ofs -= 6 + 2 * NIDS_PER_BLOCK;
                /* exact multiples of (N + 1) past this point are the indirect
                 * nodes hanging off the double indirect node, not dnodes */
                if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
                        return false;
        }
        return true;
}
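To make the offset scheme concrete, the standalone sketch below mirrors the corrected test with NIDS_PER_BLOCK assumed to be 1018 (a 4KB node block of 4-byte ids minus the footer); the real constant is defined elsewhere. Offsets 3, 4 + N and 5 + 2N are the indirect and double indirect anchors, and inside the double indirect subtree every (N + 1)-th offset starting at 6 + 2N is again an indirect node, so only those return false.

        #include <stdio.h>
        #include <stdbool.h>

        #define N       1018    /* assumed NIDS_PER_BLOCK, illustration only */

        static bool example_is_dnode(unsigned int ofs)
        {
                if (ofs == 3 || ofs == 4 + N || ofs == 5 + 2 * N)
                        return false;
                if (ofs >= 6 + 2 * N) {
                        ofs -= 6 + 2 * N;
                        if (!(ofs % (N + 1)))
                                return false;   /* indirect under the dind node */
                }
                return true;
        }

        int main(void)
        {
                /* the inode (0) and plain direct nodes are dnodes */
                printf("ofs 0: %d, ofs 4: %d\n",
                       example_is_dnode(0), example_is_dnode(4));
                /* the indirect anchors are not */
                printf("ofs 3: %d, ofs %d: %d\n", example_is_dnode(3),
                       6 + 2 * N, example_is_dnode(6 + 2 * N));
                /* the first direct node inside the double indirect subtree is */
                printf("ofs %d: %d\n", 7 + 2 * N, example_is_dnode(7 + 2 * N));
                return 0;
        }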

static inline void set_nid(struct page *p, int off, nid_t nid, bool i)
{
        struct f2fs_node *rn = (struct f2fs_node *)page_address(p);

        wait_on_page_writeback(p);

        if (i)
                rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
        else
                rn->in.nid[off] = cpu_to_le32(nid);
        set_page_dirty(p);
}

static inline nid_t get_nid(struct page *p, int off, bool i)
{
        struct f2fs_node *rn = (struct f2fs_node *)page_address(p);
        if (i)
                return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
        return le32_to_cpu(rn->in.nid[off]);
}
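The boolean last parameter selects the page layout: true means the page is an inode block, so the slot index is relative to i_nid[] (hence the NODE_DIR1_BLOCK rebase), while false means an indirect node whose nid[] array is indexed directly. A hypothetical fragment showing both cases:

        /* Hypothetical fragment; 'ipage' is an inode block, 'npage' an indirect
         * node, and 'new_nid' a freshly allocated node id. */
        static void example_link_child(struct page *ipage, struct page *npage,
                                nid_t new_nid)
        {
                /* hang a new direct node off slot DIR1 of the inode block */
                set_nid(ipage, NODE_DIR1_BLOCK, new_nid, true);

                /* hang it off the first pointer of an indirect node instead */
                set_nid(npage, 0, new_nid, false);
        }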

/*
 * Coldness identification:
 * - Mark cold files in f2fs_inode_info
 * - Mark cold node blocks in their node footer
 * - Mark cold data pages in page cache
 */
static inline int is_cold_file(struct inode *inode)
{
        return F2FS_I(inode)->i_advise & FADVISE_COLD_BIT;
}

static inline int is_cold_data(struct page *page)
{
        return PageChecked(page);
}

static inline void set_cold_data(struct page *page)
{
        SetPageChecked(page);
}

static inline void clear_cold_data(struct page *page)
{
        ClearPageChecked(page);
}

static inline int is_cold_node(struct page *page)
{
        void *kaddr = page_address(page);
        struct f2fs_node *rn = (struct f2fs_node *)kaddr;
        unsigned int flag = le32_to_cpu(rn->footer.flag);
        return flag & (0x1 << COLD_BIT_SHIFT);
}

static inline unsigned char is_fsync_dnode(struct page *page)
{
        void *kaddr = page_address(page);
        struct f2fs_node *rn = (struct f2fs_node *)kaddr;
        unsigned int flag = le32_to_cpu(rn->footer.flag);
        return flag & (0x1 << FSYNC_BIT_SHIFT);
}

static inline unsigned char is_dent_dnode(struct page *page)
{
        void *kaddr = page_address(page);
        struct f2fs_node *rn = (struct f2fs_node *)kaddr;
        unsigned int flag = le32_to_cpu(rn->footer.flag);
        return flag & (0x1 << DENT_BIT_SHIFT);
}

static inline void set_cold_node(struct inode *inode, struct page *page)
{
        struct f2fs_node *rn = (struct f2fs_node *)page_address(page);
        unsigned int flag = le32_to_cpu(rn->footer.flag);

        if (S_ISDIR(inode->i_mode))
                flag &= ~(0x1 << COLD_BIT_SHIFT);
        else
                flag |= (0x1 << COLD_BIT_SHIFT);
        rn->footer.flag = cpu_to_le32(flag);
}

static inline void set_fsync_mark(struct page *page, int mark)
{
        void *kaddr = page_address(page);
        struct f2fs_node *rn = (struct f2fs_node *)kaddr;
        unsigned int flag = le32_to_cpu(rn->footer.flag);
        if (mark)
                flag |= (0x1 << FSYNC_BIT_SHIFT);
        else
                flag &= ~(0x1 << FSYNC_BIT_SHIFT);
        rn->footer.flag = cpu_to_le32(flag);
}

static inline void set_dentry_mark(struct page *page, int mark)
{
        void *kaddr = page_address(page);
        struct f2fs_node *rn = (struct f2fs_node *)kaddr;
        unsigned int flag = le32_to_cpu(rn->footer.flag);
        if (mark)
                flag |= (0x1 << DENT_BIT_SHIFT);
        else
                flag &= ~(0x1 << DENT_BIT_SHIFT);
        rn->footer.flag = cpu_to_le32(flag);
}
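Taken together, the low bits of footer.flag carry the cold/fsync/dentry marks while the bits from OFFSET_BIT_SHIFT upward carry the node offset written by fill_node_footer(). The standalone sketch below only illustrates that packing with assumed shift values (cold = bit 0, fsync = bit 1, dentry = bit 2, offset starting at bit 3), which is my reading of the on-disk format header rather than anything defined in this file.

        #include <stdio.h>

        /* assumed bit layout of footer.flag, for illustration only */
        #define COLD_BIT_SHIFT          0
        #define FSYNC_BIT_SHIFT         1
        #define DENT_BIT_SHIFT          2
        #define OFFSET_BIT_SHIFT        3

        int main(void)
        {
                unsigned int flag = 0;
                unsigned int ofs = 5;   /* node offset inside the inode's tree */

                flag |= ofs << OFFSET_BIT_SHIFT;        /* fill_node_footer() */
                flag |= 0x1 << FSYNC_BIT_SHIFT;         /* set_fsync_mark(page, 1) */
                flag |= 0x1 << DENT_BIT_SHIFT;          /* set_dentry_mark(page, 1) */

                printf("flag = 0x%x, ofs = %u, fsync = %d, dent = %d, cold = %d\n",
                       flag, flag >> OFFSET_BIT_SHIFT,
                       !!(flag & (0x1 << FSYNC_BIT_SHIFT)),
                       !!(flag & (0x1 << DENT_BIT_SHIFT)),
                       !!(flag & (0x1 << COLD_BIT_SHIFT)));
                return 0;
        }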