Diffstat (limited to 'fs/nilfs2/dat.c')
-rw-r--r--	fs/nilfs2/dat.c	429
1 files changed, 429 insertions, 0 deletions
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
new file mode 100644
index 000000000000..9360920f7d38
--- /dev/null
+++ b/fs/nilfs2/dat.c
@@ -0,0 +1,429 @@
/*
 * dat.c - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}

int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}
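
/*
 * Illustrative sketch (hypothetical helper, not a caller from this
 * file): allocating a fresh virtual block number with the
 * prepare/commit pair above.  Commit cannot fail once prepare has
 * succeeded; on a later failure elsewhere, nilfs_dat_abort_alloc()
 * undoes the prepare step instead.
 */
static int __maybe_unused example_dat_alloc(struct inode *dat,
					    __u64 *vblocknrp)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = 0;	/* allocation hint: search from the start */
	ret = nilfs_dat_prepare_alloc(dat, &req);
	if (ret < 0)
		return ret;	/* e.g. -ENOMEM or -EIO */
	nilfs_dat_commit_alloc(dat, &req);
	*vblocknrp = req.pr_entry_nr;	/* number chosen by the allocator */
	return 0;
}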

int nilfs_dat_prepare_free(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_free_entry(dat, req);
	if (ret < 0)
		return ret;
	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		nilfs_palloc_abort_free_entry(dat, req);
		return ret;
	}
	return 0;
}

void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}

void nilfs_dat_abort_free(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_free_entry(dat, req);
}
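
/*
 * Illustrative sketch (hypothetical helper): giving virtual block
 * number @vblocknr back to the allocator with the free pair above.
 * The request carries the entry number in; commit resets the entry
 * to an empty lifetime and releases it.
 */
static int __maybe_unused example_dat_free(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_free(dat, &req);
	if (ret < 0)
		return ret;
	nilfs_dat_commit_free(dat, &req);	/* resets and releases the entry */
	return 0;
}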

int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	BUG_ON(ret == -ENOENT);
	return ret;
}

void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	if (entry->de_blocknr != cpu_to_le64(0) ||
	    entry->de_end != cpu_to_le64(NILFS_CNO_MAX)) {
		printk(KERN_CRIT
		       "%s: vbn = %llu, start = %llu, end = %llu, pbn = %llu\n",
		       __func__, (unsigned long long)req->pr_entry_nr,
		       (unsigned long long)le64_to_cpu(entry->de_start),
		       (unsigned long long)le64_to_cpu(entry->de_end),
		       (unsigned long long)le64_to_cpu(entry->de_blocknr));
		BUG();
	}
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
}
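
/*
 * Illustrative sketch (hypothetical helper; the real callers live in
 * the bmap/segment code): binding @vblocknr to a concrete disk block
 * once the segment constructor has decided where the block will be
 * written.  @blocknr must be nonzero and the entry still unbound,
 * otherwise nilfs_dat_commit_start() hits its BUG() check.
 */
static int __maybe_unused example_dat_bind(struct inode *dat, __u64 vblocknr,
					   sector_t blocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_start(dat, &req);
	if (ret < 0)
		return ret;
	nilfs_dat_commit_start(dat, &req, blocknr);	/* sets de_start/de_blocknr */
	return 0;
}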

int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		BUG_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		BUG_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}
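
/*
 * Illustrative sketch (hypothetical helper): retiring @vblocknr at
 * the current checkpoint.  With @dead nonzero the live interval
 * collapses to empty, and an entry that never received a disk block
 * (de_blocknr == 0) is freed outright by nilfs_dat_commit_end().
 */
static int __maybe_unused example_dat_end(struct inode *dat, __u64 vblocknr,
					  int dead)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_end(dat, &req);
	if (ret < 0)
		return ret;
	nilfs_dat_commit_end(dat, &req, dead);
	return 0;
}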

/**
 * nilfs_dat_mark_dirty - mark the DAT entry of a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() marks the entry block containing the
 * DAT entry of @vblocknr dirty so that it is written out by the next
 * segment construction.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified
 * by @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block numbers have not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}
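
/*
 * Illustrative sketch (hypothetical helper): batch-freeing a few
 * virtual block numbers in a single call.  The numbers are made up
 * for illustration only.
 */
static int __maybe_unused example_dat_freev(struct inode *dat)
{
	__u64 vblocknrs[] = { 10, 11, 12 };	/* made-up numbers */

	return nilfs_dat_freev(dat, vblocknrs, ARRAY_SIZE(vblocknrs));
}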

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;
	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (entry->de_blocknr == cpu_to_le64(0)) {
		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
		       (unsigned long long)vblocknr,
		       (unsigned long long)le64_to_cpu(entry->de_start),
		       (unsigned long long)le64_to_cpu(entry->de_end));
		BUG();
	}
	BUG_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}
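
/*
 * Illustrative sketch (hypothetical helper): how a cleaner might
 * repoint a live block after copying its contents to a new on-disk
 * location.  Users of the virtual address never notice the move; only
 * the DAT mapping changes.  nilfs_dat_translate() is declared in
 * dat.h, so it may be used here before its definition below.
 */
static int __maybe_unused example_dat_relocate(struct inode *dat,
					       __u64 vblocknr,
					       sector_t new_blocknr)
{
	sector_t old_blocknr;
	int ret;

	ret = nilfs_dat_translate(dat, vblocknr, &old_blocknr);
	if (ret < 0)
		return ret;	/* -ENOENT: no disk block assigned yet */
	if (old_blocknr == new_blocknr)
		return 0;	/* nothing to do */
	return nilfs_dat_move(dat, vblocknr, new_blocknr);
}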

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	if (blocknrp != NULL)
		*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr, KM_USER0);
	brelse(entry_bh);
	return ret;
}
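
/*
 * Illustrative sketch (hypothetical helper; real lookups are driven
 * by the bmap code): resolving a virtual block number to its current
 * disk block before submitting a read.
 */
static void __maybe_unused example_dat_lookup(struct inode *dat,
					      __u64 vblocknr)
{
	sector_t pbn;

	if (nilfs_dat_translate(dat, vblocknr, &pbn) == 0)
		printk(KERN_DEBUG "vbn %llu -> pbn %llu\n",
		       (unsigned long long)vblocknr,
		       (unsigned long long)pbn);
	else
		printk(KERN_DEBUG "vbn %llu: not mapped\n",
		       (unsigned long long)vblocknr);
}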

ssize_t nilfs_dat_get_vinfo(struct inode *dat, struct nilfs_vinfo *vinfo,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo[i].vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
		/* first and last virtual block numbers in this entry block */
		first = vinfo[i].vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo[j].vi_vblocknr >= first &&
			     vinfo[j].vi_vblocknr <= last;
		     j++, n++) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo[j].vi_vblocknr, entry_bh, kaddr);
			vinfo[j].vi_start = le64_to_cpu(entry->de_start);
			vinfo[j].vi_end = le64_to_cpu(entry->de_end);
			vinfo[j].vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
	}

	return nvi;
}
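
/*
 * Illustrative sketch (hypothetical helper): querying lifetime and
 * location of several virtual block numbers at once.  Only
 * vi_vblocknr is set on input; entries should be ordered so that
 * numbers sharing an entry block are adjacent, which keeps the number
 * of entry-block reads minimal.  The numbers are made up.
 */
static ssize_t __maybe_unused example_dat_query(struct inode *dat)
{
	struct nilfs_vinfo vinfo[2];

	vinfo[0].vi_vblocknr = 10;	/* made-up numbers */
	vinfo[1].vi_vblocknr = 11;
	return nilfs_dat_get_vinfo(dat, vinfo, ARRAY_SIZE(vinfo));
}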