Diffstat (limited to 'fs/nilfs2/dat.c')
-rw-r--r-- | fs/nilfs2/dat.c | 38
1 file changed, 19 insertions, 19 deletions
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index fcc2f869af16..b5c13f3576b9 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -85,13 +85,13 @@ void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
 	struct nilfs_dat_entry *entry;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
 	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
 	entry->de_blocknr = cpu_to_le64(0);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_palloc_commit_alloc_entry(dat, req);
 	nilfs_dat_commit_entry(dat, req);
@@ -109,13 +109,13 @@ static void nilfs_dat_commit_free(struct inode *dat,
 	struct nilfs_dat_entry *entry;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
 	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
 	entry->de_blocknr = cpu_to_le64(0);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_dat_commit_entry(dat, req);
 	nilfs_palloc_commit_free_entry(dat, req);
@@ -136,12 +136,12 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
 	struct nilfs_dat_entry *entry;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
 	entry->de_blocknr = cpu_to_le64(blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_dat_commit_entry(dat, req);
 }
@@ -160,12 +160,12 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
 		return ret;
 	}
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	start = le64_to_cpu(entry->de_start);
 	blocknr = le64_to_cpu(entry->de_blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (blocknr == 0) {
 		ret = nilfs_palloc_prepare_free_entry(dat, req);
@@ -186,7 +186,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
 	sector_t blocknr;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	end = start = le64_to_cpu(entry->de_start);
@@ -196,7 +196,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
 	}
 	entry->de_end = cpu_to_le64(end);
 	blocknr = le64_to_cpu(entry->de_blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (blocknr == 0)
 		nilfs_dat_commit_free(dat, req);
@@ -211,12 +211,12 @@ void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
 	sector_t blocknr;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	start = le64_to_cpu(entry->de_start);
 	blocknr = le64_to_cpu(entry->de_blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
 		nilfs_palloc_abort_free_entry(dat, req);
@@ -346,20 +346,20 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
 		}
 	}
 
-	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
 	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
 		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
 		       (unsigned long long)vblocknr,
 		       (unsigned long long)le64_to_cpu(entry->de_start),
 		       (unsigned long long)le64_to_cpu(entry->de_end));
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(entry_bh);
 		return -EINVAL;
 	}
 	WARN_ON(blocknr == 0);
 	entry->de_blocknr = cpu_to_le64(blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(entry_bh);
 	nilfs_mdt_mark_dirty(dat);
@@ -409,7 +409,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
 		}
 	}
 
-	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
 	blocknr = le64_to_cpu(entry->de_blocknr);
 	if (blocknr == 0) {
@@ -419,7 +419,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
 	*blocknrp = blocknr;
 
  out:
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(entry_bh);
 	return ret;
 }
@@ -440,7 +440,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
 						   0, &entry_bh);
 		if (ret < 0)
 			return ret;
-		kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(entry_bh->b_page);
 		/* last virtual block number in this block */
 		first = vinfo->vi_vblocknr;
 		do_div(first, entries_per_block);
@@ -456,7 +456,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
 			vinfo->vi_end = le64_to_cpu(entry->de_end);
 			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
 		}
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(entry_bh);
 	}
 