diff options
Diffstat (limited to 'fs/qnx4/inode.c')
-rw-r--r-- | fs/qnx4/inode.c | 88 |
1 file changed, 17 insertions, 71 deletions
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index 6b009548d2e0..552e994e3aa1 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c | |||
@@ -52,38 +52,6 @@ static int qnx4_remount(struct super_block *sb, int *flags, char *data) | |||
52 | return 0; | 52 | return 0; |
53 | } | 53 | } |
54 | 54 | ||
55 | static struct buffer_head *qnx4_getblk(struct inode *inode, int nr, | ||
56 | int create) | ||
57 | { | ||
58 | struct buffer_head *result = NULL; | ||
59 | |||
60 | if ( nr >= 0 ) | ||
61 | nr = qnx4_block_map( inode, nr ); | ||
62 | if (nr) { | ||
63 | result = sb_getblk(inode->i_sb, nr); | ||
64 | return result; | ||
65 | } | ||
66 | return NULL; | ||
67 | } | ||
68 | |||
69 | struct buffer_head *qnx4_bread(struct inode *inode, int block, int create) | ||
70 | { | ||
71 | struct buffer_head *bh; | ||
72 | |||
73 | bh = qnx4_getblk(inode, block, create); | ||
74 | if (!bh || buffer_uptodate(bh)) { | ||
75 | return bh; | ||
76 | } | ||
77 | ll_rw_block(READ, 1, &bh); | ||
78 | wait_on_buffer(bh); | ||
79 | if (buffer_uptodate(bh)) { | ||
80 | return bh; | ||
81 | } | ||
82 | brelse(bh); | ||
83 | |||
84 | return NULL; | ||
85 | } | ||
86 | |||
87 | static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh, int create ) | 55 | static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh, int create ) |
88 | { | 56 | { |
89 | unsigned long phys; | 57 | unsigned long phys; |
@@ -98,23 +66,31 @@ static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_h | |||
98 | return 0; | 66 | return 0; |
99 | } | 67 | } |
100 | 68 | ||
69 | static inline u32 try_extent(qnx4_xtnt_t *extent, u32 *offset) | ||
70 | { | ||
71 | u32 size = le32_to_cpu(extent->xtnt_size); | ||
72 | if (*offset < size) | ||
73 | return le32_to_cpu(extent->xtnt_blk) + *offset - 1; | ||
74 | *offset -= size; | ||
75 | return 0; | ||
76 | } | ||
77 | |||
101 | unsigned long qnx4_block_map( struct inode *inode, long iblock ) | 78 | unsigned long qnx4_block_map( struct inode *inode, long iblock ) |
102 | { | 79 | { |
103 | int ix; | 80 | int ix; |
104 | long offset, i_xblk; | 81 | long i_xblk; |
105 | unsigned long block = 0; | ||
106 | struct buffer_head *bh = NULL; | 82 | struct buffer_head *bh = NULL; |
107 | struct qnx4_xblk *xblk = NULL; | 83 | struct qnx4_xblk *xblk = NULL; |
108 | struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode); | 84 | struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode); |
109 | u16 nxtnt = le16_to_cpu(qnx4_inode->di_num_xtnts); | 85 | u16 nxtnt = le16_to_cpu(qnx4_inode->di_num_xtnts); |
86 | u32 offset = iblock; | ||
87 | u32 block = try_extent(&qnx4_inode->di_first_xtnt, &offset); | ||
110 | 88 | ||
111 | if ( iblock < le32_to_cpu(qnx4_inode->di_first_xtnt.xtnt_size) ) { | 89 | if (block) { |
112 | // iblock is in the first extent. This is easy. | 90 | // iblock is in the first extent. This is easy. |
113 | block = le32_to_cpu(qnx4_inode->di_first_xtnt.xtnt_blk) + iblock - 1; | ||
114 | } else { | 91 | } else { |
115 | // iblock is beyond first extent. We have to follow the extent chain. | 92 | // iblock is beyond first extent. We have to follow the extent chain. |
116 | i_xblk = le32_to_cpu(qnx4_inode->di_xblk); | 93 | i_xblk = le32_to_cpu(qnx4_inode->di_xblk); |
117 | offset = iblock - le32_to_cpu(qnx4_inode->di_first_xtnt.xtnt_size); | ||
118 | ix = 0; | 94 | ix = 0; |
119 | while ( --nxtnt > 0 ) { | 95 | while ( --nxtnt > 0 ) { |
120 | if ( ix == 0 ) { | 96 | if ( ix == 0 ) { |
@@ -130,12 +106,11 @@ unsigned long qnx4_block_map( struct inode *inode, long iblock ) | |||
130 | return -EIO; | 106 | return -EIO; |
131 | } | 107 | } |
132 | } | 108 | } |
133 | if ( offset < le32_to_cpu(xblk->xblk_xtnts[ix].xtnt_size) ) { | 109 | block = try_extent(&xblk->xblk_xtnts[ix], &offset); |
110 | if (block) { | ||
134 | // got it! | 111 | // got it! |
135 | block = le32_to_cpu(xblk->xblk_xtnts[ix].xtnt_blk) + offset - 1; | ||
136 | break; | 112 | break; |
137 | } | 113 | } |
138 | offset -= le32_to_cpu(xblk->xblk_xtnts[ix].xtnt_size); | ||
139 | if ( ++ix >= xblk->xblk_num_xtnts ) { | 114 | if ( ++ix >= xblk->xblk_num_xtnts ) { |
140 | i_xblk = le32_to_cpu(xblk->xblk_next_xblk); | 115 | i_xblk = le32_to_cpu(xblk->xblk_next_xblk); |
141 | ix = 0; | 116 | ix = 0; |
@@ -260,15 +235,13 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent) | |||
260 | } | 235 | } |
261 | 236 | ||
262 | ret = -ENOMEM; | 237 | ret = -ENOMEM; |
263 | s->s_root = d_alloc_root(root); | 238 | s->s_root = d_make_root(root); |
264 | if (s->s_root == NULL) | 239 | if (s->s_root == NULL) |
265 | goto outi; | 240 | goto outb; |
266 | 241 | ||
267 | brelse(bh); | 242 | brelse(bh); |
268 | return 0; | 243 | return 0; |
269 | 244 | ||
270 | outi: | ||
271 | iput(root); | ||
272 | outb: | 245 | outb: |
273 | kfree(qs->BitMap); | 246 | kfree(qs->BitMap); |
274 | out: | 247 | out: |
@@ -288,44 +261,17 @@ static void qnx4_put_super(struct super_block *sb) | |||
288 | return; | 261 | return; |
289 | } | 262 | } |
290 | 263 | ||
291 | static int qnx4_writepage(struct page *page, struct writeback_control *wbc) | ||
292 | { | ||
293 | return block_write_full_page(page,qnx4_get_block, wbc); | ||
294 | } | ||
295 | |||
296 | static int qnx4_readpage(struct file *file, struct page *page) | 264 | static int qnx4_readpage(struct file *file, struct page *page) |
297 | { | 265 | { |
298 | return block_read_full_page(page,qnx4_get_block); | 266 | return block_read_full_page(page,qnx4_get_block); |
299 | } | 267 | } |
300 | 268 | ||
301 | static int qnx4_write_begin(struct file *file, struct address_space *mapping, | ||
302 | loff_t pos, unsigned len, unsigned flags, | ||
303 | struct page **pagep, void **fsdata) | ||
304 | { | ||
305 | struct qnx4_inode_info *qnx4_inode = qnx4_i(mapping->host); | ||
306 | int ret; | ||
307 | |||
308 | *pagep = NULL; | ||
309 | ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, | ||
310 | qnx4_get_block, | ||
311 | &qnx4_inode->mmu_private); | ||
312 | if (unlikely(ret)) { | ||
313 | loff_t isize = mapping->host->i_size; | ||
314 | if (pos + len > isize) | ||
315 | vmtruncate(mapping->host, isize); | ||
316 | } | ||
317 | |||
318 | return ret; | ||
319 | } | ||
320 | static sector_t qnx4_bmap(struct address_space *mapping, sector_t block) | 269 | static sector_t qnx4_bmap(struct address_space *mapping, sector_t block) |
321 | { | 270 | { |
322 | return generic_block_bmap(mapping,block,qnx4_get_block); | 271 | return generic_block_bmap(mapping,block,qnx4_get_block); |
323 | } | 272 | } |
324 | static const struct address_space_operations qnx4_aops = { | 273 | static const struct address_space_operations qnx4_aops = { |
325 | .readpage = qnx4_readpage, | 274 | .readpage = qnx4_readpage, |
326 | .writepage = qnx4_writepage, | ||
327 | .write_begin = qnx4_write_begin, | ||
328 | .write_end = generic_write_end, | ||
329 | .bmap = qnx4_bmap | 275 | .bmap = qnx4_bmap |
330 | }; | 276 | }; |
331 | 277 | ||