diff options
author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
commit | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch) | |
tree | a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /fs/hpfs/anode.c | |
parent | 406089d01562f1e2bf9f089fd7637009ebaad589 (diff) |
Patched in Tegra support.
Diffstat (limited to 'fs/hpfs/anode.c')
-rw-r--r-- | fs/hpfs/anode.c | 49 |
1 file changed, 22 insertions, 27 deletions
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c index 2d5b254ad9e..08b503e8ed2 100644 --- a/fs/hpfs/anode.c +++ b/fs/hpfs/anode.c | |||
@@ -20,7 +20,7 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode, | |||
20 | int c1, c2 = 0; | 20 | int c1, c2 = 0; |
21 | go_down: | 21 | go_down: |
22 | if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1; | 22 | if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1; |
23 | if (bp_internal(btree)) { | 23 | if (btree->internal) { |
24 | for (i = 0; i < btree->n_used_nodes; i++) | 24 | for (i = 0; i < btree->n_used_nodes; i++) |
25 | if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) { | 25 | if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) { |
26 | a = le32_to_cpu(btree->u.internal[i].down); | 26 | a = le32_to_cpu(btree->u.internal[i].down); |
@@ -82,7 +82,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi | |||
82 | brelse(bh); | 82 | brelse(bh); |
83 | return -1; | 83 | return -1; |
84 | } | 84 | } |
85 | if (bp_internal(btree)) { | 85 | if (btree->internal) { |
86 | a = le32_to_cpu(btree->u.internal[n].down); | 86 | a = le32_to_cpu(btree->u.internal[n].down); |
87 | btree->u.internal[n].file_secno = cpu_to_le32(-1); | 87 | btree->u.internal[n].file_secno = cpu_to_le32(-1); |
88 | mark_buffer_dirty(bh); | 88 | mark_buffer_dirty(bh); |
@@ -102,7 +102,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi | |||
102 | return -1; | 102 | return -1; |
103 | } | 103 | } |
104 | if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) { | 104 | if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) { |
105 | le32_add_cpu(&btree->u.external[n].length, 1); | 105 | btree->u.external[n].length = cpu_to_le32(le32_to_cpu(btree->u.external[n].length) + 1); |
106 | mark_buffer_dirty(bh); | 106 | mark_buffer_dirty(bh); |
107 | brelse(bh); | 107 | brelse(bh); |
108 | return se; | 108 | return se; |
@@ -129,12 +129,12 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi | |||
129 | } | 129 | } |
130 | if (a == node && fnod) { | 130 | if (a == node && fnod) { |
131 | anode->up = cpu_to_le32(node); | 131 | anode->up = cpu_to_le32(node); |
132 | anode->btree.flags |= BP_fnode_parent; | 132 | anode->btree.fnode_parent = 1; |
133 | anode->btree.n_used_nodes = btree->n_used_nodes; | 133 | anode->btree.n_used_nodes = btree->n_used_nodes; |
134 | anode->btree.first_free = btree->first_free; | 134 | anode->btree.first_free = btree->first_free; |
135 | anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes; | 135 | anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes; |
136 | memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12); | 136 | memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12); |
137 | btree->flags |= BP_internal; | 137 | btree->internal = 1; |
138 | btree->n_free_nodes = 11; | 138 | btree->n_free_nodes = 11; |
139 | btree->n_used_nodes = 1; | 139 | btree->n_used_nodes = 1; |
140 | btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree); | 140 | btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree); |
@@ -153,7 +153,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi | |||
153 | btree = &anode->btree; | 153 | btree = &anode->btree; |
154 | } | 154 | } |
155 | btree->n_free_nodes--; n = btree->n_used_nodes++; | 155 | btree->n_free_nodes--; n = btree->n_used_nodes++; |
156 | le16_add_cpu(&btree->first_free, 12); | 156 | btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 12); |
157 | btree->u.external[n].disk_secno = cpu_to_le32(se); | 157 | btree->u.external[n].disk_secno = cpu_to_le32(se); |
158 | btree->u.external[n].file_secno = cpu_to_le32(fs); | 158 | btree->u.external[n].file_secno = cpu_to_le32(fs); |
159 | btree->u.external[n].length = cpu_to_le32(1); | 159 | btree->u.external[n].length = cpu_to_le32(1); |
@@ -174,7 +174,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi | |||
174 | } | 174 | } |
175 | if (btree->n_free_nodes) { | 175 | if (btree->n_free_nodes) { |
176 | btree->n_free_nodes--; n = btree->n_used_nodes++; | 176 | btree->n_free_nodes--; n = btree->n_used_nodes++; |
177 | le16_add_cpu(&btree->first_free, 8); | 177 | btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 8); |
178 | btree->u.internal[n].file_secno = cpu_to_le32(-1); | 178 | btree->u.internal[n].file_secno = cpu_to_le32(-1); |
179 | btree->u.internal[n].down = cpu_to_le32(na); | 179 | btree->u.internal[n].down = cpu_to_le32(na); |
180 | btree->u.internal[n-1].file_secno = cpu_to_le32(fs); | 180 | btree->u.internal[n-1].file_secno = cpu_to_le32(fs); |
@@ -184,10 +184,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi | |||
184 | hpfs_free_sectors(s, ra, 1); | 184 | hpfs_free_sectors(s, ra, 1); |
185 | if ((anode = hpfs_map_anode(s, na, &bh))) { | 185 | if ((anode = hpfs_map_anode(s, na, &bh))) { |
186 | anode->up = cpu_to_le32(up); | 186 | anode->up = cpu_to_le32(up); |
187 | if (up == node && fnod) | 187 | anode->btree.fnode_parent = up == node && fnod; |
188 | anode->btree.flags |= BP_fnode_parent; | ||
189 | else | ||
190 | anode->btree.flags &= ~BP_fnode_parent; | ||
191 | mark_buffer_dirty(bh); | 188 | mark_buffer_dirty(bh); |
192 | brelse(bh); | 189 | brelse(bh); |
193 | } | 190 | } |
@@ -201,7 +198,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi | |||
201 | if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) { | 198 | if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) { |
202 | anode = new_anode; | 199 | anode = new_anode; |
203 | /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/ | 200 | /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/ |
204 | anode->btree.flags |= BP_internal; | 201 | anode->btree.internal = 1; |
205 | anode->btree.n_used_nodes = 1; | 202 | anode->btree.n_used_nodes = 1; |
206 | anode->btree.n_free_nodes = 59; | 203 | anode->btree.n_free_nodes = 59; |
207 | anode->btree.first_free = cpu_to_le16(16); | 204 | anode->btree.first_free = cpu_to_le16(16); |
@@ -218,8 +215,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi | |||
218 | } | 215 | } |
219 | if ((anode = hpfs_map_anode(s, na, &bh))) { | 216 | if ((anode = hpfs_map_anode(s, na, &bh))) { |
220 | anode->up = cpu_to_le32(node); | 217 | anode->up = cpu_to_le32(node); |
221 | if (fnod) | 218 | if (fnod) anode->btree.fnode_parent = 1; |
222 | anode->btree.flags |= BP_fnode_parent; | ||
223 | mark_buffer_dirty(bh); | 219 | mark_buffer_dirty(bh); |
224 | brelse(bh); | 220 | brelse(bh); |
225 | } | 221 | } |
@@ -238,19 +234,18 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi | |||
238 | } | 234 | } |
239 | ranode->up = cpu_to_le32(node); | 235 | ranode->up = cpu_to_le32(node); |
240 | memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free)); | 236 | memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free)); |
241 | if (fnod) | 237 | if (fnod) ranode->btree.fnode_parent = 1; |
242 | ranode->btree.flags |= BP_fnode_parent; | 238 | ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes; |
243 | ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes; | 239 | if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) { |
244 | if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) { | ||
245 | struct anode *unode; | 240 | struct anode *unode; |
246 | if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) { | 241 | if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) { |
247 | unode->up = cpu_to_le32(ra); | 242 | unode->up = cpu_to_le32(ra); |
248 | unode->btree.flags &= ~BP_fnode_parent; | 243 | unode->btree.fnode_parent = 0; |
249 | mark_buffer_dirty(bh1); | 244 | mark_buffer_dirty(bh1); |
250 | brelse(bh1); | 245 | brelse(bh1); |
251 | } | 246 | } |
252 | } | 247 | } |
253 | btree->flags |= BP_internal; | 248 | btree->internal = 1; |
254 | btree->n_free_nodes = fnod ? 10 : 58; | 249 | btree->n_free_nodes = fnod ? 10 : 58; |
255 | btree->n_used_nodes = 2; | 250 | btree->n_used_nodes = 2; |
256 | btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree); | 251 | btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree); |
@@ -283,7 +278,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree) | |||
283 | int d1, d2; | 278 | int d1, d2; |
284 | go_down: | 279 | go_down: |
285 | d2 = 0; | 280 | d2 = 0; |
286 | while (bp_internal(btree1)) { | 281 | while (btree1->internal) { |
287 | ano = le32_to_cpu(btree1->u.internal[pos].down); | 282 | ano = le32_to_cpu(btree1->u.internal[pos].down); |
288 | if (level) brelse(bh); | 283 | if (level) brelse(bh); |
289 | if (hpfs_sb(s)->sb_chk) | 284 | if (hpfs_sb(s)->sb_chk) |
@@ -417,13 +412,13 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs) | |||
417 | btree->n_free_nodes = 8; | 412 | btree->n_free_nodes = 8; |
418 | btree->n_used_nodes = 0; | 413 | btree->n_used_nodes = 0; |
419 | btree->first_free = cpu_to_le16(8); | 414 | btree->first_free = cpu_to_le16(8); |
420 | btree->flags &= ~BP_internal; | 415 | btree->internal = 0; |
421 | mark_buffer_dirty(bh); | 416 | mark_buffer_dirty(bh); |
422 | } else hpfs_free_sectors(s, f, 1); | 417 | } else hpfs_free_sectors(s, f, 1); |
423 | brelse(bh); | 418 | brelse(bh); |
424 | return; | 419 | return; |
425 | } | 420 | } |
426 | while (bp_internal(btree)) { | 421 | while (btree->internal) { |
427 | nodes = btree->n_used_nodes + btree->n_free_nodes; | 422 | nodes = btree->n_used_nodes + btree->n_free_nodes; |
428 | for (i = 0; i < btree->n_used_nodes; i++) | 423 | for (i = 0; i < btree->n_used_nodes; i++) |
429 | if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f; | 424 | if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f; |
@@ -484,13 +479,13 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno) | |||
484 | struct extended_attribute *ea; | 479 | struct extended_attribute *ea; |
485 | struct extended_attribute *ea_end; | 480 | struct extended_attribute *ea_end; |
486 | if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return; | 481 | if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return; |
487 | if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree); | 482 | if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree); |
488 | else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno)); | 483 | else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno)); |
489 | ea_end = fnode_end_ea(fnode); | 484 | ea_end = fnode_end_ea(fnode); |
490 | for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) | 485 | for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) |
491 | if (ea_indirect(ea)) | 486 | if (ea->indirect) |
492 | hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea)); | 487 | hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea)); |
493 | hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l)); | 488 | hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l)); |
494 | brelse(bh); | 489 | brelse(bh); |
495 | hpfs_free_sectors(s, fno, 1); | 490 | hpfs_free_sectors(s, fno, 1); |
496 | } | 491 | } |