Diffstat (limited to 'fs/hpfs/anode.c')
-rw-r--r--	fs/hpfs/anode.c	136
1 file changed, 68 insertions, 68 deletions
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c
index f2a038411e3c..08b503e8ed29 100644
--- a/fs/hpfs/anode.c
+++ b/fs/hpfs/anode.c
@@ -22,8 +22,8 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
 	if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
 	if (btree->internal) {
 		for (i = 0; i < btree->n_used_nodes; i++)
-			if (btree->u.internal[i].file_secno > sec) {
-				a = btree->u.internal[i].down;
+			if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
+				a = le32_to_cpu(btree->u.internal[i].down);
 				brelse(bh);
 				if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
 				btree = &anode->btree;
@@ -34,18 +34,18 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
 		return -1;
 	}
 	for (i = 0; i < btree->n_used_nodes; i++)
-		if (btree->u.external[i].file_secno <= sec &&
-		    btree->u.external[i].file_secno + btree->u.external[i].length > sec) {
-			a = btree->u.external[i].disk_secno + sec - btree->u.external[i].file_secno;
+		if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
+		    le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
+			a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
 			if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
 				brelse(bh);
 				return -1;
 			}
 			if (inode) {
 				struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
-				hpfs_inode->i_file_sec = btree->u.external[i].file_secno;
-				hpfs_inode->i_disk_sec = btree->u.external[i].disk_secno;
-				hpfs_inode->i_n_secs = btree->u.external[i].length;
+				hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
+				hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
+				hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
 			}
 			brelse(bh);
 			return a;
@@ -83,8 +83,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
 		return -1;
 	}
 	if (btree->internal) {
-		a = btree->u.internal[n].down;
-		btree->u.internal[n].file_secno = -1;
+		a = le32_to_cpu(btree->u.internal[n].down);
+		btree->u.internal[n].file_secno = cpu_to_le32(-1);
 		mark_buffer_dirty(bh);
 		brelse(bh);
 		if (hpfs_sb(s)->sb_chk)
@@ -94,15 +94,15 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
 		goto go_down;
 	}
 	if (n >= 0) {
-		if (btree->u.external[n].file_secno + btree->u.external[n].length != fsecno) {
+		if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
 			hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
-				btree->u.external[n].file_secno + btree->u.external[n].length, fsecno,
+				le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
 				fnod?'f':'a', node);
 			brelse(bh);
 			return -1;
 		}
-		if (hpfs_alloc_if_possible(s, se = btree->u.external[n].disk_secno + btree->u.external[n].length)) {
-			btree->u.external[n].length++;
+		if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
+			btree->u.external[n].length = cpu_to_le32(le32_to_cpu(btree->u.external[n].length) + 1);
 			mark_buffer_dirty(bh);
 			brelse(bh);
 			return se;
@@ -119,16 +119,16 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
 		brelse(bh);
 		return -1;
 	}
-	fs = n < 0 ? 0 : btree->u.external[n].file_secno + btree->u.external[n].length;
+	fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
 	if (!btree->n_free_nodes) {
-		up = a != node ? anode->up : -1;
+		up = a != node ? le32_to_cpu(anode->up) : -1;
 		if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
 			brelse(bh);
 			hpfs_free_sectors(s, se, 1);
 			return -1;
 		}
 		if (a == node && fnod) {
-			anode->up = node;
+			anode->up = cpu_to_le32(node);
 			anode->btree.fnode_parent = 1;
 			anode->btree.n_used_nodes = btree->n_used_nodes;
 			anode->btree.first_free = btree->first_free;
@@ -137,9 +137,9 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
 			btree->internal = 1;
 			btree->n_free_nodes = 11;
 			btree->n_used_nodes = 1;
-			btree->first_free = (char *)&(btree->u.internal[1]) - (char *)btree;
-			btree->u.internal[0].file_secno = -1;
-			btree->u.internal[0].down = na;
+			btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
+			btree->u.internal[0].file_secno = cpu_to_le32(-1);
+			btree->u.internal[0].down = cpu_to_le32(na);
 			mark_buffer_dirty(bh);
 		} else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
 			brelse(bh);
@@ -153,15 +153,15 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
 		btree = &anode->btree;
 	}
 	btree->n_free_nodes--; n = btree->n_used_nodes++;
-	btree->first_free += 12;
-	btree->u.external[n].disk_secno = se;
-	btree->u.external[n].file_secno = fs;
-	btree->u.external[n].length = 1;
+	btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 12);
+	btree->u.external[n].disk_secno = cpu_to_le32(se);
+	btree->u.external[n].file_secno = cpu_to_le32(fs);
+	btree->u.external[n].length = cpu_to_le32(1);
 	mark_buffer_dirty(bh);
 	brelse(bh);
 	if ((a == node && fnod) || na == -1) return se;
 	c2 = 0;
-	while (up != -1) {
+	while (up != (anode_secno)-1) {
 		struct anode *new_anode;
 		if (hpfs_sb(s)->sb_chk)
 			if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
@@ -174,47 +174,47 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
 		}
 		if (btree->n_free_nodes) {
 			btree->n_free_nodes--; n = btree->n_used_nodes++;
-			btree->first_free += 8;
-			btree->u.internal[n].file_secno = -1;
-			btree->u.internal[n].down = na;
-			btree->u.internal[n-1].file_secno = fs;
+			btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 8);
+			btree->u.internal[n].file_secno = cpu_to_le32(-1);
+			btree->u.internal[n].down = cpu_to_le32(na);
+			btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
 			mark_buffer_dirty(bh);
 			brelse(bh);
 			brelse(bh2);
 			hpfs_free_sectors(s, ra, 1);
 			if ((anode = hpfs_map_anode(s, na, &bh))) {
-				anode->up = up;
+				anode->up = cpu_to_le32(up);
 				anode->btree.fnode_parent = up == node && fnod;
 				mark_buffer_dirty(bh);
 				brelse(bh);
 			}
 			return se;
 		}
-		up = up != node ? anode->up : -1;
-		btree->u.internal[btree->n_used_nodes - 1].file_secno = /*fs*/-1;
+		up = up != node ? le32_to_cpu(anode->up) : -1;
+		btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
 		mark_buffer_dirty(bh);
 		brelse(bh);
 		a = na;
 		if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
 			anode = new_anode;
-			/*anode->up = up != -1 ? up : ra;*/
+			/*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
 			anode->btree.internal = 1;
 			anode->btree.n_used_nodes = 1;
 			anode->btree.n_free_nodes = 59;
-			anode->btree.first_free = 16;
-			anode->btree.u.internal[0].down = a;
-			anode->btree.u.internal[0].file_secno = -1;
+			anode->btree.first_free = cpu_to_le16(16);
+			anode->btree.u.internal[0].down = cpu_to_le32(a);
+			anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
 			mark_buffer_dirty(bh);
 			brelse(bh);
 			if ((anode = hpfs_map_anode(s, a, &bh))) {
-				anode->up = na;
+				anode->up = cpu_to_le32(na);
 				mark_buffer_dirty(bh);
 				brelse(bh);
 			}
 		} else na = a;
 	}
 	if ((anode = hpfs_map_anode(s, na, &bh))) {
-		anode->up = node;
+		anode->up = cpu_to_le32(node);
 		if (fnod) anode->btree.fnode_parent = 1;
 		mark_buffer_dirty(bh);
 		brelse(bh);
@@ -232,14 +232,14 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
 		}
 		btree = &fnode->btree;
 	}
-	ranode->up = node;
-	memcpy(&ranode->btree, btree, btree->first_free);
+	ranode->up = cpu_to_le32(node);
+	memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
 	if (fnod) ranode->btree.fnode_parent = 1;
 	ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes;
 	if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
 		struct anode *unode;
-		if ((unode = hpfs_map_anode(s, ranode->u.internal[n].down, &bh1))) {
-			unode->up = ra;
+		if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
+			unode->up = cpu_to_le32(ra);
 			unode->btree.fnode_parent = 0;
 			mark_buffer_dirty(bh1);
 			brelse(bh1);
@@ -248,11 +248,11 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
 	btree->internal = 1;
 	btree->n_free_nodes = fnod ? 10 : 58;
 	btree->n_used_nodes = 2;
-	btree->first_free = (char *)&btree->u.internal[2] - (char *)btree;
-	btree->u.internal[0].file_secno = fs;
-	btree->u.internal[0].down = ra;
-	btree->u.internal[1].file_secno = -1;
-	btree->u.internal[1].down = na;
+	btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
+	btree->u.internal[0].file_secno = cpu_to_le32(fs);
+	btree->u.internal[0].down = cpu_to_le32(ra);
+	btree->u.internal[1].file_secno = cpu_to_le32(-1);
+	btree->u.internal[1].down = cpu_to_le32(na);
 	mark_buffer_dirty(bh);
 	brelse(bh);
 	mark_buffer_dirty(bh2);
@@ -279,7 +279,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
 	go_down:
 	d2 = 0;
 	while (btree1->internal) {
-		ano = btree1->u.internal[pos].down;
+		ano = le32_to_cpu(btree1->u.internal[pos].down);
 		if (level) brelse(bh);
 		if (hpfs_sb(s)->sb_chk)
 			if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
@@ -290,7 +290,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
 		pos = 0;
 	}
 	for (i = 0; i < btree1->n_used_nodes; i++)
-		hpfs_free_sectors(s, btree1->u.external[i].disk_secno, btree1->u.external[i].length);
+		hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
 	go_up:
 	if (!level) return;
 	brelse(bh);
@@ -298,13 +298,13 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
 		if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
 	hpfs_free_sectors(s, ano, 1);
 	oano = ano;
-	ano = anode->up;
+	ano = le32_to_cpu(anode->up);
 	if (--level) {
 		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
 		btree1 = &anode->btree;
 	} else btree1 = btree;
 	for (i = 0; i < btree1->n_used_nodes; i++) {
-		if (btree1->u.internal[i].down == oano) {
+		if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
 			if ((pos = i + 1) < btree1->n_used_nodes)
 				goto go_down;
 			else
@@ -411,7 +411,7 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
 		if (fno) {
 			btree->n_free_nodes = 8;
 			btree->n_used_nodes = 0;
-			btree->first_free = 8;
+			btree->first_free = cpu_to_le16(8);
 			btree->internal = 0;
 			mark_buffer_dirty(bh);
 		} else hpfs_free_sectors(s, f, 1);
@@ -421,22 +421,22 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
 	while (btree->internal) {
 		nodes = btree->n_used_nodes + btree->n_free_nodes;
 		for (i = 0; i < btree->n_used_nodes; i++)
-			if (btree->u.internal[i].file_secno >= secs) goto f;
+			if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
 		brelse(bh);
 		hpfs_error(s, "internal btree %08x doesn't end with -1", node);
 		return;
 		f:
 		for (j = i + 1; j < btree->n_used_nodes; j++)
-			hpfs_ea_remove(s, btree->u.internal[j].down, 1, 0);
+			hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
 		btree->n_used_nodes = i + 1;
 		btree->n_free_nodes = nodes - btree->n_used_nodes;
-		btree->first_free = 8 + 8 * btree->n_used_nodes;
+		btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
 		mark_buffer_dirty(bh);
-		if (btree->u.internal[i].file_secno == secs) {
+		if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
 			brelse(bh);
 			return;
 		}
-		node = btree->u.internal[i].down;
+		node = le32_to_cpu(btree->u.internal[i].down);
 		brelse(bh);
 		if (hpfs_sb(s)->sb_chk)
 			if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
@@ -446,25 +446,25 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
 	}
 	nodes = btree->n_used_nodes + btree->n_free_nodes;
 	for (i = 0; i < btree->n_used_nodes; i++)
-		if (btree->u.external[i].file_secno + btree->u.external[i].length >= secs) goto ff;
+		if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
 	brelse(bh);
 	return;
 	ff:
-	if (secs <= btree->u.external[i].file_secno) {
+	if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
 		hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
 		if (i) i--;
 	}
-	else if (btree->u.external[i].file_secno + btree->u.external[i].length > secs) {
-		hpfs_free_sectors(s, btree->u.external[i].disk_secno + secs -
-			btree->u.external[i].file_secno, btree->u.external[i].length
-			- secs + btree->u.external[i].file_secno); /* I hope gcc optimizes this :-) */
-		btree->u.external[i].length = secs - btree->u.external[i].file_secno;
+	else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
+		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
+			le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
+			- secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
+		btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
 	}
 	for (j = i + 1; j < btree->n_used_nodes; j++)
-		hpfs_free_sectors(s, btree->u.external[j].disk_secno, btree->u.external[j].length);
+		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
 	btree->n_used_nodes = i + 1;
 	btree->n_free_nodes = nodes - btree->n_used_nodes;
-	btree->first_free = 8 + 12 * btree->n_used_nodes;
+	btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
 	mark_buffer_dirty(bh);
 	brelse(bh);
 }
@@ -480,12 +480,12 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
 	struct extended_attribute *ea_end;
 	if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
 	if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree);
-	else hpfs_remove_dtree(s, fnode->u.external[0].disk_secno);
+	else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
 	ea_end = fnode_end_ea(fnode);
 	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
 		if (ea->indirect)
 			hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
-	hpfs_ea_ext_remove(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l);
+	hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l));
 	brelse(bh);
 	hpfs_free_sectors(s, fno, 1);
 }