diff options
-rw-r--r-- | fs/hfsplus/btree.c | 112 | ||||
-rw-r--r-- | fs/hfsplus/hfsplus_fs.h | 1 |
2 files changed, 113 insertions, 0 deletions
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c index 0c6540c91167..0fcec8b2a90b 100644 --- a/fs/hfsplus/btree.c +++ b/fs/hfsplus/btree.c | |||
@@ -15,6 +15,118 @@ | |||
15 | #include "hfsplus_fs.h" | 15 | #include "hfsplus_fs.h" |
16 | #include "hfsplus_raw.h" | 16 | #include "hfsplus_raw.h" |
17 | 17 | ||
/*
 * Initial source code of clump size calculation is gotten
 * from http://opensource.apple.com/tarballs/diskdev_cmds/
 */
#define CLUMP_ENTRIES 15

/*
 * Clump sizes in MB for the three special B-tree files, indexed as
 * clumptbl[column + row * 3]: column 0 = Attributes, 1 = Catalog,
 * 2 = Extents; each row doubles the volume size starting at 1GB.
 * The table is only ever read, hence const.
 */
static const short clumptbl[CLUMP_ENTRIES * 3] = {
	/*
	 *	    Volume	Attributes	 Catalog	 Extents
	 *	     Size	Clump (MB)	Clump (MB)	Clump (MB)
	 */
	/*   1GB */	  4,		  4,		 4,
	/*   2GB */	  6,		  6,		 4,
	/*   4GB */	  8,		  8,		 4,
	/*   8GB */	 11,		 11,		 5,
	/*
	 * For volumes 16GB and larger, we want to make sure that a full OS
	 * install won't require fragmentation of the Catalog or Attributes
	 * B-trees.  We do this by making the clump sizes sufficiently large,
	 * and by leaving a gap after the B-trees for them to grow into.
	 *
	 * For SnowLeopard 10A298, a FullNetInstall with all packages selected
	 * results in:
	 * Catalog B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       31616
	 *	freeNodes:         1978
	 * (used = 231.55 MB)
	 * Attributes B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       63232
	 *	freeNodes:          958
	 * (used = 486.52 MB)
	 *
	 * We also want Time Machine backup volumes to have a sufficiently
	 * large clump size to reduce fragmentation.
	 *
	 * The series of numbers for Catalog and Attribute form a geometric
	 * series.  For Catalog (16GB to 512GB), each term is 8**(1/5) times
	 * the previous term.  For Attributes (16GB to 512GB), each term is
	 * 4**(1/5) times the previous term.  For 1TB to 16TB, each term is
	 * 2**(1/5) times the previous term.
	 */
	/*  16GB */	 64,		 32,		 5,
	/*  32GB */	 84,		 49,		 6,
	/*  64GB */	111,		 74,		 7,
	/* 128GB */	147,		111,		 8,
	/* 256GB */	194,		169,		 9,
	/* 512GB */	256,		256,		11,
	/*   1TB */	294,		294,		14,
	/*   2TB */	338,		338,		16,
	/*   4TB */	388,		388,		20,
	/*   8TB */	446,		446,		25,
	/*  16TB */	512,		512,		32
};
73 | |||
74 | u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, | ||
75 | u64 sectors, int file_id) | ||
76 | { | ||
77 | u32 mod = max(node_size, block_size); | ||
78 | u32 clump_size; | ||
79 | int column; | ||
80 | int i; | ||
81 | |||
82 | /* Figure out which column of the above table to use for this file. */ | ||
83 | switch (file_id) { | ||
84 | case HFSPLUS_ATTR_CNID: | ||
85 | column = 0; | ||
86 | break; | ||
87 | case HFSPLUS_CAT_CNID: | ||
88 | column = 1; | ||
89 | break; | ||
90 | default: | ||
91 | column = 2; | ||
92 | break; | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | * The default clump size is 0.8% of the volume size. And | ||
97 | * it must also be a multiple of the node and block size. | ||
98 | */ | ||
99 | if (sectors < 0x200000) { | ||
100 | clump_size = sectors << 2; /* 0.8 % */ | ||
101 | if (clump_size < (8 * node_size)) | ||
102 | clump_size = 8 * node_size; | ||
103 | } else { | ||
104 | /* turn exponent into table index... */ | ||
105 | for (i = 0, sectors = sectors >> 22; | ||
106 | sectors && (i < CLUMP_ENTRIES - 1); | ||
107 | ++i, sectors = sectors >> 1) { | ||
108 | /* empty body */ | ||
109 | } | ||
110 | |||
111 | clump_size = clumptbl[column + (i) * 3] * 1024 * 1024; | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * Round the clump size to a multiple of node and block size. | ||
116 | * NOTE: This rounds down. | ||
117 | */ | ||
118 | clump_size /= mod; | ||
119 | clump_size *= mod; | ||
120 | |||
121 | /* | ||
122 | * Rounding down could have rounded down to 0 if the block size was | ||
123 | * greater than the clump size. If so, just use one block or node. | ||
124 | */ | ||
125 | if (clump_size == 0) | ||
126 | clump_size = mod; | ||
127 | |||
128 | return clump_size; | ||
129 | } | ||
18 | 130 | ||
19 | /* Get a reference to a B*Tree and do some initial checks */ | 131 | /* Get a reference to a B*Tree and do some initial checks */ |
20 | struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id) | 132 | struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id) |
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index 2b9cd01696e2..1e36f18e904b 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h | |||
@@ -380,6 +380,7 @@ int hfsplus_block_allocate(struct super_block *, u32, u32, u32 *); | |||
380 | int hfsplus_block_free(struct super_block *, u32, u32); | 380 | int hfsplus_block_free(struct super_block *, u32, u32); |
381 | 381 | ||
382 | /* btree.c */ | 382 | /* btree.c */ |
383 | u32 hfsplus_calc_btree_clump_size(u32, u32, u64, int); | ||
383 | struct hfs_btree *hfs_btree_open(struct super_block *, u32); | 384 | struct hfs_btree *hfs_btree_open(struct super_block *, u32); |
384 | void hfs_btree_close(struct hfs_btree *); | 385 | void hfs_btree_close(struct hfs_btree *); |
385 | int hfs_btree_write(struct hfs_btree *); | 386 | int hfs_btree_write(struct hfs_btree *); |