author    David Miller <davem@davemloft.net>     2008-02-15 10:40:52 -0500
committer Chris Mason <chris.mason@oracle.com>   2008-09-25 11:04:00 -0400
commit    df68b8a7ad4a18c9e63f1c12015a59c3b7031adb (patch)
tree      f2e6569b5f4843a01f23068fdfd3b450c8258459 /fs/btrfs/struct-funcs.c
parent    39b5637f6f195852259004bb27b58e2dcf9fb378 (diff)
Btrfs: unaligned access fixes
Btrfs set/get macros lose type information needed to avoid unaligned accesses on sparc64. Here is a patch for the kernel bits which fixes most of the unaligned accesses on sparc64.

btrfs_name_hash is modified to return the hash value instead of getting a return location via a (potentially unaligned) pointer.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
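The patch works because the btrfs on-disk structures are declared __attribute__((packed)): reading or writing a member through a pointer of the real struct type tells the compiler the field may be unaligned, so it emits alignment-safe code, whereas dereferencing a bare __le##bits pointer promises natural alignment that an offset into an extent buffer cannot guarantee. A minimal stand-alone sketch of the difference (userspace C with a hypothetical struct and names; not btrfs code):

/*
 * Minimal stand-alone sketch (hypothetical names -- not btrfs code) of why
 * the typed pointer matters.  On sparc64 the "raw" read below can trap,
 * while the packed-member access cannot.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for a packed on-disk item, similar to the structs in ctree.h. */
struct disk_item {
	uint8_t  pad;		/* pushes 'value' to offset 1, i.e. misaligned */
	uint64_t value;
} __attribute__((packed));

int main(void)
{
	unsigned char buf[32];
	struct disk_item *p = (struct disk_item *)buf;

	memset(buf, 0, sizeof(buf));

	/*
	 * Access through the packed struct type: the compiler knows the
	 * member may be unaligned and emits byte-wise loads/stores on
	 * strict-alignment machines such as sparc64.  This is the pattern
	 * the patch switches to (p->member).
	 */
	p->value = 0x1122334455667788ULL;

	/*
	 * The pattern the patch removes: compute the address by hand and
	 * cast it to a bare 64-bit pointer.  The cast discards the "may be
	 * unaligned" information, so the compiler is free to use a single
	 * 8-byte load, which faults on sparc64 for a misaligned address
	 * (x86 happens to tolerate it).
	 */
	uint64_t *raw = (uint64_t *)(buf + offsetof(struct disk_item, value));
	printf("raw read: 0x%llx\n", (unsigned long long)*raw);

	return 0;
}

Hence the macros in the diff below keep a type *p and access p->member instead of casting the computed address to __le##bits *.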
Diffstat (limited to 'fs/btrfs/struct-funcs.c')
-rw-r--r-- fs/btrfs/struct-funcs.c | 30
1 file changed, 14 insertions(+), 16 deletions(-)
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index c5715a60554c..ad03a32d1116 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -21,16 +21,15 @@
 u##bits btrfs_##name(struct extent_buffer *eb,				\
 		   type *s)						\
 {									\
-	unsigned long offset = (unsigned long)s +			\
-				offsetof(type, member);			\
-	__le##bits *tmp;						\
+	unsigned long part_offset = (unsigned long)s;			\
+	unsigned long offset = part_offset + offsetof(type, member);	\
+	type *p;							\
 	/* ugly, but we want the fast path here */			\
 	if (eb->map_token && offset >= eb->map_start &&			\
 	    offset + sizeof(((type *)0)->member) <= eb->map_start +	\
 	    eb->map_len) {						\
-		tmp = (__le##bits *)(eb->kaddr + offset -		\
-				     eb->map_start);			\
-		return le##bits##_to_cpu(*tmp);				\
+		p = (type *)(eb->kaddr + part_offset - eb->map_start);	\
+		return le##bits##_to_cpu(p->member);			\
 	}								\
 	{								\
 		int err;						\
@@ -48,8 +47,8 @@ u##bits btrfs_##name(struct extent_buffer *eb, \
 		read_eb_member(eb, s, type, member, &res);		\
 		return le##bits##_to_cpu(res);				\
 	}								\
-	tmp = (__le##bits *)(kaddr + offset - map_start);		\
-	res = le##bits##_to_cpu(*tmp);					\
+	p = (type *)(kaddr + part_offset - map_start);			\
+	res = le##bits##_to_cpu(p->member);				\
 	if (unmap_on_exit)						\
 		unmap_extent_buffer(eb, map_token, KM_USER1);		\
 	return res;							\
@@ -58,16 +57,15 @@ u##bits btrfs_##name(struct extent_buffer *eb, \
 void btrfs_set_##name(struct extent_buffer *eb,			\
 		      type *s, u##bits val)				\
 {									\
-	unsigned long offset = (unsigned long)s +			\
-				offsetof(type, member);			\
-	__le##bits *tmp;						\
+	unsigned long part_offset = (unsigned long)s;			\
+	unsigned long offset = part_offset + offsetof(type, member);	\
+	type *p;							\
 	/* ugly, but we want the fast path here */			\
 	if (eb->map_token && offset >= eb->map_start &&			\
 	    offset + sizeof(((type *)0)->member) <= eb->map_start +	\
 	    eb->map_len) {						\
-		tmp = (__le##bits *)(eb->kaddr + offset -		\
-				     eb->map_start);			\
-		*tmp = cpu_to_le##bits(val);				\
+		p = (type *)(eb->kaddr + part_offset - eb->map_start);	\
+		p->member = cpu_to_le##bits(val);			\
 		return;							\
 	}								\
 	{								\
@@ -86,8 +84,8 @@ void btrfs_set_##name(struct extent_buffer *eb, \
 		write_eb_member(eb, s, type, member, &val);		\
 		return;							\
 	}								\
-	tmp = (__le##bits *)(kaddr + offset - map_start);		\
-	*tmp = cpu_to_le##bits(val);					\
+	p = (type *)(kaddr + part_offset - map_start);			\
+	p->member = cpu_to_le##bits(val);				\
 	if (unmap_on_exit)						\
 		unmap_extent_buffer(eb, map_token, KM_USER1);		\
 	}								\