about summary refs log tree commit diff stats
path: root/fs/ceph/decode.h
diff options
context:
space:
mode:
author Sage Weil <sage@newdream.net> 2009-10-14 12:59:09 -0400
committer Sage Weil <sage@newdream.net> 2009-10-14 12:59:09 -0400
commitc89136ea4253c73e89e97f5138bb22d97ad9f564 (patch)
treeba8080adfaa6f5b84eadd4d65eff70840f9dfe22 /fs/ceph/decode.h
parent535bbb530764b1b2b3b732837f0e61e1baae7109 (diff)
ceph: convert encode/decode macros to inlines
This avoids the fugly pass by reference and makes the code a bit easier to read.
Signed-off-by: Sage Weil <sage@newdream.net>
Diffstat (limited to 'fs/ceph/decode.h')
-rw-r--r-- fs/ceph/decode.h 129
1 files changed, 67 insertions, 62 deletions
diff --git a/fs/ceph/decode.h b/fs/ceph/decode.h
index fc2769df062d..91179fb2cc3f 100644
--- a/fs/ceph/decode.h
+++ b/fs/ceph/decode.h
@@ -3,12 +3,44 @@
3 3
4#include <asm/unaligned.h> 4#include <asm/unaligned.h>
5 5
6#include "types.h"
7
6/* 8/*
7 * in all cases, 9 * in all cases,
8 * void **p pointer to position pointer 10 * void **p pointer to position pointer
9 * void *end pointer to end of buffer (last byte + 1) 11 * void *end pointer to end of buffer (last byte + 1)
10 */ 12 */
11 13
14static inline u64 ceph_decode_64(void **p)
15{
16 u64 v = get_unaligned_le64(*p);
17 *p += sizeof(u64);
18 return v;
19}
20static inline u32 ceph_decode_32(void **p)
21{
22 u32 v = get_unaligned_le32(*p);
23 *p += sizeof(u32);
24 return v;
25}
26static inline u16 ceph_decode_16(void **p)
27{
28 u16 v = get_unaligned_le16(*p);
29 *p += sizeof(u16);
30 return v;
31}
32static inline u8 ceph_decode_8(void **p)
33{
34 u8 v = *(u8 *)*p;
35 (*p)++;
36 return v;
37}
38static inline void ceph_decode_copy(void **p, void *pv, size_t n)
39{
40 memcpy(pv, *p, n);
41 *p += n;
42}
43
12/* 44/*
13 * bounds check input. 45 * bounds check input.
14 */ 46 */
@@ -18,48 +50,20 @@
18 goto bad; \ 50 goto bad; \
19 } while (0) 51 } while (0)
20 52
21#define ceph_decode_64(p, v) \
22 do { \
23 v = get_unaligned_le64(*(p)); \
24 *(p) += sizeof(u64); \
25 } while (0)
26#define ceph_decode_32(p, v) \
27 do { \
28 v = get_unaligned_le32(*(p)); \
29 *(p) += sizeof(u32); \
30 } while (0)
31#define ceph_decode_16(p, v) \
32 do { \
33 v = get_unaligned_le16(*(p)); \
34 *(p) += sizeof(u16); \
35 } while (0)
36#define ceph_decode_8(p, v) \
37 do { \
38 v = *(u8 *)*(p); \
39 (*p)++; \
40 } while (0)
41
42#define ceph_decode_copy(p, pv, n) \
43 do { \
44 memcpy(pv, *(p), n); \
45 *(p) += n; \
46 } while (0)
47
48/* bounds check too */
49#define ceph_decode_64_safe(p, end, v, bad) \ 53#define ceph_decode_64_safe(p, end, v, bad) \
50 do { \ 54 do { \
51 ceph_decode_need(p, end, sizeof(u64), bad); \ 55 ceph_decode_need(p, end, sizeof(u64), bad); \
52 ceph_decode_64(p, v); \ 56 v = ceph_decode_64(p); \
53 } while (0) 57 } while (0)
54#define ceph_decode_32_safe(p, end, v, bad) \ 58#define ceph_decode_32_safe(p, end, v, bad) \
55 do { \ 59 do { \
56 ceph_decode_need(p, end, sizeof(u32), bad); \ 60 ceph_decode_need(p, end, sizeof(u32), bad); \
57 ceph_decode_32(p, v); \ 61 v = ceph_decode_32(p); \
58 } while (0) 62 } while (0)
59#define ceph_decode_16_safe(p, end, v, bad) \ 63#define ceph_decode_16_safe(p, end, v, bad) \
60 do { \ 64 do { \
61 ceph_decode_need(p, end, sizeof(u16), bad); \ 65 ceph_decode_need(p, end, sizeof(u16), bad); \
62 ceph_decode_16(p, v); \ 66 v = ceph_decode_16(p); \
63 } while (0) 67 } while (0)
64 68
65#define ceph_decode_copy_safe(p, end, pv, n, bad) \ 69#define ceph_decode_copy_safe(p, end, pv, n, bad) \
@@ -71,41 +75,42 @@
71/* 75/*
72 * struct ceph_timespec <-> struct timespec 76 * struct ceph_timespec <-> struct timespec
73 */ 77 */
74#define ceph_decode_timespec(ts, tv) \ 78static inline void ceph_decode_timespec(struct timespec *ts,
75 do { \ 79 struct ceph_timespec *tv)
76 (ts)->tv_sec = le32_to_cpu((tv)->tv_sec); \ 80{
77 (ts)->tv_nsec = le32_to_cpu((tv)->tv_nsec); \ 81 ts->tv_sec = le32_to_cpu(tv->tv_sec);
78 } while (0) 82 ts->tv_nsec = le32_to_cpu(tv->tv_nsec);
79#define ceph_encode_timespec(tv, ts) \ 83}
80 do { \ 84static inline void ceph_encode_timespec(struct ceph_timespec *tv,
81 (tv)->tv_sec = cpu_to_le32((ts)->tv_sec); \ 85 struct timespec *ts)
82 (tv)->tv_nsec = cpu_to_le32((ts)->tv_nsec); \ 86{
83 } while (0) 87 tv->tv_sec = cpu_to_le32(ts->tv_sec);
84 88 tv->tv_nsec = cpu_to_le32(ts->tv_nsec);
89}
85 90
86/* 91/*
87 * encoders 92 * encoders
88 */ 93 */
89#define ceph_encode_64(p, v) \ 94static inline void ceph_encode_64(void **p, u64 v)
90 do { \ 95{
91 put_unaligned_le64(v, (__le64 *)*(p)); \ 96 put_unaligned_le64(v, (__le64 *)*p);
92 *(p) += sizeof(u64); \ 97 *p += sizeof(u64);
93 } while (0) 98}
94#define ceph_encode_32(p, v) \ 99static inline void ceph_encode_32(void **p, u32 v)
95 do { \ 100{
96 put_unaligned_le32(v, (__le32 *)*(p)); \ 101 put_unaligned_le32(v, (__le32 *)*p);
97 *(p) += sizeof(u32); \ 102 *p += sizeof(u32);
98 } while (0) 103}
99#define ceph_encode_16(p, v) \ 104static inline void ceph_encode_16(void **p, u16 v)
100 do { \ 105{
101 put_unaligned_le16(v, (__le16 *)*(p)); \ 106 put_unaligned_le16(v, (__le16 *)*p);
102 *(p) += sizeof(u16); \ 107 *p += sizeof(u16);
103 } while (0) 108}
104#define ceph_encode_8(p, v) \ 109static inline void ceph_encode_8(void **p, u8 v)
105 do { \ 110{
106 *(u8 *)*(p) = v; \ 111 *(u8 *)*p = v;
107 (*(p))++; \ 112 (*p)++;
108 } while (0) 113}
109 114
110/* 115/*
111 * filepath, string encoders 116 * filepath, string encoders