author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /net/sunrpc/xdr.c
tag       v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'net/sunrpc/xdr.c')
-rw-r--r--  net/sunrpc/xdr.c  917
1 files changed, 917 insertions, 0 deletions
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
new file mode 100644
index 000000000000..4484931018eb
--- /dev/null
+++ b/net/sunrpc/xdr.c
@@ -0,0 +1,917 @@
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}

u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
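
/*
 * Illustrative sketch (editor's addition, not part of this commit):
 * a minimal round trip through the two netobj helpers above. The
 * scratch buffer and sample object are hypothetical; real callers
 * encode into an RPC send buffer.
 */
static inline void xdr_netobj_example(void)
{
	u8 bytes[4] = { 1, 2, 3, 4 };
	struct xdr_netobj in = { .len = 4, .data = bytes }, out;
	u32 scratch[4];
	u32 *end;

	end = xdr_encode_netobj(scratch, &in);	/* length word + data */
	if (xdr_decode_netobj(scratch, &out) != end)
		return;
	/* out.len == 4 and out.data aliases the scratch buffer */
}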

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);
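
/*
 * Illustrative sketch (editor's addition): encoding the 5-byte opaque
 * "hello" with the helper above. One word is spent on the length and
 * XDR_QUADLEN(5) == 2 words on the data, the last three bytes zeroed
 * as RFC 1832 padding. The destination buffer is hypothetical.
 */
static inline u32 *xdr_opaque_example(u32 *buf)
{
	u32 *p = xdr_encode_opaque(buf, "hello", 5);

	/* p == buf + 3; bytes 9..11 of the buffer are now zero */
	return p;
}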

u32 *
xdr_encode_string(u32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}

u32 *
xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int len;
	char *string;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	if (lenp)
		*lenp = len;
	if ((len % 4) != 0) {
		string = (char *) p;
	} else {
		string = (char *) (p - 1);
		memmove(string, p, len);
	}
	string[len] = '\0';
	*sp = string;
	return p + XDR_QUADLEN(len);
}

u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
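
/*
 * Illustrative sketch (editor's addition): contrast of the two string
 * decoders above. xdr_decode_string() shifts the bytes down one word
 * so it can write a NUL terminator; the in-place variant just points
 * into the buffer and reports the length. 'p' is a hypothetical
 * position inside a received message.
 */
static inline int xdr_string_example(u32 *p)
{
	char *name;
	int len;

	if (xdr_decode_string_inplace(p, &name, &len, 255) == NULL)
		return -1;	/* length word exceeded maxlen */
	/* name points at len unterminated bytes inside the buffer */
	return 0;
}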

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
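
/*
 * Illustrative sketch (editor's addition): attaching a 10-byte page
 * payload with the routine above. Since 10 & 3 == 2, a two-byte zero
 * pad is exposed through the tail kvec and the buffer lengths grow by
 * 12 in total. The page vector is hypothetical.
 */
static inline void xdr_encode_pages_example(struct xdr_buf *xdr,
					    struct page **pages)
{
	xdr_encode_pages(xdr, pages, 0, 10);
	/* now: xdr->page_len == 10, xdr->tail[0].iov_len == 2 */
}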

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}

void
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
			  skb_reader_t *desc,
			  skb_read_actor_t copy_actor)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	int ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		if (ret != len || !desc->count)
			return;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		char *kaddr;

		len = PAGE_CACHE_SIZE;
		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
		if (ret != len || !desc->count)
			return;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
}


int
xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
	      struct xdr_buf *xdr, unsigned int base, int msgflags)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	int err, ret = 0;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);

	len = xdr->head[0].iov_len;
	if (base < len || (addr != NULL && base == 0)) {
		struct kvec iov = {
			.iov_base = xdr->head[0].iov_base + base,
			.iov_len = len - base,
		};
		struct msghdr msg = {
			.msg_name = addr,
			.msg_namelen = addrlen,
			.msg_flags = msgflags,
		};
		if (xdr->len > len)
			msg.msg_flags |= MSG_MORE;

		if (iov.iov_len != 0)
			err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		else
			err = kernel_sendmsg(sock, &msg, NULL, 0, 0);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != iov.iov_len)
			goto out;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}

	sendpage = sock->ops->sendpage ? : sock_no_sendpage;
	do {
		int flags = msgflags;

		len = PAGE_CACHE_SIZE;
		if (base)
			len -= base;
		if (pglen < len)
			len = pglen;

		if (pglen != len || xdr->tail[0].iov_len != 0)
			flags |= MSG_MORE;

		/* Hmm... We might be dealing with highmem pages */
		if (PageHighMem(*ppage))
			sendpage = sock_no_sendpage;
		err = sendpage(sock, *ppage, base, len, flags);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != len)
			goto out;
		base = 0;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len) {
		struct kvec iov = {
			.iov_base = xdr->tail[0].iov_base + base,
			.iov_len = len - base,
		};
		struct msghdr msg = {
			.msg_flags = msgflags,
		};
		err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
	}
out:
	return ret;
}


/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *            they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			flush_dcache_page(*pgto);
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}
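
/*
 * Illustrative sketch (editor's addition): using the address
 * convention documented above, (i << PAGE_CACHE_SHIFT) + base, to
 * open a 3-byte gap at offset 100 of the first page. Source and
 * destination may overlap, which is why the copy above runs backwards.
 */
static inline void shift_pages_example(struct page **pages)
{
	/* move bytes [100, 108) of pages[0] to [103, 111) */
	_shift_data_right_pages(pages, 103, 100, 8);
}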

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
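
/*
 * Illustrative sketch (editor's addition): the two copy helpers above
 * hide page-crossing from the caller. Here 16 bytes straddle the
 * pages[0]/pages[1] boundary; both calls split the copy in two behind
 * the scenes. The buffers are hypothetical.
 */
static inline void page_copy_example(struct page **pages,
				     const char *src, char *dst)
{
	_copy_to_pages(pages, PAGE_CACHE_SIZE - 8, src, 16);
	_copy_from_pages(dst, pages, PAGE_CACHE_SIZE - 8, 16);
}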

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
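
/*
 * Illustrative sketch (editor's addition): shaving a hypothetical
 * 16-byte RPC header off the head. The last 16 head bytes are pushed
 * into the start of the page data, whose own overflow has first been
 * shifted right (and, if present, spilled into the tail).
 */
static inline void shrink_head_example(struct xdr_buf *buf)
{
	xdr_shrink_bufhead(buf, 16);
	/* head->iov_len and buf->buflen are now 16 bytes smaller */
}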

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;

	xdr->buf = buf;
	xdr->iov = iov;
	xdr->end = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
	buf->len = iov->iov_len = (char *)p - (char *)iov->iov_base;
	xdr->p = p;
}
EXPORT_SYMBOL(xdr_init_encode);
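
/*
 * Illustrative sketch (editor's addition): starting an encode stream
 * one word into the head, as if a hypothetical caller had already
 * written a single-word header by hand; buf->len becomes 4.
 */
static inline void init_encode_example(struct xdr_stream *xdr,
				       struct xdr_buf *buf)
{
	uint32_t *p = (uint32_t *)buf->head[0].iov_base + 1;

	xdr_init_encode(xdr, buf, p);
}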

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
uint32_t * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);
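
/*
 * Illustrative sketch (editor's addition): the typical encode step
 * built on the routine above. Reserve one word, bail out if the
 * scratch buffer is exhausted, then store the value in network byte
 * order. The -EIO convention here is hypothetical.
 */
static inline int encode_u32_example(struct xdr_stream *xdr, u32 value)
{
	uint32_t *p = xdr_reserve_space(xdr, 4);

	if (p == NULL)
		return -EIO;	/* no room left in the head kvec */
	*p = htonl(value);
	return 0;
}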

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (uint32_t *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
uint32_t * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);
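
/*
 * Illustrative sketch (editor's addition): the matching decode step.
 * A NULL return means the requested words are not available inline
 * (truncated reply, or data that lives in the page list). The -EIO
 * convention is hypothetical.
 */
static inline int decode_u32_example(struct xdr_stream *xdr, u32 *value)
{
	uint32_t *p = xdr_inline_decode(xdr, 4);

	if (p == NULL)
		return -EIO;
	*value = ntohl(*p);
	return 0;
}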

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the XDR tail.
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (uint32_t *)((char *)iov->iov_base + padding);
	xdr->end = (uint32_t *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);
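
/*
 * Illustrative sketch (editor's addition): a READ-style reply decoder.
 * After the count is pulled from head[], xdr_read_pages() lines the
 * opaque data up with the page list and leaves the stream positioned
 * at the tail. Error convention is hypothetical.
 */
static inline int decode_read_example(struct xdr_stream *xdr)
{
	uint32_t *p = xdr_inline_decode(xdr, 4);
	u32 count;

	if (p == NULL)
		return -EIO;
	count = ntohl(*p);
	xdr_read_pages(xdr, count);
	/* the next xdr_inline_decode() call reads from the tail */
	return 0;
}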

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}

/* Sets subiov to the intersection of iov with the buffer of length len
 * starting base bytes after iov.  Indicates empty intersection by setting
 * length of subiov to zero.  Decrements len by length of subiov, sets base
 * to zero (or decrements it by length of iov if subiov is empty). */
static void
iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
{
	if (*base > iov->iov_len) {
		subiov->iov_base = NULL;
		subiov->iov_len = 0;
		*base -= iov->iov_len;
	} else {
		subiov->iov_base = iov->iov_base + *base;
		subiov->iov_len = min(*len, (int)iov->iov_len - *base);
		*base = 0;
	}
	*len -= subiov->iov_len;
}

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			int base, int len)
{
	int i;

	subbuf->buflen = subbuf->len = len;
	iov_subsegment(buf->head, subbuf->head, &base, &len);

	if (base < buf->page_len) {
		i = (base + buf->page_base) >> PAGE_CACHE_SHIFT;
		subbuf->pages = &buf->pages[i];
		subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK;
		subbuf->page_len = min((int)buf->page_len - base, len);
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	iov_subsegment(buf->tail, subbuf->tail, &base, &len);
	if (base || len)
		return -1;
	return 0;
}
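
/*
 * Illustrative sketch (editor's addition): carving out everything past
 * a hypothetical 28-byte RPC header, e.g. to checksum just the call
 * body. The subbuf borrows the parent's memory; nothing is copied.
 */
static inline int subsegment_example(struct xdr_buf *buf,
				     struct xdr_buf *body)
{
	return xdr_buf_subsegment(buf, body, 28, buf->len - 28);
}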

/* obj is assumed to point to allocated memory of size at least len: */
int
read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(obj, subbuf.head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(obj, subbuf.tail[0].iov_base, this_len);
out:
	return status;
}
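
/*
 * Illustrative sketch (editor's addition): pulling an 8-byte cookie
 * out of the middle of a reply with the routine above, wherever it
 * happens to land across head, pages and tail. Offset and size are
 * hypothetical.
 */
static inline int read_cookie_example(struct xdr_buf *buf, u8 *cookie)
{
	return read_bytes_from_xdr_buf(buf, 12, cookie, 8);
}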

static int
read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
{
	u32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set obj to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int
xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
{
	u32 tail_offset = buf->head[0].iov_len + buf->page_len;
	u32 obj_end_offset;

	if (read_u32_from_xdr_buf(buf, offset, &obj->len))
		goto out;
	obj_end_offset = offset + 4 + obj->len;

	if (obj_end_offset <= buf->head[0].iov_len) {
		/* The obj is contained entirely in the head: */
		obj->data = buf->head[0].iov_base + offset + 4;
	} else if (offset + 4 >= tail_offset) {
		if (obj_end_offset - tail_offset
				> buf->tail[0].iov_len)
			goto out;
		/* The obj is contained entirely in the tail: */
		obj->data = buf->tail[0].iov_base
			+ offset - tail_offset + 4;
	} else {
		/* use end of tail as storage for obj:
		 * (We don't copy to the beginning because then we'd have
		 * to worry about doing a potentially overlapping copy.
		 * This assumes the object is at most half the length of the
		 * tail.) */
		if (obj->len > buf->tail[0].iov_len)
			goto out;
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len -
				obj->len;
		if (read_bytes_from_xdr_buf(buf, offset + 4,
					obj->data, obj->len))
			goto out;

	}
	return 0;
out:
	return -1;
}