Diffstat (limited to 'net')
 -rw-r--r--  net/sunrpc/svc.c     | 154
 -rw-r--r--  net/sunrpc/svcsock.c | 101
 2 files changed, 181 insertions, 74 deletions
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 8353829bc5c6..b4db53ff1435 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -27,22 +27,26 @@
 
 #define RPCDBG_FACILITY	RPCDBG_SVCDSP
 
+#define svc_serv_is_pooled(serv)    ((serv)->sv_function)
+
 /*
  * Mode for mapping cpus to pools.
  */
 enum {
-	SVC_POOL_NONE = -1,	/* uninitialised, choose one of the others */
+	SVC_POOL_AUTO = -1,	/* choose one of the others */
 	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
 				 * (legacy & UP mode) */
 	SVC_POOL_PERCPU,	/* one pool per cpu */
 	SVC_POOL_PERNODE	/* one pool per numa node */
 };
+#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL
 
 /*
  * Structure for mapping cpus to pools and vice versa.
  * Setup once during sunrpc initialisation.
  */
 static struct svc_pool_map {
+	int count;			/* How many svc_servs use us */
 	int mode;			/* Note: int not enum to avoid
 					 * warnings about "enumeration value
 					 * not handled in switch" */
@@ -50,9 +54,63 @@ static struct svc_pool_map {
 	unsigned int *pool_to;		/* maps pool id to cpu or node */
 	unsigned int *to_pool;		/* maps cpu or node to pool id */
 } svc_pool_map = {
-	.mode = SVC_POOL_NONE
+	.count = 0,
+	.mode = SVC_POOL_DEFAULT
 };
+static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
+
+static int
+param_set_pool_mode(const char *val, struct kernel_param *kp)
+{
+	int *ip = (int *)kp->arg;
+	struct svc_pool_map *m = &svc_pool_map;
+	int err;
+
+	mutex_lock(&svc_pool_map_mutex);
+
+	err = -EBUSY;
+	if (m->count)
+		goto out;
+
+	err = 0;
+	if (!strncmp(val, "auto", 4))
+		*ip = SVC_POOL_AUTO;
+	else if (!strncmp(val, "global", 6))
+		*ip = SVC_POOL_GLOBAL;
+	else if (!strncmp(val, "percpu", 6))
+		*ip = SVC_POOL_PERCPU;
+	else if (!strncmp(val, "pernode", 7))
+		*ip = SVC_POOL_PERNODE;
+	else
+		err = -EINVAL;
+
+out:
+	mutex_unlock(&svc_pool_map_mutex);
+	return err;
+}
 
+static int
+param_get_pool_mode(char *buf, struct kernel_param *kp)
+{
+	int *ip = (int *)kp->arg;
+
+	switch (*ip)
+	{
+	case SVC_POOL_AUTO:
+		return strlcpy(buf, "auto", 20);
+	case SVC_POOL_GLOBAL:
+		return strlcpy(buf, "global", 20);
+	case SVC_POOL_PERCPU:
+		return strlcpy(buf, "percpu", 20);
+	case SVC_POOL_PERNODE:
+		return strlcpy(buf, "pernode", 20);
+	default:
+		return sprintf(buf, "%d", *ip);
+	}
+}
+
+module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
+		  &svc_pool_map.mode, 0644);
 
 /*
  * Detect best pool mapping mode heuristically,
@@ -166,18 +224,25 @@ svc_pool_map_init_pernode(struct svc_pool_map *m)
 
 
 /*
- * Build the global map of cpus to pools and vice versa.
+ * Add a reference to the global map of cpus to pools (and
+ * vice versa).  Initialise the map if we're the first user.
+ * Returns the number of pools.
  */
 static unsigned int
-svc_pool_map_init(void)
+svc_pool_map_get(void)
 {
 	struct svc_pool_map *m = &svc_pool_map;
 	int npools = -1;
 
-	if (m->mode != SVC_POOL_NONE)
+	mutex_lock(&svc_pool_map_mutex);
+
+	if (m->count++) {
+		mutex_unlock(&svc_pool_map_mutex);
 		return m->npools;
+	}
 
-	m->mode = svc_pool_map_choose_mode();
+	if (m->mode == SVC_POOL_AUTO)
+		m->mode = svc_pool_map_choose_mode();
 
 	switch (m->mode) {
 	case SVC_POOL_PERCPU:
@@ -195,9 +260,36 @@ svc_pool_map_init(void)
 	}
 	m->npools = npools;
 
+	mutex_unlock(&svc_pool_map_mutex);
 	return m->npools;
 }
 
+
+/*
+ * Drop a reference to the global map of cpus to pools.
+ * When the last reference is dropped, the map data is
+ * freed; this allows the sysadmin to change the pool
+ * mode using the pool_mode module option without
+ * rebooting or re-loading sunrpc.ko.
+ */
+static void
+svc_pool_map_put(void)
+{
+	struct svc_pool_map *m = &svc_pool_map;
+
+	mutex_lock(&svc_pool_map_mutex);
+
+	if (!--m->count) {
+		m->mode = SVC_POOL_DEFAULT;
+		kfree(m->to_pool);
+		kfree(m->pool_to);
+		m->npools = 0;
+	}
+
+	mutex_unlock(&svc_pool_map_mutex);
+}
+
+
 /*
  * Set the current thread's cpus_allowed mask so that it
  * will only run on cpus in the given pool.
@@ -212,10 +304,9 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
 
 	/*
 	 * The caller checks for sv_nrpools > 1, which
-	 * implies that we've been initialized and the
-	 * map mode is not NONE.
+	 * implies that we've been initialized.
 	 */
-	BUG_ON(m->mode == SVC_POOL_NONE);
+	BUG_ON(m->count == 0);
 
 	switch (m->mode)
 	{
@@ -246,18 +337,19 @@ svc_pool_for_cpu(struct svc_serv *serv, int cpu)
 	unsigned int pidx = 0;
 
 	/*
-	 * SVC_POOL_NONE happens in a pure client when
+	 * An uninitialised map happens in a pure client when
 	 * lockd is brought up, so silently treat it the
 	 * same as SVC_POOL_GLOBAL.
 	 */
-
-	switch (m->mode) {
-	case SVC_POOL_PERCPU:
-		pidx = m->to_pool[cpu];
-		break;
-	case SVC_POOL_PERNODE:
-		pidx = m->to_pool[cpu_to_node(cpu)];
-		break;
+	if (svc_serv_is_pooled(serv)) {
+		switch (m->mode) {
+		case SVC_POOL_PERCPU:
+			pidx = m->to_pool[cpu];
+			break;
+		case SVC_POOL_PERNODE:
+			pidx = m->to_pool[cpu_to_node(cpu)];
+			break;
+		}
 	}
 	return &serv->sv_pools[pidx % serv->sv_nrpools];
 }
@@ -347,7 +439,7 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
 		  svc_thread_fn func, int sig, struct module *mod)
 {
 	struct svc_serv *serv;
-	unsigned int npools = svc_pool_map_init();
+	unsigned int npools = svc_pool_map_get();
 
 	serv = __svc_create(prog, bufsize, npools, shutdown);
 
@@ -367,6 +459,7 @@ void
 svc_destroy(struct svc_serv *serv)
 {
 	struct svc_sock	*svsk;
+	struct svc_sock	*tmp;
 
 	dprintk("svc: svc_destroy(%s, %d)\n",
 		serv->sv_program->pg_name,
@@ -382,24 +475,23 @@ svc_destroy(struct svc_serv *serv)
 
 	del_timer_sync(&serv->sv_temptimer);
 
-	while (!list_empty(&serv->sv_tempsocks)) {
-		svsk = list_entry(serv->sv_tempsocks.next,
-				  struct svc_sock,
-				  sk_list);
-		svc_close_socket(svsk);
-	}
+	list_for_each_entry_safe(svsk, tmp, &serv->sv_tempsocks, sk_list)
+		svc_force_close_socket(svsk);
+
 	if (serv->sv_shutdown)
 		serv->sv_shutdown(serv);
 
-	while (!list_empty(&serv->sv_permsocks)) {
-		svsk = list_entry(serv->sv_permsocks.next,
-				  struct svc_sock,
-				  sk_list);
-		svc_close_socket(svsk);
-	}
+	list_for_each_entry_safe(svsk, tmp, &serv->sv_permsocks, sk_list)
+		svc_force_close_socket(svsk);
+
+	BUG_ON(!list_empty(&serv->sv_permsocks));
+	BUG_ON(!list_empty(&serv->sv_tempsocks));
 
 	cache_clean_deferred(serv);
 
+	if (svc_serv_is_pooled(serv))
+		svc_pool_map_put();
+
 	/* Unregister service with the portmapper */
 	svc_register(serv, 0, 0);
 	kfree(serv->sv_pools);
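The svc.c part of the diff turns the cpu/pool map into a reference-counted singleton: the first pooled service builds the map, later services only bump the count, and the last svc_pool_map_put() frees it so the pool_mode module parameter can be changed without reloading sunrpc.ko. Below is a minimal userspace sketch of that get/put pattern, not kernel code; the names pool_map_get/pool_map_put, the pthread mutex and the fixed npools value are illustrative assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct pool_map {
	int count;		/* how many services currently use the map */
	int npools;
	unsigned int *to_pool;	/* built lazily by the first user */
};

static struct pool_map map;
static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;

/* First caller builds the shared map; everyone else just takes a reference. */
static int pool_map_get(void)
{
	pthread_mutex_lock(&map_mutex);
	if (map.count++ == 0) {
		map.npools = 4;	/* stand-in for the mode-selection heuristic */
		map.to_pool = calloc(map.npools, sizeof(*map.to_pool));
	}
	pthread_mutex_unlock(&map_mutex);
	return map.npools;
}

/* Last caller to drop its reference frees the map, so the mode can change. */
static void pool_map_put(void)
{
	pthread_mutex_lock(&map_mutex);
	if (--map.count == 0) {
		free(map.to_pool);
		map.to_pool = NULL;
		map.npools = 0;
	}
	pthread_mutex_unlock(&map_mutex);
}

int main(void)
{
	int npools = pool_map_get();	/* analogous to svc_create_pooled() */
	printf("using %d pools\n", npools);
	pool_map_put();			/* analogous to svc_destroy() */
	return 0;
}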
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 63ae94771b8e..f6e1eb1ea720 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -82,6 +82,7 @@ static void svc_delete_socket(struct svc_sock *svsk);
 static void		svc_udp_data_ready(struct sock *, int);
 static int		svc_udp_recvfrom(struct svc_rqst *);
 static int		svc_udp_sendto(struct svc_rqst *);
+static void		svc_close_socket(struct svc_sock *svsk);
 
 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
 static int svc_deferred_recv(struct svc_rqst *rqstp);
@@ -131,13 +132,13 @@ static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
 			NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
 			htons(((struct sockaddr_in *) addr)->sin_port));
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
 	case AF_INET6:
 		snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
 			NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
 			htons(((struct sockaddr_in6 *) addr)->sin6_port));
 		break;
-#endif
+
 	default:
 		snprintf(buf, len, "unknown address type: %d", addr->sa_family);
 		break;
@@ -449,9 +450,7 @@ svc_wake_up(struct svc_serv *serv)
 
 union svc_pktinfo_u {
 	struct in_pktinfo pkti;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	struct in6_pktinfo pkti6;
-#endif
 };
 
 static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
@@ -467,7 +466,7 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
 			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
 		}
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
 	case AF_INET6: {
 			struct in6_pktinfo *pki = CMSG_DATA(cmh);
 
@@ -479,7 +478,6 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
 			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
 		}
 		break;
-#endif
 	}
 	return;
 }
@@ -721,45 +719,21 @@ svc_write_space(struct sock *sk)
 	}
 }
 
-static void svc_udp_get_sender_address(struct svc_rqst *rqstp,
-					struct sk_buff *skb)
+static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp,
+					    struct cmsghdr *cmh)
 {
 	switch (rqstp->rq_sock->sk_sk->sk_family) {
 	case AF_INET: {
-		/* this seems to come from net/ipv4/udp.c:udp_recvmsg */
-		struct sockaddr_in *sin = svc_addr_in(rqstp);
-
-		sin->sin_family = AF_INET;
-		sin->sin_port = skb->h.uh->source;
-		sin->sin_addr.s_addr = skb->nh.iph->saddr;
-		rqstp->rq_addrlen = sizeof(struct sockaddr_in);
-		/* Remember which interface received this request */
-		rqstp->rq_daddr.addr.s_addr = skb->nh.iph->daddr;
-	}
+		struct in_pktinfo *pki = CMSG_DATA(cmh);
+		rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
 		break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-	case AF_INET6: {
-		/* this is derived from net/ipv6/udp.c:udpv6_recvmesg */
-		struct sockaddr_in6 *sin6 = svc_addr_in6(rqstp);
-
-		sin6->sin6_family = AF_INET6;
-		sin6->sin6_port = skb->h.uh->source;
-		sin6->sin6_flowinfo = 0;
-		sin6->sin6_scope_id = 0;
-		if (ipv6_addr_type(&sin6->sin6_addr) &
-				    IPV6_ADDR_LINKLOCAL)
-			sin6->sin6_scope_id = IP6CB(skb)->iif;
-		ipv6_addr_copy(&sin6->sin6_addr,
-			      &skb->nh.ipv6h->saddr);
-		rqstp->rq_addrlen = sizeof(struct sockaddr_in);
-		/* Remember which interface received this request */
-		ipv6_addr_copy(&rqstp->rq_daddr.addr6,
-			       &skb->nh.ipv6h->saddr);
 	}
+	case AF_INET6: {
+		struct in6_pktinfo *pki = CMSG_DATA(cmh);
+		ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
 		break;
-#endif
+	}
 	}
-	return;
 }
 
 /*
@@ -771,7 +745,15 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 	struct svc_sock	*svsk = rqstp->rq_sock;
 	struct svc_serv	*serv = svsk->sk_server;
 	struct sk_buff	*skb;
+	char		buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))];
+	struct cmsghdr *cmh = (struct cmsghdr *)buffer;
 	int		err, len;
+	struct msghdr msg = {
+		.msg_name = svc_addr(rqstp),
+		.msg_control = cmh,
+		.msg_controllen = sizeof(buffer),
+		.msg_flags = MSG_DONTWAIT,
+	};
 
 	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
 	    /* udp sockets need large rcvbuf as all pending
@@ -797,7 +779,9 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 	}
 
 	clear_bit(SK_DATA, &svsk->sk_flags);
-	while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
+	while ((err == kernel_recvmsg(svsk->sk_sock, &msg, NULL,
+				      0, 0, MSG_PEEK | MSG_DONTWAIT)) < 0 ||
+	       (skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
 		if (err == -EAGAIN) {
 			svc_sock_received(svsk);
 			return err;
@@ -805,6 +789,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 		/* possibly an icmp error */
 		dprintk("svc: recvfrom returned error %d\n", -err);
 	}
+	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
 	if (skb->tstamp.off_sec == 0) {
 		struct timeval tv;
 
@@ -827,7 +812,16 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 
 	rqstp->rq_prot = IPPROTO_UDP;
 
-	svc_udp_get_sender_address(rqstp, skb);
+	if (cmh->cmsg_level != IPPROTO_IP ||
+	    cmh->cmsg_type != IP_PKTINFO) {
+		if (net_ratelimit())
+			printk("rpcsvc: received unknown control message:"
+			       "%d/%d\n",
+			       cmh->cmsg_level, cmh->cmsg_type);
+		skb_free_datagram(svsk->sk_sk, skb);
+		return 0;
+	}
+	svc_udp_get_dest_address(rqstp, cmh);
 
 	if (skb_is_nonlinear(skb)) {
 		/* we have to copy */
@@ -884,6 +878,9 @@ svc_udp_sendto(struct svc_rqst *rqstp)
 static void
 svc_udp_init(struct svc_sock *svsk)
 {
+	int one = 1;
+	mm_segment_t oldfs;
+
 	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
 	svsk->sk_sk->sk_write_space = svc_write_space;
 	svsk->sk_recvfrom = svc_udp_recvfrom;
@@ -899,6 +896,13 @@ svc_udp_init(struct svc_sock *svsk)
 
 	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
 	set_bit(SK_CHNGBUF, &svsk->sk_flags);
+
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	/* make sure we get destination address info */
+	svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
+				       (char __user *)&one, sizeof(one));
+	set_fs(oldfs);
 }
 
 /*
@@ -977,11 +981,9 @@ static inline int svc_port_is_privileged(struct sockaddr *sin)
 	case AF_INET:
 		return ntohs(((struct sockaddr_in *)sin)->sin_port)
 			< PROT_SOCK;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	case AF_INET6:
 		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
 			< PROT_SOCK;
-#endif
 	default:
 		return 0;
 	}
@@ -1786,7 +1788,7 @@ svc_delete_socket(struct svc_sock *svsk)
 	spin_unlock_bh(&serv->sv_lock);
 }
 
-void svc_close_socket(struct svc_sock *svsk)
+static void svc_close_socket(struct svc_sock *svsk)
 {
 	set_bit(SK_CLOSE, &svsk->sk_flags);
 	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
@@ -1799,6 +1801,19 @@ void svc_close_socket(struct svc_sock *svsk)
 	svc_sock_put(svsk);
 }
 
+void svc_force_close_socket(struct svc_sock *svsk)
+{
+	set_bit(SK_CLOSE, &svsk->sk_flags);
+	if (test_bit(SK_BUSY, &svsk->sk_flags)) {
+		/* Waiting to be processed, but no threads left,
+		 * So just remove it from the waiting list
+		 */
+		list_del_init(&svsk->sk_ready);
+		clear_bit(SK_BUSY, &svsk->sk_flags);
+	}
+	svc_close_socket(svsk);
+}
+
 /**
  * svc_makesock - Make a socket for nfsd and lockd
  * @serv: RPC server structure
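The svcsock.c part of the diff switches the UDP receive path to recovering each datagram's destination address from an IP_PKTINFO control message (requested via setsockopt in svc_udp_init and parsed in svc_udp_recvfrom) rather than from skb header fields. Below is a small standalone userspace sketch of the same IP_PKTINFO pattern, assuming a Linux host; the port number and output format are illustrative, not part of the patch.

#define _GNU_SOURCE
#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/uio.h>

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port = htons(5555),	/* arbitrary example port */
	};
	char data[2048];
	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmh;

	/* ask the kernel to attach destination-address info to each datagram */
	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one));
	bind(fd, (struct sockaddr *)&sin, sizeof(sin));

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;

	for (cmh = CMSG_FIRSTHDR(&msg); cmh; cmh = CMSG_NXTHDR(&msg, cmh)) {
		if (cmh->cmsg_level == IPPROTO_IP &&
		    cmh->cmsg_type == IP_PKTINFO) {
			struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
			char buf[INET_ADDRSTRLEN];

			/* ipi_spec_dst is the local address the datagram was sent to */
			printf("datagram arrived for %s\n",
			       inet_ntop(AF_INET, &pki->ipi_spec_dst, buf, sizeof(buf)));
		}
	}
	return 0;
}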