author     David Howells <dhowells@redhat.com>    2017-05-24 12:02:32 -0400
committer  David S. Miller <davem@davemloft.net>  2017-05-25 13:15:11 -0400
commit     2baec2c3f854d1f79c7bb28386484e144e864a14 (patch)
tree       4b416db84d3579fe825458d021951955d120ed69
parent     878cd3ba37f77ded9c85e9857e3182a7fe8f5dc3 (diff)
rxrpc: Support network namespacing
Support network namespacing in AF_RXRPC with the following changes:
(1) All the local endpoint, peer and call lists, locks, counters, etc. are
moved into the per-namespace record.
(2) All the connection tracking is moved into the per-namespace record,
with the exception of the client connection ID tree, which remains
global so that connection IDs stay unique per machine.
(3) Each namespace gets its own epoch. This allows each network namespace
to pretend to be a separate client machine.
(4) The /proc/net/rxrpc_xxx files are now called /proc/net/rxrpc/xxx and
the contents reflect the namespace.
fs/afs/ should be okay with this patch as, for the moment, it explicitly
requires the current net namespace to be init_net before it will permit a
mount to proceed. It will, however, need updating so that cells, IP
addresses and DNS records are also made per-namespace.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
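[Editor's note: the conversion follows the kernel's standard pernet pattern: declare an ID and a size, and the networking core allocates a zeroed per-namespace area when a namespace is created and passes it to an init hook. The sketch below shows the general shape of that plumbing. rxrpc_net_id, rxrpc_net_ops and struct rxrpc_net are real names from the diff; the hook bodies are simplified assumptions, since net/rxrpc/net_ns.c itself is not reproduced in this excerpt.]

/* Minimal sketch of the pernet registration pattern this patch adopts.
 * The hook bodies are assumptions, not the patch's actual code.
 */
unsigned int rxrpc_net_id;

static __net_init int rxrpc_init_net(struct net *net)
{
	/* Seed the lists, locks, reaper work items and per-namespace
	 * epoch in the struct rxrpc_net the core allocated for this
	 * namespace (see the ar-internal.h hunk below).
	 */
	return 0;
}

static __net_exit void rxrpc_exit_net(struct net *net)
{
	/* Tear down all calls, connections and local endpoints owned by
	 * this namespace, as the old module-exit path did globally.
	 */
}

struct pernet_operations rxrpc_net_ops = {
	.init	= rxrpc_init_net,
	.exit	= rxrpc_exit_net,
	.id	= &rxrpc_net_id,		/* slot index for net_generic() */
	.size	= sizeof(struct rxrpc_net),	/* allocated per namespace */
};

[register_pernet_subsys(&rxrpc_net_ops), added to af_rxrpc_init() below, also runs the init hook for every namespace that already exists, and unregister_pernet_subsys() runs the exit hook for each of them — which is why the module exit path can drop its direct rxrpc_destroy_all_*() calls.]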
 net/rxrpc/Makefile       |   1
 net/rxrpc/af_rxrpc.c     |  35
 net/rxrpc/ar-internal.h  |  65
 net/rxrpc/call_accept.c  |  14
 net/rxrpc/call_object.c  |  39
 net/rxrpc/conn_client.c  | 153
 net/rxrpc/conn_object.c  |  55
 net/rxrpc/conn_service.c |  11
 net/rxrpc/local_object.c |  48
 net/rxrpc/net_ns.c       |  85
 net/rxrpc/peer_object.c  |  26
 net/rxrpc/proc.c         |  40
 12 files changed, 356 insertions(+), 216 deletions(-)
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index b9da4d6b914f..9c68d2f8ba39 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -19,6 +19,7 @@ rxrpc-y := \
 	local_event.o \
 	local_object.o \
 	misc.o \
+	net_ns.o \
 	output.o \
 	peer_event.o \
 	peer_object.o \
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 7fb59c3f1542..cd34ffbff1d1 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -38,9 +38,6 @@ MODULE_PARM_DESC(debug, "RxRPC debugging mask");
 static struct proto rxrpc_proto;
 static const struct proto_ops rxrpc_rpc_ops;
 
-/* local epoch for detecting local-end reset */
-u32 rxrpc_epoch;
-
 /* current debugging ID */
 atomic_t rxrpc_debug_id;
 
@@ -155,7 +152,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 
 	memcpy(&rx->srx, srx, sizeof(rx->srx));
 
-	local = rxrpc_lookup_local(&rx->srx);
+	local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
 	if (IS_ERR(local)) {
 		ret = PTR_ERR(local);
 		goto error_unlock;
@@ -434,7 +431,7 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
 			ret = -EAFNOSUPPORT;
 			goto error_unlock;
 		}
-		local = rxrpc_lookup_local(&rx->srx);
+		local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
 		if (IS_ERR(local)) {
 			ret = PTR_ERR(local);
 			goto error_unlock;
@@ -582,9 +579,6 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
 
 	_enter("%p,%d", sock, protocol);
 
-	if (!net_eq(net, &init_net))
-		return -EAFNOSUPPORT;
-
 	/* we support transport protocol UDP/UDP6 only */
 	if (protocol != PF_INET &&
 	    IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol != PF_INET6)
@@ -780,8 +774,6 @@ static int __init af_rxrpc_init(void)
 
 	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
 
-	get_random_bytes(&rxrpc_epoch, sizeof(rxrpc_epoch));
-	rxrpc_epoch |= RXRPC_RANDOM_EPOCH;
 	get_random_bytes(&tmp, sizeof(tmp));
 	tmp &= 0x3fffffff;
 	if (tmp == 0)
@@ -809,6 +801,10 @@ static int __init af_rxrpc_init(void)
 		goto error_security;
 	}
 
+	ret = register_pernet_subsys(&rxrpc_net_ops);
+	if (ret)
+		goto error_pernet;
+
 	ret = proto_register(&rxrpc_proto, 1);
 	if (ret < 0) {
 		pr_crit("Cannot register protocol\n");
@@ -839,11 +835,6 @@ static int __init af_rxrpc_init(void)
 		goto error_sysctls;
 	}
 
-#ifdef CONFIG_PROC_FS
-	proc_create("rxrpc_calls", 0, init_net.proc_net, &rxrpc_call_seq_fops);
-	proc_create("rxrpc_conns", 0, init_net.proc_net,
-		    &rxrpc_connection_seq_fops);
-#endif
 	return 0;
 
 error_sysctls:
@@ -855,6 +846,8 @@ error_key_type:
 error_sock:
 	proto_unregister(&rxrpc_proto);
 error_proto:
+	unregister_pernet_subsys(&rxrpc_net_ops);
+error_pernet:
 	rxrpc_exit_security();
 error_security:
 	destroy_workqueue(rxrpc_workqueue);
@@ -875,14 +868,16 @@ static void __exit af_rxrpc_exit(void)
 	unregister_key_type(&key_type_rxrpc);
 	sock_unregister(PF_RXRPC);
 	proto_unregister(&rxrpc_proto);
-	rxrpc_destroy_all_calls();
-	rxrpc_destroy_all_connections();
+	unregister_pernet_subsys(&rxrpc_net_ops);
 	ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
 	ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
-	rxrpc_destroy_all_locals();
 
-	remove_proc_entry("rxrpc_conns", init_net.proc_net);
-	remove_proc_entry("rxrpc_calls", init_net.proc_net);
+	/* Make sure the local and peer records pinned by any dying connections
+	 * are released.
+	 */
+	rcu_barrier();
+	rxrpc_destroy_client_conn_ids();
+
 	destroy_workqueue(rxrpc_workqueue);
 	rxrpc_exit_security();
 	kmem_cache_destroy(rxrpc_call_jar);
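[Editor's note: one subtlety in the new exit path above: connections are freed through call_rcu(), and those callbacks in turn drop the local and peer references, so waiting for readers alone is not enough before tearing down the connection-ID IDR. A standalone sketch of the idiom, with hypothetical "example_*" names:]

/* Why the exit path uses rcu_barrier(): synchronize_rcu() only waits for
 * RCU read-side critical sections to finish; it does NOT wait for
 * callbacks already queued with call_rcu() to run.  rcu_barrier() does,
 * so resources released from inside those callbacks are gone afterwards.
 * All "example_*" names below are hypothetical.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

static atomic_t example_nr_objs = ATOMIC_INIT(0);  /* stands in for pinned peers/locals */

struct example_obj {
	struct rcu_head rcu;
};

static void example_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct example_obj, rcu));
	atomic_dec(&example_nr_objs);	/* the release happens inside the callback */
}

static void example_release(struct example_obj *obj)
{
	call_rcu(&obj->rcu, example_free_rcu);	/* free after a grace period */
}

static void example_module_exit(void)
{
	/* synchronize_rcu() here would only wait for readers; queued
	 * example_free_rcu() callbacks could still be pending.
	 */
	rcu_barrier();	/* wait for every pending call_rcu() callback to run */
	WARN_ON(atomic_read(&example_nr_objs) != 0);	/* now safe to tear down */
}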
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 7486926e60a8..067dbb3121d0 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -11,6 +11,8 @@
 
 #include <linux/atomic.h>
 #include <linux/seqlock.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
 #include <rxrpc/packet.h>
@@ -65,6 +67,37 @@ enum {
 };
 
 /*
+ * Per-network namespace data.
+ */
+struct rxrpc_net {
+	struct proc_dir_entry	*proc_net;	/* Subdir in /proc/net */
+	u32			epoch;		/* Local epoch for detecting local-end reset */
+	struct list_head	calls;		/* List of calls active in this namespace */
+	rwlock_t		call_lock;	/* Lock for ->calls */
+
+	struct list_head	conn_proc_list;	/* List of conns in this namespace for proc */
+	struct list_head	service_conns;	/* Service conns in this namespace */
+	rwlock_t		conn_lock;	/* Lock for ->conn_proc_list, ->service_conns */
+	struct delayed_work	service_conn_reaper;
+
+	unsigned int		nr_client_conns;
+	unsigned int		nr_active_client_conns;
+	bool			kill_all_client_conns;
+	spinlock_t		client_conn_cache_lock; /* Lock for ->*_client_conns */
+	spinlock_t		client_conn_discard_lock; /* Prevent multiple discarders */
+	struct list_head	waiting_client_conns;
+	struct list_head	active_client_conns;
+	struct list_head	idle_client_conns;
+	struct delayed_work	client_conn_reaper;
+
+	struct list_head	local_endpoints;
+	struct mutex		local_mutex;	/* Lock for ->local_endpoints */
+
+	spinlock_t		peer_hash_lock;	/* Lock for ->peer_hash */
+	DECLARE_HASHTABLE	(peer_hash, 10);
+};
+
+/*
  * Service backlog preallocation.
  *
  * This contains circular buffers of preallocated peers, connections and calls
@@ -211,6 +244,7 @@ struct rxrpc_security {
 struct rxrpc_local {
 	struct rcu_head		rcu;
 	atomic_t		usage;
+	struct rxrpc_net	*rxnet;		/* The network ns in which this resides */
 	struct list_head	link;
 	struct socket		*socket;	/* my UDP socket */
 	struct work_struct	processor;
@@ -601,7 +635,6 @@ struct rxrpc_ack_summary {
  * af_rxrpc.c
  */
 extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
-extern u32 rxrpc_epoch;
 extern atomic_t rxrpc_debug_id;
 extern struct workqueue_struct *rxrpc_workqueue;
 
@@ -634,8 +667,6 @@ extern const char *const rxrpc_call_states[];
 extern const char *const rxrpc_call_completions[];
 extern unsigned int rxrpc_max_call_lifetime;
 extern struct kmem_cache *rxrpc_call_jar;
-extern struct list_head rxrpc_calls;
-extern rwlock_t rxrpc_call_lock;
 
 struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
 struct rxrpc_call *rxrpc_alloc_call(gfp_t);
@@ -653,7 +684,7 @@ void rxrpc_see_call(struct rxrpc_call *);
 void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
 void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
 void rxrpc_cleanup_call(struct rxrpc_call *);
-void __exit rxrpc_destroy_all_calls(void);
+void rxrpc_destroy_all_calls(struct rxrpc_net *);
 
 static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
 {
@@ -773,7 +804,8 @@ int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
 void rxrpc_expose_client_call(struct rxrpc_call *);
 void rxrpc_disconnect_client_call(struct rxrpc_call *);
 void rxrpc_put_client_conn(struct rxrpc_connection *);
-void __exit rxrpc_destroy_all_client_connections(void);
+void rxrpc_discard_expired_client_conns(struct work_struct *);
+void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
 
 /*
  * conn_event.c
@@ -784,9 +816,6 @@ void rxrpc_process_connection(struct work_struct *);
  * conn_object.c
  */
 extern unsigned int rxrpc_connection_expiry;
-extern struct list_head rxrpc_connections;
-extern struct list_head rxrpc_connection_proc_list;
-extern rwlock_t rxrpc_connection_lock;
 
 int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
 struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
@@ -800,7 +829,8 @@ void rxrpc_see_connection(struct rxrpc_connection *);
 void rxrpc_get_connection(struct rxrpc_connection *);
 struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
 void rxrpc_put_service_conn(struct rxrpc_connection *);
-void __exit rxrpc_destroy_all_connections(void);
+void rxrpc_service_connection_reaper(struct work_struct *);
+void rxrpc_destroy_all_connections(struct rxrpc_net *);
 
 static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
 {
@@ -828,7 +858,7 @@ static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
  */
 struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
						     struct sk_buff *);
-struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t);
+struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
 void rxrpc_new_incoming_connection(struct rxrpc_connection *, struct sk_buff *);
 void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
 
@@ -861,9 +891,9 @@ extern void rxrpc_process_local_events(struct rxrpc_local *);
 /*
  * local_object.c
  */
-struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *);
+struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
 void __rxrpc_put_local(struct rxrpc_local *);
-void __exit rxrpc_destroy_all_locals(void);
+void rxrpc_destroy_all_locals(struct rxrpc_net *);
 
 static inline void rxrpc_get_local(struct rxrpc_local *local)
 {
@@ -902,6 +932,17 @@ extern unsigned int rxrpc_resend_timeout;
 extern const s8 rxrpc_ack_priority[];
 
 /*
+ * net_ns.c
+ */
+extern unsigned int rxrpc_net_id;
+extern struct pernet_operations rxrpc_net_ops;
+
+static inline struct rxrpc_net *rxrpc_net(struct net *net)
+{
+	return net_generic(net, rxrpc_net_id);
+}
+
+/*
 * output.c
 */
int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
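[Editor's note: the rxrpc_net() helper just added is the single point through which all the converted code reaches its per-namespace state — net_generic() returns the slot the core allocated for rxrpc_net_id when the namespace was created. Below is a plausible sketch of how the pernet init hook would seed the fields declared above. The field names match the declaration, but net/rxrpc/net_ns.c is not part of this excerpt, so treat the body as illustrative:]

/* Assumed sketch of the pernet init hook seeding struct rxrpc_net.
 * Not the patch's actual net_ns.c, which is not shown in this excerpt.
 */
static __net_init int rxrpc_init_net(struct net *net)
{
	struct rxrpc_net *rxnet = rxrpc_net(net);

	/* Each namespace pretends to be a separate client machine. */
	get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));

	INIT_LIST_HEAD(&rxnet->calls);
	rwlock_init(&rxnet->call_lock);

	INIT_LIST_HEAD(&rxnet->conn_proc_list);
	INIT_LIST_HEAD(&rxnet->service_conns);
	rwlock_init(&rxnet->conn_lock);
	INIT_DELAYED_WORK(&rxnet->service_conn_reaper,
			  rxrpc_service_connection_reaper);

	rxnet->nr_client_conns = 0;
	rxnet->nr_active_client_conns = 0;
	rxnet->kill_all_client_conns = false;
	spin_lock_init(&rxnet->client_conn_cache_lock);
	spin_lock_init(&rxnet->client_conn_discard_lock);
	INIT_LIST_HEAD(&rxnet->waiting_client_conns);
	INIT_LIST_HEAD(&rxnet->active_client_conns);
	INIT_LIST_HEAD(&rxnet->idle_client_conns);
	INIT_DELAYED_WORK(&rxnet->client_conn_reaper,
			  rxrpc_discard_expired_client_conns);

	INIT_LIST_HEAD(&rxnet->local_endpoints);
	mutex_init(&rxnet->local_mutex);

	hash_init(rxnet->peer_hash);
	spin_lock_init(&rxnet->peer_hash_lock);

	/* The proc files move from /proc/net/rxrpc_* to /proc/net/rxrpc/. */
	rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
	return rxnet->proc_net ? 0 : -ENOMEM;
}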
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 1752fcf8e8f1..a8515b0d4717 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -38,6 +38,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 {
 	const void *here = __builtin_return_address(0);
 	struct rxrpc_call *call;
+	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 	int max, tmp;
 	unsigned int size = RXRPC_BACKLOG_MAX;
 	unsigned int head, tail, call_head, call_tail;
@@ -79,7 +80,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 	if (CIRC_CNT(head, tail, size) < max) {
 		struct rxrpc_connection *conn;
 
-		conn = rxrpc_prealloc_service_connection(gfp);
+		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
 		if (!conn)
 			return -ENOMEM;
 		b->conn_backlog[head] = conn;
@@ -136,9 +137,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 
 	write_unlock(&rx->call_lock);
 
-	write_lock(&rxrpc_call_lock);
-	list_add_tail(&call->link, &rxrpc_calls);
-	write_unlock(&rxrpc_call_lock);
+	write_lock(&rxnet->call_lock);
+	list_add_tail(&call->link, &rxnet->calls);
+	write_unlock(&rxnet->call_lock);
 
 	b->call_backlog[call_head] = call;
 	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
@@ -185,6 +186,7 @@ int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
 void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
 {
 	struct rxrpc_backlog *b = rx->backlog;
+	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;
 
 	if (!b)
@@ -209,10 +211,10 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
 	tail = b->conn_backlog_tail;
 	while (CIRC_CNT(head, tail, size) > 0) {
 		struct rxrpc_connection *conn = b->conn_backlog[tail];
-		write_lock(&rxrpc_connection_lock);
+		write_lock(&rxnet->conn_lock);
 		list_del(&conn->link);
 		list_del(&conn->proc_link);
-		write_unlock(&rxrpc_connection_lock);
+		write_unlock(&rxnet->conn_lock);
 		kfree(conn);
 		tail = (tail + 1) & (size - 1);
 	}
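[Editor's note: both functions above obtain their per-namespace state with the same two-step chain that recurs throughout the patch: the socket knows its namespace, and the namespace carries rxrpc's generic slot. Spelled out, with a hypothetical wrapper name — the patch simply writes the chain inline:]

/* The lookup chain used above, unrolled.  "rxnet_of" is illustrative. */
static inline struct rxrpc_net *rxnet_of(struct rxrpc_sock *rx)
{
	struct net *net = sock_net(&rx->sk);	/* netns the socket was created in */

	return rxrpc_net(net);			/* = net_generic(net, rxrpc_net_id) */
}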
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 47f7f4205653..692110808baa 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -44,8 +44,6 @@ const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
 };
 
 struct kmem_cache *rxrpc_call_jar;
-LIST_HEAD(rxrpc_calls);
-DEFINE_RWLOCK(rxrpc_call_lock);
 
 static void rxrpc_call_timer_expired(unsigned long _call)
 {
@@ -207,6 +205,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	__releases(&rx->sk.sk_lock.slock)
 {
 	struct rxrpc_call *call, *xcall;
+	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 	struct rb_node *parent, **pp;
 	const void *here = __builtin_return_address(0);
 	int ret;
@@ -255,9 +254,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 
 	write_unlock(&rx->call_lock);
 
-	write_lock(&rxrpc_call_lock);
-	list_add_tail(&call->link, &rxrpc_calls);
-	write_unlock(&rxrpc_call_lock);
+	write_lock(&rxnet->call_lock);
+	list_add_tail(&call->link, &rxnet->calls);
+	write_unlock(&rxnet->call_lock);
 
 	/* From this point on, the call is protected by its own lock. */
 	release_sock(&rx->sk);
@@ -508,6 +507,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
 */
 void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 {
+	struct rxrpc_net *rxnet;
 	const void *here = __builtin_return_address(0);
 	int n;
 
@@ -520,9 +520,12 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 		_debug("call %d dead", call->debug_id);
 		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
 
-		write_lock(&rxrpc_call_lock);
-		list_del_init(&call->link);
-		write_unlock(&rxrpc_call_lock);
+		if (!list_empty(&call->link)) {
+			rxnet = rxrpc_net(sock_net(&call->socket->sk));
+			write_lock(&rxnet->call_lock);
+			list_del_init(&call->link);
+			write_unlock(&rxnet->call_lock);
+		}
 
 		rxrpc_cleanup_call(call);
 	}
@@ -570,21 +573,23 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
 }
 
 /*
- * Make sure that all calls are gone.
+ * Make sure that all calls are gone from a network namespace.  To reach this
+ * point, any open UDP sockets in that namespace must have been closed, so any
+ * outstanding calls cannot be doing I/O.
 */
-void __exit rxrpc_destroy_all_calls(void)
+void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 {
 	struct rxrpc_call *call;
 
 	_enter("");
 
-	if (list_empty(&rxrpc_calls))
+	if (list_empty(&rxnet->calls))
 		return;
 
-	write_lock(&rxrpc_call_lock);
+	write_lock(&rxnet->call_lock);
 
-	while (!list_empty(&rxrpc_calls)) {
-		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
+	while (!list_empty(&rxnet->calls)) {
+		call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
 		_debug("Zapping call %p", call);
 
 		rxrpc_see_call(call);
@@ -595,10 +600,10 @@ void __exit rxrpc_destroy_all_calls(void)
 		       rxrpc_call_states[call->state],
 		       call->flags, call->events);
 
-		write_unlock(&rxrpc_call_lock);
+		write_unlock(&rxnet->call_lock);
 		cond_resched();
-		write_lock(&rxrpc_call_lock);
+		write_lock(&rxnet->call_lock);
 	}
 
-	write_unlock(&rxrpc_call_lock);
+	write_unlock(&rxnet->call_lock);
 }
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index e8dea0d49e7f..c86f3202f967 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -31,7 +31,7 @@
  * may freely grant available channels to new calls and calls may be
  * waiting on it for channels to become available.
  *
- * The connection is on the rxrpc_active_client_conns list which is kept
+ * The connection is on the rxnet->active_client_conns list which is kept
  * in activation order for culling purposes.
  *
  * rxrpc_nr_active_client_conns is held incremented also.
@@ -46,7 +46,7 @@
  * expires, the EXPOSED flag is cleared and the connection transitions to
  * the INACTIVE state.
  *
- * The connection is on the rxrpc_idle_client_conns list which is kept in
+ * The connection is on the rxnet->idle_client_conns list which is kept in
  * order of how soon they'll expire.
 *
 * There are flags of relevance to the cache:
@@ -85,27 +85,13 @@ __read_mostly unsigned int rxrpc_reap_client_connections = 900;
 __read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
 __read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
 
-static unsigned int rxrpc_nr_client_conns;
-static unsigned int rxrpc_nr_active_client_conns;
-static __read_mostly bool rxrpc_kill_all_client_conns;
-
-static DEFINE_SPINLOCK(rxrpc_client_conn_cache_lock);
-static DEFINE_SPINLOCK(rxrpc_client_conn_discard_mutex);
-static LIST_HEAD(rxrpc_waiting_client_conns);
-static LIST_HEAD(rxrpc_active_client_conns);
-static LIST_HEAD(rxrpc_idle_client_conns);
-
 /*
  * We use machine-unique IDs for our client connections.
  */
 DEFINE_IDR(rxrpc_client_conn_ids);
 static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
 
-static void rxrpc_cull_active_client_conns(void);
-static void rxrpc_discard_expired_client_conns(struct work_struct *);
-
-static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap,
-			    rxrpc_discard_expired_client_conns);
+static void rxrpc_cull_active_client_conns(struct rxrpc_net *);
 
 /*
  * Get a connection ID and epoch for a client connection from the global pool.
@@ -116,6 +102,7 @@ static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap,
 static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
 {
+	struct rxrpc_net *rxnet = conn->params.local->rxnet;
 	int id;
 
 	_enter("");
@@ -131,7 +118,7 @@ static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
 	spin_unlock(&rxrpc_conn_id_lock);
 	idr_preload_end();
 
-	conn->proto.epoch = rxrpc_epoch;
+	conn->proto.epoch = rxnet->epoch;
 	conn->proto.cid = id << RXRPC_CIDSHIFT;
 	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
 	_leave(" [CID %x]", conn->proto.cid);
@@ -183,6 +170,7 @@ static struct rxrpc_connection *
 rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
 {
 	struct rxrpc_connection *conn;
+	struct rxrpc_net *rxnet = cp->local->rxnet;
 	int ret;
 
 	_enter("");
@@ -213,9 +201,9 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
 	if (ret < 0)
 		goto error_2;
 
-	write_lock(&rxrpc_connection_lock);
-	list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
-	write_unlock(&rxrpc_connection_lock);
+	write_lock(&rxnet->conn_lock);
+	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
+	write_unlock(&rxnet->conn_lock);
 
 	/* We steal the caller's peer ref. */
 	cp->peer = NULL;
@@ -243,12 +231,13 @@ error_0:
 */
 static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
 {
+	struct rxrpc_net *rxnet = conn->params.local->rxnet;
 	int id_cursor, id, distance, limit;
 
 	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
 		goto dont_reuse;
 
-	if (conn->proto.epoch != rxrpc_epoch)
+	if (conn->proto.epoch != rxnet->epoch)
 		goto mark_dont_reuse;
 
 	/* The IDR tree gets very expensive on memory if the connection IDs are
@@ -440,12 +429,13 @@ error:
 /*
  * Activate a connection.
 */
-static void rxrpc_activate_conn(struct rxrpc_connection *conn)
+static void rxrpc_activate_conn(struct rxrpc_net *rxnet,
+				struct rxrpc_connection *conn)
 {
 	trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
 	conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
-	rxrpc_nr_active_client_conns++;
-	list_move_tail(&conn->cache_link, &rxrpc_active_client_conns);
+	rxnet->nr_active_client_conns++;
+	list_move_tail(&conn->cache_link, &rxnet->active_client_conns);
 }
 
 /*
@@ -460,7 +450,8 @@ static void rxrpc_activate_conn(struct rxrpc_connection *conn)
 * channels if it has been culled to make space and then re-requested by a new
 * call.
 */
-static void rxrpc_animate_client_conn(struct rxrpc_connection *conn)
+static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet,
+				      struct rxrpc_connection *conn)
 {
 	unsigned int nr_conns;
 
@@ -469,12 +460,12 @@ static void rxrpc_animate_client_conn(struct rxrpc_connection *conn)
 	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE)
 		goto out;
 
-	spin_lock(&rxrpc_client_conn_cache_lock);
+	spin_lock(&rxnet->client_conn_cache_lock);
 
-	nr_conns = rxrpc_nr_client_conns;
+	nr_conns = rxnet->nr_client_conns;
 	if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
 		trace_rxrpc_client(conn, -1, rxrpc_client_count);
-		rxrpc_nr_client_conns = nr_conns + 1;
+		rxnet->nr_client_conns = nr_conns + 1;
 	}
 
 	switch (conn->cache_state) {
@@ -494,21 +485,21 @@ static void rxrpc_animate_client_conn(struct rxrpc_connection *conn)
 	}
 
 out_unlock:
-	spin_unlock(&rxrpc_client_conn_cache_lock);
+	spin_unlock(&rxnet->client_conn_cache_lock);
 out:
 	_leave(" [%d]", conn->cache_state);
 	return;
 
activate_conn:
 	_debug("activate");
-	rxrpc_activate_conn(conn);
+	rxrpc_activate_conn(rxnet, conn);
 	goto out_unlock;
 
wait_for_capacity:
 	_debug("wait");
 	trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
 	conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
-	list_move_tail(&conn->cache_link, &rxrpc_waiting_client_conns);
+	list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns);
 	goto out_unlock;
 }
 
@@ -660,18 +651,19 @@ int rxrpc_connect_call(struct rxrpc_call *call,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
 {
+	struct rxrpc_net *rxnet = cp->local->rxnet;
 	int ret;
 
 	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
 
-	rxrpc_discard_expired_client_conns(NULL);
-	rxrpc_cull_active_client_conns();
+	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work);
+	rxrpc_cull_active_client_conns(rxnet);
 
 	ret = rxrpc_get_client_conn(call, cp, srx, gfp);
 	if (ret < 0)
 		return ret;
 
-	rxrpc_animate_client_conn(call->conn);
+	rxrpc_animate_client_conn(rxnet, call->conn);
 	rxrpc_activate_channels(call->conn);
 
 	ret = rxrpc_wait_for_channel(call, gfp);
@@ -729,6 +721,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
 	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
 	struct rxrpc_connection *conn = call->conn;
 	struct rxrpc_channel *chan = &conn->channels[channel];
+	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&call->socket->sk));
 
 	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
 	call->conn = NULL;
@@ -750,7 +743,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
 	/* We must deactivate or idle the connection if it's now
	 * waiting for nothing.
	 */
-	spin_lock(&rxrpc_client_conn_cache_lock);
+	spin_lock(&rxnet->client_conn_cache_lock);
 	if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
	    list_empty(&conn->waiting_calls) &&
	    !conn->active_chans)
@@ -787,14 +780,14 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
 	 * list. It might even get moved back to the active list whilst we're
	 * waiting for the lock.
	 */
-	spin_lock(&rxrpc_client_conn_cache_lock);
+	spin_lock(&rxnet->client_conn_cache_lock);
 
 	switch (conn->cache_state) {
 	case RXRPC_CONN_CLIENT_ACTIVE:
 		if (list_empty(&conn->waiting_calls)) {
 			rxrpc_deactivate_one_channel(conn, channel);
 			if (!conn->active_chans) {
-				rxrpc_nr_active_client_conns--;
+				rxnet->nr_active_client_conns--;
 				goto idle_connection;
 			}
 			goto out;
@@ -820,7 +813,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
 	}
 
out:
-	spin_unlock(&rxrpc_client_conn_cache_lock);
+	spin_unlock(&rxnet->client_conn_cache_lock);
out_2:
 	spin_unlock(&conn->channel_lock);
 	rxrpc_put_connection(conn);
@@ -835,11 +828,11 @@ idle_connection:
 		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
 		conn->idle_timestamp = jiffies;
 		conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
-		list_move_tail(&conn->cache_link, &rxrpc_idle_client_conns);
-		if (rxrpc_idle_client_conns.next == &conn->cache_link &&
-		    !rxrpc_kill_all_client_conns)
+		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
+		if (rxnet->idle_client_conns.next == &conn->cache_link &&
+		    !rxnet->kill_all_client_conns)
 			queue_delayed_work(rxrpc_workqueue,
-					   &rxrpc_client_conn_reap,
+					   &rxnet->client_conn_reaper,
					   rxrpc_conn_idle_client_expiry);
 	} else {
 		trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
@@ -857,6 +850,7 @@ rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
 {
 	struct rxrpc_connection *next = NULL;
 	struct rxrpc_local *local = conn->params.local;
+	struct rxrpc_net *rxnet = local->rxnet;
 	unsigned int nr_conns;
 
 	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
@@ -875,18 +869,18 @@ rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
 
 	if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
 		trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
-		spin_lock(&rxrpc_client_conn_cache_lock);
-		nr_conns = --rxrpc_nr_client_conns;
+		spin_lock(&rxnet->client_conn_cache_lock);
+		nr_conns = --rxnet->nr_client_conns;
 
 		if (nr_conns < rxrpc_max_client_connections &&
-		    !list_empty(&rxrpc_waiting_client_conns)) {
-			next = list_entry(rxrpc_waiting_client_conns.next,
+		    !list_empty(&rxnet->waiting_client_conns)) {
+			next = list_entry(rxnet->waiting_client_conns.next,
					  struct rxrpc_connection, cache_link);
			rxrpc_get_connection(next);
-			rxrpc_activate_conn(next);
+			rxrpc_activate_conn(rxnet, next);
		}
 
-		spin_unlock(&rxrpc_client_conn_cache_lock);
+		spin_unlock(&rxnet->client_conn_cache_lock);
 	}
 
 	rxrpc_kill_connection(conn);
@@ -921,10 +915,10 @@ void rxrpc_put_client_conn(struct rxrpc_connection *conn)
 /*
 * Kill the longest-active client connections to make room for new ones.
 */
-static void rxrpc_cull_active_client_conns(void)
+static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet)
 {
 	struct rxrpc_connection *conn;
-	unsigned int nr_conns = rxrpc_nr_client_conns;
+	unsigned int nr_conns = rxnet->nr_client_conns;
 	unsigned int nr_active, limit;
 
 	_enter("");
@@ -936,12 +930,12 @@ static void rxrpc_cull_active_client_conns(void)
 	}
 	limit = rxrpc_reap_client_connections;
 
-	spin_lock(&rxrpc_client_conn_cache_lock);
-	nr_active = rxrpc_nr_active_client_conns;
+	spin_lock(&rxnet->client_conn_cache_lock);
+	nr_active = rxnet->nr_active_client_conns;
 
 	while (nr_active > limit) {
-		ASSERT(!list_empty(&rxrpc_active_client_conns));
-		conn = list_entry(rxrpc_active_client_conns.next,
+		ASSERT(!list_empty(&rxnet->active_client_conns));
+		conn = list_entry(rxnet->active_client_conns.next,
				  struct rxrpc_connection, cache_link);
 		ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_ACTIVE);
 
@@ -953,14 +947,14 @@ static void rxrpc_cull_active_client_conns(void)
 			trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
 			conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
 			list_move_tail(&conn->cache_link,
-				       &rxrpc_waiting_client_conns);
+				       &rxnet->waiting_client_conns);
 		}
 
 		nr_active--;
 	}
 
-	rxrpc_nr_active_client_conns = nr_active;
-	spin_unlock(&rxrpc_client_conn_cache_lock);
+	rxnet->nr_active_client_conns = nr_active;
+	spin_unlock(&rxnet->client_conn_cache_lock);
 	ASSERTCMP(nr_active, >=, 0);
 	_leave(" [culled]");
 }
@@ -972,22 +966,25 @@ static void rxrpc_cull_active_client_conns(void)
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
-static void rxrpc_discard_expired_client_conns(struct work_struct *work)
+void rxrpc_discard_expired_client_conns(struct work_struct *work)
 {
 	struct rxrpc_connection *conn;
+	struct rxrpc_net *rxnet =
+		container_of(to_delayed_work(work),
+			     struct rxrpc_net, client_conn_reaper);
 	unsigned long expiry, conn_expires_at, now;
 	unsigned int nr_conns;
 	bool did_discard = false;
 
-	_enter("%c", work ? 'w' : 'n');
+	_enter("");
 
-	if (list_empty(&rxrpc_idle_client_conns)) {
+	if (list_empty(&rxnet->idle_client_conns)) {
 		_leave(" [empty]");
 		return;
 	}
 
 	/* Don't double up on the discarding */
-	if (!spin_trylock(&rxrpc_client_conn_discard_mutex)) {
+	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
 		_leave(" [already]");
 		return;
 	}
@@ -995,19 +992,19 @@ static void rxrpc_discard_expired_client_conns(struct work_struct *work)
 	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
-	nr_conns = rxrpc_nr_client_conns;
+	nr_conns = rxnet->nr_client_conns;
 
next:
-	spin_lock(&rxrpc_client_conn_cache_lock);
+	spin_lock(&rxnet->client_conn_cache_lock);
 
-	if (list_empty(&rxrpc_idle_client_conns))
+	if (list_empty(&rxnet->idle_client_conns))
 		goto out;
 
-	conn = list_entry(rxrpc_idle_client_conns.next,
+	conn = list_entry(rxnet->idle_client_conns.next,
			  struct rxrpc_connection, cache_link);
 	ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));
 
-	if (!rxrpc_kill_all_client_conns) {
+	if (!rxnet->kill_all_client_conns) {
 		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout. We must,
		 * however, have at least a short grace period to be able to do
@@ -1030,7 +1027,7 @@ next:
 	conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
 	list_del_init(&conn->cache_link);
 
-	spin_unlock(&rxrpc_client_conn_cache_lock);
+	spin_unlock(&rxnet->client_conn_cache_lock);
 
 	/* When we cleared the EXPOSED flag, we took on responsibility for the
	 * reference that that had on the usage count. We deal with that here.
@@ -1050,14 +1047,14 @@ not_yet_expired:
 	 * then things get messier.
	 */
 	_debug("not yet");
-	if (!rxrpc_kill_all_client_conns)
+	if (!rxnet->kill_all_client_conns)
 		queue_delayed_work(rxrpc_workqueue,
-				   &rxrpc_client_conn_reap,
+				   &rxnet->client_conn_reaper,
				   conn_expires_at - now);
 
out:
-	spin_unlock(&rxrpc_client_conn_cache_lock);
-	spin_unlock(&rxrpc_client_conn_discard_mutex);
+	spin_unlock(&rxnet->client_conn_cache_lock);
+	spin_unlock(&rxnet->client_conn_discard_lock);
 	_leave("");
 }
 
@@ -1065,17 +1062,17 @@ out:
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out
 */
-void __exit rxrpc_destroy_all_client_connections(void)
+void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
 {
 	_enter("");
 
-	spin_lock(&rxrpc_client_conn_cache_lock);
-	rxrpc_kill_all_client_conns = true;
-	spin_unlock(&rxrpc_client_conn_cache_lock);
+	spin_lock(&rxnet->client_conn_cache_lock);
+	rxnet->kill_all_client_conns = true;
+	spin_unlock(&rxnet->client_conn_cache_lock);
 
-	cancel_delayed_work(&rxrpc_client_conn_reap);
+	cancel_delayed_work(&rxnet->client_conn_reaper);
 
-	if (!queue_delayed_work(rxrpc_workqueue, &rxrpc_client_conn_reap, 0))
+	if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0))
 		_debug("destroy: queue failed");
 
 	_leave("");
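[Editor's note: with the reaper now embedded in struct rxrpc_net rather than declared as a file-scope DECLARE_DELAYED_WORK, the work function recovers its owning namespace from the work pointer itself — the container_of() dance visible in rxrpc_discard_expired_client_conns() above. It is also why rxrpc_connect_call() can invoke the function directly by passing &rxnet->client_conn_reaper.work. A generic, self-contained sketch of the pattern, with illustrative "foo_*" names:]

/* Generic sketch of the embedded-delayed-work pattern used by the
 * per-namespace reapers.  Names are illustrative only.
 */
#include <linux/workqueue.h>

struct foo_net {
	struct delayed_work	reaper;
	/* ... per-namespace lists and locks ... */
};

static void foo_reaper(struct work_struct *work)
{
	/* "work" points at foo_net::reaper.work; to_delayed_work() maps it
	 * back to the delayed_work, and container_of() to the enclosing
	 * foo_net, so no global state is needed.
	 */
	struct foo_net *fnet =
		container_of(to_delayed_work(work), struct foo_net, reaper);

	/* ... reap using fnet's lists; requeue if more work remains ... */
	schedule_delayed_work(&fnet->reaper, 10 * HZ);
}

static void foo_net_init(struct foo_net *fnet)
{
	INIT_DELAYED_WORK(&fnet->reaper, foo_reaper);
}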
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index b0ecb770fdce..ade4d3d0b2a7 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -22,13 +22,6 @@
 */
 unsigned int rxrpc_connection_expiry = 10 * 60;
 
-static void rxrpc_connection_reaper(struct work_struct *work);
-
-LIST_HEAD(rxrpc_connections);
-LIST_HEAD(rxrpc_connection_proc_list);
-DEFINE_RWLOCK(rxrpc_connection_lock);
-static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
-
 static void rxrpc_destroy_connection(struct rcu_head *);
 
 /*
@@ -222,15 +215,17 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
 */
 void rxrpc_kill_connection(struct rxrpc_connection *conn)
 {
+	struct rxrpc_net *rxnet = conn->params.local->rxnet;
+
 	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
 	ASSERT(list_empty(&conn->cache_link));
 
-	write_lock(&rxrpc_connection_lock);
+	write_lock(&rxnet->conn_lock);
 	list_del_init(&conn->proc_link);
-	write_unlock(&rxrpc_connection_lock);
+	write_unlock(&rxnet->conn_lock);
 
 	/* Drain the Rx queue. Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
@@ -309,14 +304,17 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
 */
 void rxrpc_put_service_conn(struct rxrpc_connection *conn)
 {
+	struct rxrpc_net *rxnet;
 	const void *here = __builtin_return_address(0);
 	int n;
 
 	n = atomic_dec_return(&conn->usage);
 	trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
 	ASSERTCMP(n, >=, 0);
-	if (n == 0)
-		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
+	if (n == 0) {
+		rxnet = conn->params.local->rxnet;
+		rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
+	}
 }
 
 /*
@@ -348,9 +346,12 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
 /*
 * reap dead service connections
 */
-static void rxrpc_connection_reaper(struct work_struct *work)
+void rxrpc_service_connection_reaper(struct work_struct *work)
 {
 	struct rxrpc_connection *conn, *_p;
+	struct rxrpc_net *rxnet =
+		container_of(to_delayed_work(work),
+			     struct rxrpc_net, service_conn_reaper);
 	unsigned long reap_older_than, earliest, idle_timestamp, now;
 
 	LIST_HEAD(graveyard);
@@ -361,8 +362,8 @@ static void rxrpc_connection_reaper(struct work_struct *work)
 	reap_older_than = now - rxrpc_connection_expiry * HZ;
 	earliest = ULONG_MAX;
 
-	write_lock(&rxrpc_connection_lock);
-	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
+	write_lock(&rxnet->conn_lock);
+	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
 		ASSERTCMP(atomic_read(&conn->usage), >, 0);
 		if (likely(atomic_read(&conn->usage) > 1))
 			continue;
@@ -393,12 +394,12 @@ static void rxrpc_connection_reaper(struct work_struct *work)
 
 		list_move_tail(&conn->link, &graveyard);
 	}
-	write_unlock(&rxrpc_connection_lock);
+	write_unlock(&rxnet->conn_lock);
 
 	if (earliest != ULONG_MAX) {
 		_debug("reschedule reaper %ld", (long) earliest - now);
 		ASSERT(time_after(earliest, now));
-		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
+		rxrpc_queue_delayed_work(&rxnet->client_conn_reaper,
					 earliest - now);
 	}
 
@@ -418,36 +419,30 @@ static void rxrpc_connection_reaper(struct work_struct *work)
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 */
-void __exit rxrpc_destroy_all_connections(void)
+void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
 {
 	struct rxrpc_connection *conn, *_p;
 	bool leak = false;
 
 	_enter("");
 
-	rxrpc_destroy_all_client_connections();
+	rxrpc_destroy_all_client_connections(rxnet);
 
 	rxrpc_connection_expiry = 0;
-	cancel_delayed_work(&rxrpc_connection_reap);
-	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
+	cancel_delayed_work(&rxnet->client_conn_reaper);
+	rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0);
 	flush_workqueue(rxrpc_workqueue);
 
-	write_lock(&rxrpc_connection_lock);
-	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
+	write_lock(&rxnet->conn_lock);
+	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
 		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
 		leak = true;
 	}
-	write_unlock(&rxrpc_connection_lock);
+	write_unlock(&rxnet->conn_lock);
 	BUG_ON(leak);
 
-	ASSERT(list_empty(&rxrpc_connection_proc_list));
-
-	/* Make sure the local and peer records pinned by any dying connections
-	 * are released.
-	 */
-	rcu_barrier();
-	rxrpc_destroy_client_conn_ids();
+	ASSERT(list_empty(&rxnet->conn_proc_list));
 
 	_leave("");
 }
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c index eef551f40dc2..edfc633f7d5e 100644 --- a/net/rxrpc/conn_service.c +++ b/net/rxrpc/conn_service.c | |||
@@ -121,7 +121,8 @@ replace_old_connection: | |||
121 | * Preallocate a service connection. The connection is placed on the proc and | 121 | * Preallocate a service connection. The connection is placed on the proc and |
122 | * reap lists so that we don't have to get the lock from BH context. | 122 | * reap lists so that we don't have to get the lock from BH context. |
123 | */ | 123 | */ |
124 | struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t gfp) | 124 | struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet, |
125 | gfp_t gfp) | ||
125 | { | 126 | { |
126 | struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp); | 127 | struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp); |
127 | 128 | ||
@@ -132,10 +133,10 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t gfp) | |||
132 | conn->state = RXRPC_CONN_SERVICE_PREALLOC; | 133 | conn->state = RXRPC_CONN_SERVICE_PREALLOC; |
133 | atomic_set(&conn->usage, 2); | 134 | atomic_set(&conn->usage, 2); |
134 | 135 | ||
135 | write_lock(&rxrpc_connection_lock); | 136 | write_lock(&rxnet->conn_lock); |
136 | list_add_tail(&conn->link, &rxrpc_connections); | 137 | list_add_tail(&conn->link, &rxnet->service_conns); |
137 | list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list); | 138 | list_add_tail(&conn->proc_link, &rxnet->conn_proc_list); |
138 | write_unlock(&rxrpc_connection_lock); | 139 | write_unlock(&rxnet->conn_lock); |
139 | 140 | ||
140 | trace_rxrpc_conn(conn, rxrpc_conn_new_service, | 141 | trace_rxrpc_conn(conn, rxrpc_conn_new_service, |
141 | atomic_read(&conn->usage), | 142 | atomic_read(&conn->usage), |
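The comment above the function carries the design constraint: incoming-call setup runs in softirq context, so the connection is linked onto the per-net proc and reap lists ahead of time, from process context, and rxnet->conn_lock never needs _bh variants. A hedged usage sketch; my_charge_accept_queue() is hypothetical, it assumes the rxrpc headers are available and that the function returns NULL on allocation failure:

    /* Process context: sleepable allocation, lists linked under the plain
     * write_lock inside rxrpc_prealloc_service_connection() above.
     */
    static int my_charge_accept_queue(struct rxrpc_net *rxnet)
    {
            struct rxrpc_connection *conn;

            conn = rxrpc_prealloc_service_connection(rxnet, GFP_KERNEL);
            if (!conn)
                    return -ENOMEM;

            /* Hand conn to the softirq side (e.g. a preallocation ring);
             * BH context can then attach it to an incoming call without
             * taking rxnet->conn_lock at all.
             */
            return 0;
    }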
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index ff4864d550b8..17d79fd73ade 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c | |||
@@ -25,9 +25,6 @@ | |||
25 | static void rxrpc_local_processor(struct work_struct *); | 25 | static void rxrpc_local_processor(struct work_struct *); |
26 | static void rxrpc_local_rcu(struct rcu_head *); | 26 | static void rxrpc_local_rcu(struct rcu_head *); |
27 | 27 | ||
28 | static DEFINE_MUTEX(rxrpc_local_mutex); | ||
29 | static LIST_HEAD(rxrpc_local_endpoints); | ||
30 | |||
31 | /* | 28 | /* |
32 | * Compare a local to an address. Return -ve, 0 or +ve to indicate less than, | 29 | * Compare a local to an address. Return -ve, 0 or +ve to indicate less than, |
33 | * same or greater than. | 30 | * same or greater than. |
@@ -77,13 +74,15 @@ static long rxrpc_local_cmp_key(const struct rxrpc_local *local, | |||
77 | /* | 74 | /* |
78 | * Allocate a new local endpoint. | 75 | * Allocate a new local endpoint. |
79 | */ | 76 | */ |
80 | static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx) | 77 | static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, |
78 | const struct sockaddr_rxrpc *srx) | ||
81 | { | 79 | { |
82 | struct rxrpc_local *local; | 80 | struct rxrpc_local *local; |
83 | 81 | ||
84 | local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); | 82 | local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); |
85 | if (local) { | 83 | if (local) { |
86 | atomic_set(&local->usage, 1); | 84 | atomic_set(&local->usage, 1); |
85 | local->rxnet = rxnet; | ||
87 | INIT_LIST_HEAD(&local->link); | 86 | INIT_LIST_HEAD(&local->link); |
88 | INIT_WORK(&local->processor, rxrpc_local_processor); | 87 | INIT_WORK(&local->processor, rxrpc_local_processor); |
89 | init_rwsem(&local->defrag_sem); | 88 | init_rwsem(&local->defrag_sem); |
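Stamping local->rxnet here is what lets code that later holds only an object pointer (rxrpc_put_service_conn() above via conn->params.local->rxnet, or __rxrpc_put_peer() below via peer->local->rxnet) reach the right namespace without being passed a struct net. The idiom, reduced (my_* names are illustrative; my_net is the reduced record from the earlier sketch):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct my_local {
            struct my_net           *rxnet; /* stamped once at allocation */
            struct list_head        link;   /* on rxnet->local_endpoints */
    };

    static struct my_local *my_alloc_local(struct my_net *rxnet)
    {
            struct my_local *local = kzalloc(sizeof(*local), GFP_KERNEL);

            if (local) {
                    local->rxnet = rxnet;   /* back-pointer for put paths */
                    INIT_LIST_HEAD(&local->link);
            }
            return local;
    }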
@@ -105,7 +104,7 @@ static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx) | |||
105 | * create the local socket | 104 | * create the local socket |
106 | * - must be called with rxrpc_local_mutex locked | 105 | * - must be called with rxrpc_local_mutex locked |
107 | */ | 106 | */ |
108 | static int rxrpc_open_socket(struct rxrpc_local *local) | 107 | static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) |
109 | { | 108 | { |
110 | struct sock *sock; | 109 | struct sock *sock; |
111 | int ret, opt; | 110 | int ret, opt; |
@@ -114,7 +113,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local) | |||
114 | local, local->srx.transport_type, local->srx.transport.family); | 113 | local, local->srx.transport_type, local->srx.transport.family); |
115 | 114 | ||
116 | /* create a socket to represent the local endpoint */ | 115 | /* create a socket to represent the local endpoint */ |
117 | ret = sock_create_kern(&init_net, local->srx.transport.family, | 116 | ret = sock_create_kern(net, local->srx.transport.family, |
118 | local->srx.transport_type, 0, &local->socket); | 117 | local->srx.transport_type, 0, &local->socket); |
119 | if (ret < 0) { | 118 | if (ret < 0) { |
120 | _leave(" = %d [socket]", ret); | 119 | _leave(" = %d [socket]", ret); |
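sock_create_kern() pins the new kernel socket to whichever namespace it is handed; before this hunk every rxrpc transport socket lived in init_net regardless of the caller. The essence of the change as a standalone sketch (my_open_udp_in_netns() is illustrative and error handling is trimmed):

    #include <linux/net.h>
    #include <linux/in.h>
    #include <net/sock.h>

    static int my_open_udp_in_netns(struct net *net, struct socket **sockp)
    {
            int ret;

            /* The first argument selects the namespace the socket, and
             * hence all its traffic, belongs to; init_net was hard-coded
             * here before this patch.
             */
            ret = sock_create_kern(net, AF_INET, SOCK_DGRAM, IPPROTO_UDP,
                                   sockp);
            if (ret < 0)
                    return ret;

            /* ... kernel_bind() to the local address and install the
             * data-ready callbacks, as rxrpc_open_socket() goes on to do.
             */
            return 0;
    }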
@@ -172,9 +171,11 @@ error: | |||
172 | /* | 171 | /* |
173 | * Look up or create a new local endpoint using the specified local address. | 172 | * Look up or create a new local endpoint using the specified local address. |
174 | */ | 173 | */ |
175 | struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx) | 174 | struct rxrpc_local *rxrpc_lookup_local(struct net *net, |
175 | const struct sockaddr_rxrpc *srx) | ||
176 | { | 176 | { |
177 | struct rxrpc_local *local; | 177 | struct rxrpc_local *local; |
178 | struct rxrpc_net *rxnet = rxrpc_net(net); | ||
178 | struct list_head *cursor; | 179 | struct list_head *cursor; |
179 | const char *age; | 180 | const char *age; |
180 | long diff; | 181 | long diff; |
@@ -183,10 +184,10 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx) | |||
183 | _enter("{%d,%d,%pISp}", | 184 | _enter("{%d,%d,%pISp}", |
184 | srx->transport_type, srx->transport.family, &srx->transport); | 185 | srx->transport_type, srx->transport.family, &srx->transport); |
185 | 186 | ||
186 | mutex_lock(&rxrpc_local_mutex); | 187 | mutex_lock(&rxnet->local_mutex); |
187 | 188 | ||
188 | for (cursor = rxrpc_local_endpoints.next; | 189 | for (cursor = rxnet->local_endpoints.next; |
189 | cursor != &rxrpc_local_endpoints; | 190 | cursor != &rxnet->local_endpoints; |
190 | cursor = cursor->next) { | 191 | cursor = cursor->next) { |
191 | local = list_entry(cursor, struct rxrpc_local, link); | 192 | local = list_entry(cursor, struct rxrpc_local, link); |
192 | 193 | ||
@@ -220,11 +221,11 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx) | |||
220 | goto found; | 221 | goto found; |
221 | } | 222 | } |
222 | 223 | ||
223 | local = rxrpc_alloc_local(srx); | 224 | local = rxrpc_alloc_local(rxnet, srx); |
224 | if (!local) | 225 | if (!local) |
225 | goto nomem; | 226 | goto nomem; |
226 | 227 | ||
227 | ret = rxrpc_open_socket(local); | 228 | ret = rxrpc_open_socket(local, net); |
228 | if (ret < 0) | 229 | if (ret < 0) |
229 | goto sock_error; | 230 | goto sock_error; |
230 | 231 | ||
@@ -232,7 +233,7 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx) | |||
232 | age = "new"; | 233 | age = "new"; |
233 | 234 | ||
234 | found: | 235 | found: |
235 | mutex_unlock(&rxrpc_local_mutex); | 236 | mutex_unlock(&rxnet->local_mutex); |
236 | 237 | ||
237 | _net("LOCAL %s %d {%pISp}", | 238 | _net("LOCAL %s %d {%pISp}", |
238 | age, local->debug_id, &local->srx.transport); | 239 | age, local->debug_id, &local->srx.transport); |
@@ -243,13 +244,13 @@ found: | |||
243 | nomem: | 244 | nomem: |
244 | ret = -ENOMEM; | 245 | ret = -ENOMEM; |
245 | sock_error: | 246 | sock_error: |
246 | mutex_unlock(&rxrpc_local_mutex); | 247 | mutex_unlock(&rxnet->local_mutex); |
247 | kfree(local); | 248 | kfree(local); |
248 | _leave(" = %d", ret); | 249 | _leave(" = %d", ret); |
249 | return ERR_PTR(ret); | 250 | return ERR_PTR(ret); |
250 | 251 | ||
251 | addr_in_use: | 252 | addr_in_use: |
252 | mutex_unlock(&rxrpc_local_mutex); | 253 | mutex_unlock(&rxnet->local_mutex); |
253 | _leave(" = -EADDRINUSE"); | 254 | _leave(" = -EADDRINUSE"); |
254 | return ERR_PTR(-EADDRINUSE); | 255 | return ERR_PTR(-EADDRINUSE); |
255 | } | 256 | } |
@@ -273,6 +274,7 @@ void __rxrpc_put_local(struct rxrpc_local *local) | |||
273 | static void rxrpc_local_destroyer(struct rxrpc_local *local) | 274 | static void rxrpc_local_destroyer(struct rxrpc_local *local) |
274 | { | 275 | { |
275 | struct socket *socket = local->socket; | 276 | struct socket *socket = local->socket; |
277 | struct rxrpc_net *rxnet = local->rxnet; | ||
276 | 278 | ||
277 | _enter("%d", local->debug_id); | 279 | _enter("%d", local->debug_id); |
278 | 280 | ||
@@ -286,9 +288,9 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local) | |||
286 | } | 288 | } |
287 | local->dead = true; | 289 | local->dead = true; |
288 | 290 | ||
289 | mutex_lock(&rxrpc_local_mutex); | 291 | mutex_lock(&rxnet->local_mutex); |
290 | list_del_init(&local->link); | 292 | list_del_init(&local->link); |
291 | mutex_unlock(&rxrpc_local_mutex); | 293 | mutex_unlock(&rxnet->local_mutex); |
292 | 294 | ||
293 | ASSERT(RB_EMPTY_ROOT(&local->client_conns)); | 295 | ASSERT(RB_EMPTY_ROOT(&local->client_conns)); |
294 | ASSERT(!local->service); | 296 | ASSERT(!local->service); |
@@ -357,7 +359,7 @@ static void rxrpc_local_rcu(struct rcu_head *rcu) | |||
357 | /* | 359 | /* |
358 | * Verify the local endpoint list is empty by this point. | 360 | * Verify the local endpoint list is empty by this point. |
359 | */ | 361 | */ |
360 | void __exit rxrpc_destroy_all_locals(void) | 362 | void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet) |
361 | { | 363 | { |
362 | struct rxrpc_local *local; | 364 | struct rxrpc_local *local; |
363 | 365 | ||
@@ -365,15 +367,13 @@ void __exit rxrpc_destroy_all_locals(void) | |||
365 | 367 | ||
366 | flush_workqueue(rxrpc_workqueue); | 368 | flush_workqueue(rxrpc_workqueue); |
367 | 369 | ||
368 | if (!list_empty(&rxrpc_local_endpoints)) { | 370 | if (!list_empty(&rxnet->local_endpoints)) { |
369 | mutex_lock(&rxrpc_local_mutex); | 371 | mutex_lock(&rxnet->local_mutex); |
370 | list_for_each_entry(local, &rxrpc_local_endpoints, link) { | 372 | list_for_each_entry(local, &rxnet->local_endpoints, link) { |
371 | pr_err("AF_RXRPC: Leaked local %p {%d}\n", | 373 | pr_err("AF_RXRPC: Leaked local %p {%d}\n", |
372 | local, atomic_read(&local->usage)); | 374 | local, atomic_read(&local->usage)); |
373 | } | 375 | } |
374 | mutex_unlock(&rxrpc_local_mutex); | 376 | mutex_unlock(&rxnet->local_mutex); |
375 | BUG(); | 377 | BUG(); |
376 | } | 378 | } |
377 | |||
378 | rcu_barrier(); | ||
379 | } | 379 | } |
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c new file mode 100644 index 000000000000..26449a6bb076 --- /dev/null +++ b/net/rxrpc/net_ns.c | |||
@@ -0,0 +1,85 @@ | |||
1 | /* rxrpc network namespace handling. | ||
2 | * | ||
3 | * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/proc_fs.h> | ||
13 | #include "ar-internal.h" | ||
14 | |||
15 | unsigned int rxrpc_net_id; | ||
16 | |||
17 | /* | ||
18 | * Initialise a per-network namespace record. | ||
19 | */ | ||
20 | static __net_init int rxrpc_init_net(struct net *net) | ||
21 | { | ||
22 | struct rxrpc_net *rxnet = rxrpc_net(net); | ||
23 | int ret; | ||
24 | |||
25 | get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch)); | ||
26 | rxnet->epoch |= RXRPC_RANDOM_EPOCH; | ||
27 | |||
28 | INIT_LIST_HEAD(&rxnet->calls); | ||
29 | rwlock_init(&rxnet->call_lock); | ||
30 | |||
31 | INIT_LIST_HEAD(&rxnet->conn_proc_list); | ||
32 | INIT_LIST_HEAD(&rxnet->service_conns); | ||
33 | rwlock_init(&rxnet->conn_lock); | ||
34 | INIT_DELAYED_WORK(&rxnet->service_conn_reaper, | ||
35 | rxrpc_service_connection_reaper); | ||
36 | |||
37 | rxnet->nr_client_conns = 0; | ||
38 | rxnet->nr_active_client_conns = 0; | ||
39 | rxnet->kill_all_client_conns = false; | ||
40 | spin_lock_init(&rxnet->client_conn_cache_lock); | ||
41 | spin_lock_init(&rxnet->client_conn_discard_lock); | ||
42 | INIT_LIST_HEAD(&rxnet->waiting_client_conns); | ||
43 | INIT_LIST_HEAD(&rxnet->active_client_conns); | ||
44 | INIT_LIST_HEAD(&rxnet->idle_client_conns); | ||
45 | INIT_DELAYED_WORK(&rxnet->client_conn_reaper, | ||
46 | rxrpc_discard_expired_client_conns); | ||
47 | |||
48 | INIT_LIST_HEAD(&rxnet->local_endpoints); | ||
49 | mutex_init(&rxnet->local_mutex); | ||
50 | hash_init(rxnet->peer_hash); | ||
51 | spin_lock_init(&rxnet->peer_hash_lock); | ||
52 | |||
53 | ret = -ENOMEM; | ||
54 | rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net); | ||
55 | if (!rxnet->proc_net) | ||
56 | goto err_proc; | ||
57 | |||
58 | proc_create("calls", 0444, rxnet->proc_net, &rxrpc_call_seq_fops); | ||
59 | proc_create("conns", 0444, rxnet->proc_net, &rxrpc_connection_seq_fops); | ||
60 | return 0; | ||
61 | |||
62 | proc_remove(rxnet->proc_net); | ||
63 | err_proc: | ||
64 | return ret; | ||
65 | } | ||
66 | |||
67 | /* | ||
68 | * Clean up a per-network namespace record. | ||
69 | */ | ||
70 | static __net_exit void rxrpc_exit_net(struct net *net) | ||
71 | { | ||
72 | struct rxrpc_net *rxnet = rxrpc_net(net); | ||
73 | |||
74 | rxrpc_destroy_all_calls(rxnet); | ||
75 | rxrpc_destroy_all_connections(rxnet); | ||
76 | rxrpc_destroy_all_locals(rxnet); | ||
77 | proc_remove(rxnet->proc_net); | ||
78 | } | ||
79 | |||
80 | struct pernet_operations rxrpc_net_ops = { | ||
81 | .init = rxrpc_init_net, | ||
82 | .exit = rxrpc_exit_net, | ||
83 | .id = &rxrpc_net_id, | ||
84 | .size = sizeof(struct rxrpc_net), | ||
85 | }; | ||
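The .id/.size pair asks the core to allocate sizeof(struct rxrpc_net) for every namespace and to record the slot index in rxrpc_net_id; rxrpc_init_net() and rxrpc_exit_net() then run against that storage as namespaces come and go. The rxrpc_net() accessor used throughout the patch is presumably just a net_generic() wrapper defined in ar-internal.h (not quoted in this excerpt); a hedged sketch of the accessor and of the registration that would bracket module init/exit:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <net/net_namespace.h>
    #include <net/netns/generic.h>

    extern unsigned int rxrpc_net_id;
    extern struct pernet_operations rxrpc_net_ops;

    /* Fetch this namespace's rxrpc record from the generic-pointer array
     * that the core populated because rxrpc_net_ops.size is non-zero.
     */
    static inline struct rxrpc_net *rxrpc_net(struct net *net)
    {
            return net_generic(net, rxrpc_net_id);
    }

    /* Assumption: the af_rxrpc.c half of the patch registers and
     * unregisters the operations around module init/exit, roughly so:
     */
    static int __init my_module_init(void)
    {
            return register_pernet_subsys(&rxrpc_net_ops);
    }

    static void __exit my_module_exit(void)
    {
            unregister_pernet_subsys(&rxrpc_net_ops);
    }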
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index 862eea6b266c..cfed3b27adf0 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c | |||
@@ -26,9 +26,6 @@ | |||
26 | #include <net/ip6_route.h> | 26 | #include <net/ip6_route.h> |
27 | #include "ar-internal.h" | 27 | #include "ar-internal.h" |
28 | 28 | ||
29 | static DEFINE_HASHTABLE(rxrpc_peer_hash, 10); | ||
30 | static DEFINE_SPINLOCK(rxrpc_peer_hash_lock); | ||
31 | |||
32 | /* | 29 | /* |
33 | * Hash a peer key. | 30 | * Hash a peer key. |
34 | */ | 31 | */ |
@@ -124,8 +121,9 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu( | |||
124 | unsigned long hash_key) | 121 | unsigned long hash_key) |
125 | { | 122 | { |
126 | struct rxrpc_peer *peer; | 123 | struct rxrpc_peer *peer; |
124 | struct rxrpc_net *rxnet = local->rxnet; | ||
127 | 125 | ||
128 | hash_for_each_possible_rcu(rxrpc_peer_hash, peer, hash_link, hash_key) { | 126 | hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) { |
129 | if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) { | 127 | if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) { |
130 | if (atomic_read(&peer->usage) == 0) | 128 | if (atomic_read(&peer->usage) == 0) |
131 | return NULL; | 129 | return NULL; |
@@ -301,13 +299,14 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local, | |||
301 | struct rxrpc_peer *prealloc) | 299 | struct rxrpc_peer *prealloc) |
302 | { | 300 | { |
303 | struct rxrpc_peer *peer; | 301 | struct rxrpc_peer *peer; |
302 | struct rxrpc_net *rxnet = local->rxnet; | ||
304 | unsigned long hash_key; | 303 | unsigned long hash_key; |
305 | 304 | ||
306 | hash_key = rxrpc_peer_hash_key(local, &prealloc->srx); | 305 | hash_key = rxrpc_peer_hash_key(local, &prealloc->srx); |
307 | prealloc->local = local; | 306 | prealloc->local = local; |
308 | rxrpc_init_peer(prealloc, hash_key); | 307 | rxrpc_init_peer(prealloc, hash_key); |
309 | 308 | ||
310 | spin_lock(&rxrpc_peer_hash_lock); | 309 | spin_lock(&rxnet->peer_hash_lock); |
311 | 310 | ||
312 | /* Need to check that we aren't racing with someone else */ | 311 | /* Need to check that we aren't racing with someone else */ |
313 | peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key); | 312 | peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key); |
@@ -315,10 +314,10 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local, | |||
315 | peer = NULL; | 314 | peer = NULL; |
316 | if (!peer) { | 315 | if (!peer) { |
317 | peer = prealloc; | 316 | peer = prealloc; |
318 | hash_add_rcu(rxrpc_peer_hash, &peer->hash_link, hash_key); | 317 | hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); |
319 | } | 318 | } |
320 | 319 | ||
321 | spin_unlock(&rxrpc_peer_hash_lock); | 320 | spin_unlock(&rxnet->peer_hash_lock); |
322 | return peer; | 321 | return peer; |
323 | } | 322 | } |
324 | 323 | ||
@@ -329,6 +328,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, | |||
329 | struct sockaddr_rxrpc *srx, gfp_t gfp) | 328 | struct sockaddr_rxrpc *srx, gfp_t gfp) |
330 | { | 329 | { |
331 | struct rxrpc_peer *peer, *candidate; | 330 | struct rxrpc_peer *peer, *candidate; |
331 | struct rxrpc_net *rxnet = local->rxnet; | ||
332 | unsigned long hash_key = rxrpc_peer_hash_key(local, srx); | 332 | unsigned long hash_key = rxrpc_peer_hash_key(local, srx); |
333 | 333 | ||
334 | _enter("{%pISp}", &srx->transport); | 334 | _enter("{%pISp}", &srx->transport); |
@@ -350,17 +350,17 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, | |||
350 | return NULL; | 350 | return NULL; |
351 | } | 351 | } |
352 | 352 | ||
353 | spin_lock_bh(&rxrpc_peer_hash_lock); | 353 | spin_lock_bh(&rxnet->peer_hash_lock); |
354 | 354 | ||
355 | /* Need to check that we aren't racing with someone else */ | 355 | /* Need to check that we aren't racing with someone else */ |
356 | peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key); | 356 | peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key); |
357 | if (peer && !rxrpc_get_peer_maybe(peer)) | 357 | if (peer && !rxrpc_get_peer_maybe(peer)) |
358 | peer = NULL; | 358 | peer = NULL; |
359 | if (!peer) | 359 | if (!peer) |
360 | hash_add_rcu(rxrpc_peer_hash, | 360 | hash_add_rcu(rxnet->peer_hash, |
361 | &candidate->hash_link, hash_key); | 361 | &candidate->hash_link, hash_key); |
362 | 362 | ||
363 | spin_unlock_bh(&rxrpc_peer_hash_lock); | 363 | spin_unlock_bh(&rxnet->peer_hash_lock); |
364 | 364 | ||
365 | if (peer) | 365 | if (peer) |
366 | kfree(candidate); | 366 | kfree(candidate); |
@@ -379,11 +379,13 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, | |||
379 | */ | 379 | */ |
380 | void __rxrpc_put_peer(struct rxrpc_peer *peer) | 380 | void __rxrpc_put_peer(struct rxrpc_peer *peer) |
381 | { | 381 | { |
382 | struct rxrpc_net *rxnet = peer->local->rxnet; | ||
383 | |||
382 | ASSERT(hlist_empty(&peer->error_targets)); | 384 | ASSERT(hlist_empty(&peer->error_targets)); |
383 | 385 | ||
384 | spin_lock_bh(&rxrpc_peer_hash_lock); | 386 | spin_lock_bh(&rxnet->peer_hash_lock); |
385 | hash_del_rcu(&peer->hash_link); | 387 | hash_del_rcu(&peer->hash_link); |
386 | spin_unlock_bh(&rxrpc_peer_hash_lock); | 388 | spin_unlock_bh(&rxnet->peer_hash_lock); |
387 | 389 | ||
388 | kfree_rcu(peer, rcu); | 390 | kfree_rcu(peer, rcu); |
389 | } | 391 | } |
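With the table moved inside struct rxrpc_net, the old one-step DEFINE_HASHTABLE() (definition plus static initialisation) splits into DECLARE_HASHTABLE() in the struct and hash_init() in rxrpc_init_net(). The resulting per-net lookup/insert pattern, as a self-contained reduction (my_* names are illustrative):

    #include <linux/hashtable.h>
    #include <linux/spinlock.h>

    struct my_net {
            DECLARE_HASHTABLE(peer_hash, 10);       /* 2^10 buckets, as before */
            spinlock_t              peer_hash_lock;
    };

    struct my_peer {
            struct hlist_node       hash_link;
            unsigned long           key;
    };

    /* RCU readers traverse only this namespace's chains. */
    static struct my_peer *my_lookup_peer_rcu(struct my_net *rxnet,
                                              unsigned long key)
    {
            struct my_peer *peer;

            hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, key)
                    if (peer->key == key)
                            return peer;
            return NULL;
    }

    /* Writers serialise on this namespace's lock only, not a global one. */
    static void my_insert_peer(struct my_net *rxnet, struct my_peer *peer)
    {
            spin_lock_bh(&rxnet->peer_hash_lock);
            hash_add_rcu(rxnet->peer_hash, &peer->hash_link, peer->key);
            spin_unlock_bh(&rxnet->peer_hash_lock);
    }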
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index b9bcfbfb095c..e92d8405b15a 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c | |||
@@ -30,19 +30,25 @@ static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = { | |||
30 | */ | 30 | */ |
31 | static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos) | 31 | static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos) |
32 | { | 32 | { |
33 | struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); | ||
34 | |||
33 | rcu_read_lock(); | 35 | rcu_read_lock(); |
34 | read_lock(&rxrpc_call_lock); | 36 | read_lock(&rxnet->call_lock); |
35 | return seq_list_start_head(&rxrpc_calls, *_pos); | 37 | return seq_list_start_head(&rxnet->calls, *_pos); |
36 | } | 38 | } |
37 | 39 | ||
38 | static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 40 | static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
39 | { | 41 | { |
40 | return seq_list_next(v, &rxrpc_calls, pos); | 42 | struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); |
43 | |||
44 | return seq_list_next(v, &rxnet->calls, pos); | ||
41 | } | 45 | } |
42 | 46 | ||
43 | static void rxrpc_call_seq_stop(struct seq_file *seq, void *v) | 47 | static void rxrpc_call_seq_stop(struct seq_file *seq, void *v) |
44 | { | 48 | { |
45 | read_unlock(&rxrpc_call_lock); | 49 | struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); |
50 | |||
51 | read_unlock(&rxnet->call_lock); | ||
46 | rcu_read_unlock(); | 52 | rcu_read_unlock(); |
47 | } | 53 | } |
48 | 54 | ||
@@ -52,10 +58,11 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) | |||
52 | struct rxrpc_sock *rx; | 58 | struct rxrpc_sock *rx; |
53 | struct rxrpc_peer *peer; | 59 | struct rxrpc_peer *peer; |
54 | struct rxrpc_call *call; | 60 | struct rxrpc_call *call; |
61 | struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); | ||
55 | rxrpc_seq_t tx_hard_ack, rx_hard_ack; | 62 | rxrpc_seq_t tx_hard_ack, rx_hard_ack; |
56 | char lbuff[50], rbuff[50]; | 63 | char lbuff[50], rbuff[50]; |
57 | 64 | ||
58 | if (v == &rxrpc_calls) { | 65 | if (v == &rxnet->calls) { |
59 | seq_puts(seq, | 66 | seq_puts(seq, |
60 | "Proto Local " | 67 | "Proto Local " |
61 | " Remote " | 68 | " Remote " |
@@ -113,7 +120,8 @@ static const struct seq_operations rxrpc_call_seq_ops = { | |||
113 | 120 | ||
114 | static int rxrpc_call_seq_open(struct inode *inode, struct file *file) | 121 | static int rxrpc_call_seq_open(struct inode *inode, struct file *file) |
115 | { | 122 | { |
116 | return seq_open(file, &rxrpc_call_seq_ops); | 123 | return seq_open_net(inode, file, &rxrpc_call_seq_ops, |
124 | sizeof(struct seq_net_private)); | ||
117 | } | 125 | } |
118 | 126 | ||
119 | const struct file_operations rxrpc_call_seq_fops = { | 127 | const struct file_operations rxrpc_call_seq_fops = { |
@@ -129,27 +137,34 @@ const struct file_operations rxrpc_call_seq_fops = { | |||
129 | */ | 137 | */ |
130 | static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos) | 138 | static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos) |
131 | { | 139 | { |
132 | read_lock(&rxrpc_connection_lock); | 140 | struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); |
133 | return seq_list_start_head(&rxrpc_connection_proc_list, *_pos); | 141 | |
142 | read_lock(&rxnet->conn_lock); | ||
143 | return seq_list_start_head(&rxnet->conn_proc_list, *_pos); | ||
134 | } | 144 | } |
135 | 145 | ||
136 | static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v, | 146 | static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v, |
137 | loff_t *pos) | 147 | loff_t *pos) |
138 | { | 148 | { |
139 | return seq_list_next(v, &rxrpc_connection_proc_list, pos); | 149 | struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); |
150 | |||
151 | return seq_list_next(v, &rxnet->conn_proc_list, pos); | ||
140 | } | 152 | } |
141 | 153 | ||
142 | static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v) | 154 | static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v) |
143 | { | 155 | { |
144 | read_unlock(&rxrpc_connection_lock); | 156 | struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); |
157 | |||
158 | read_unlock(&rxnet->conn_lock); | ||
145 | } | 159 | } |
146 | 160 | ||
147 | static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) | 161 | static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) |
148 | { | 162 | { |
149 | struct rxrpc_connection *conn; | 163 | struct rxrpc_connection *conn; |
164 | struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); | ||
150 | char lbuff[50], rbuff[50]; | 165 | char lbuff[50], rbuff[50]; |
151 | 166 | ||
152 | if (v == &rxrpc_connection_proc_list) { | 167 | if (v == &rxnet->conn_proc_list) { |
153 | seq_puts(seq, | 168 | seq_puts(seq, |
154 | "Proto Local " | 169 | "Proto Local " |
155 | " Remote " | 170 | " Remote " |
@@ -197,7 +212,8 @@ static const struct seq_operations rxrpc_connection_seq_ops = { | |||
197 | 212 | ||
198 | static int rxrpc_connection_seq_open(struct inode *inode, struct file *file) | 213 | static int rxrpc_connection_seq_open(struct inode *inode, struct file *file) |
199 | { | 214 | { |
200 | return seq_open(file, &rxrpc_connection_seq_ops); | 215 | return seq_open_net(inode, file, &rxrpc_connection_seq_ops, |
216 | sizeof(struct seq_net_private)); | ||
201 | } | 217 | } |
202 | 218 | ||
203 | const struct file_operations rxrpc_connection_seq_fops = { | 219 | const struct file_operations rxrpc_connection_seq_fops = { |
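The seq_open_net()/seq_file_net() pairing above is what threads the namespace through the /proc iterators: seq_open_net() allocates a struct seq_net_private as the seq_file's private data and records the opening inode's network namespace in it, and seq_file_net() reads it back in each ->start/->next/->stop/->show callback. A minimal sketch of the pairing (my_* names are illustrative; rxrpc_net() and the rxnet fields are as in the rest of the patch):

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>
    #include <linux/seq_file_net.h>

    static void *my_seq_start(struct seq_file *seq, loff_t *_pos)
    {
            /* seq_file_net() recovers the namespace recorded at open time. */
            struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

            read_lock(&rxnet->call_lock);
            return seq_list_start_head(&rxnet->calls, *_pos);
    }

    /* .next/.stop/.show elided; they use seq_file_net() the same way. */
    static const struct seq_operations my_seq_ops = {
            .start  = my_seq_start,
    };

    static int my_seq_open(struct inode *inode, struct file *file)
    {
            /* The sizeof() argument makes seq_open_net() allocate the
             * seq_net_private that carries the struct net pointer; the
             * matching release should then be seq_release_net().
             */
            return seq_open_net(inode, file, &my_seq_ops,
                                sizeof(struct seq_net_private));
    }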