Diffstat (limited to 'net/tipc')
39 files changed, 4415 insertions, 4063 deletions
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index c890848f9d56..c25a3a149dc4 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -20,21 +20,17 @@ menuconfig TIPC
 
 	  If in doubt, say N.
 
-config TIPC_PORTS
-	int "Maximum number of ports in a node"
-	depends on TIPC
-	range 127 65535
-	default "8191"
-	help
-	  Specifies how many ports can be supported by a node.
-	  Can range from 127 to 65535 ports; default is 8191.
-
-	  Setting this to a smaller value saves some memory,
-	  setting it to higher allows for more ports.
-
 config TIPC_MEDIA_IB
 	bool "InfiniBand media type support"
 	depends on TIPC && INFINIBAND_IPOIB
 	help
 	  Saying Y here will enable support for running TIPC on
 	  IP-over-InfiniBand devices.
+config TIPC_MEDIA_UDP
+	bool "IP/UDP media type support"
+	depends on TIPC
+	select NET_UDP_TUNNEL
+	help
+	  Saying Y here will enable support for running TIPC over IP/UDP
+	bool
+	default y
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index 333e4592772c..57e460be4692 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -4,11 +4,12 @@
 
 obj-$(CONFIG_TIPC) := tipc.o
 
-tipc-y	+= addr.o bcast.o bearer.o config.o \
+tipc-y	+= addr.o bcast.o bearer.o \
 	   core.o link.o discover.o msg.o \
 	   name_distr.o subscr.o name_table.o net.o \
-	   netlink.o node.o socket.o log.o eth_media.o \
-	   server.o
+	   netlink.o netlink_compat.o node.o socket.o eth_media.o \
+	   server.o socket.o
 
+tipc-$(CONFIG_TIPC_MEDIA_UDP)	+= udp_media.o
 tipc-$(CONFIG_TIPC_MEDIA_IB)	+= ib_media.o
 tipc-$(CONFIG_SYSCTL)		+= sysctl.o
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index 357b74b26f9e..ba7daa864d44 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -34,8 +34,58 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "core.h"
+#include <linux/kernel.h>
 #include "addr.h"
+#include "core.h"
+
+u32 tipc_own_addr(struct net *net)
+{
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+	return tn->own_addr;
+}
+
+/**
+ * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
+ */
+int in_own_cluster(struct net *net, u32 addr)
+{
+	return in_own_cluster_exact(net, addr) || !addr;
+}
+
+int in_own_cluster_exact(struct net *net, u32 addr)
+{
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+	return !((addr ^ tn->own_addr) >> 12);
+}
+
+/**
+ * in_own_node - test for node inclusion; <0.0.0> always matches
+ */
+int in_own_node(struct net *net, u32 addr)
+{
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+	return (addr == tn->own_addr) || !addr;
+}
+
+/**
+ * addr_domain - convert 2-bit scope value to equivalent message lookup domain
+ *
+ * Needed when address of a named message must be looked up a second time
+ * after a network hop.
+ */
+u32 addr_domain(struct net *net, u32 sc)
+{
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+	if (likely(sc == TIPC_NODE_SCOPE))
+		return tn->own_addr;
+	if (sc == TIPC_CLUSTER_SCOPE)
+		return tipc_cluster_mask(tn->own_addr);
+	return tipc_zone_mask(tn->own_addr);
+}
 
 /**
  * tipc_addr_domain_valid - validates a network domain address
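For readers unfamiliar with the pattern used throughout the new addr.c code, the per-namespace state reached via net_generic(net, tipc_net_id) is set up with the kernel's standard pernet_operations machinery. The sketch below is illustrative only and not part of this patch: the init/exit function names and any struct tipc_net fields beyond own_addr and net_id are assumptions.

/*
 * Illustrative sketch (not from the patch) of how per-namespace state of
 * the kind used above is typically wired up.  Field layout beyond
 * own_addr/net_id and the init/exit names are assumptions.
 */
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int tipc_net_id __read_mostly;

struct tipc_net {
	u32 own_addr;	/* node address within this namespace */
	int net_id;	/* cluster identity within this namespace */
	/* ... bearer list, broadcast link, name table, etc. ... */
};

static int __net_init tipc_init_net(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tn->own_addr = 0;	/* not yet configured */
	tn->net_id = 4711;	/* example default */
	return 0;
}

static void __net_exit tipc_exit_net(struct net *net)
{
	/* tear down per-namespace state here */
}

static struct pernet_operations tipc_net_ops = {
	.init = tipc_init_net,
	.exit = tipc_exit_net,
	.id   = &tipc_net_id,
	.size = sizeof(struct tipc_net),
};

/* registered once at module init: register_pernet_subsys(&tipc_net_ops); */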
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index a74acf9ee804..7ba6d5c8ae40 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -37,7 +37,10 @@
 #ifndef _TIPC_ADDR_H
 #define _TIPC_ADDR_H
 
-#include "core.h"
+#include <linux/types.h>
+#include <linux/tipc.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #define TIPC_ZONE_MASK		0xff000000u
 #define TIPC_CLUSTER_MASK	0xfffff000u
@@ -52,42 +55,11 @@ static inline u32 tipc_cluster_mask(u32 addr)
 	return addr & TIPC_CLUSTER_MASK;
 }
 
-static inline int in_own_cluster_exact(u32 addr)
-{
-	return !((addr ^ tipc_own_addr) >> 12);
-}
-
-/**
- * in_own_node - test for node inclusion; <0.0.0> always matches
- */
-static inline int in_own_node(u32 addr)
-{
-	return (addr == tipc_own_addr) || !addr;
-}
-
-/**
- * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
- */
-static inline int in_own_cluster(u32 addr)
-{
-	return in_own_cluster_exact(addr) || !addr;
-}
-
-/**
- * addr_domain - convert 2-bit scope value to equivalent message lookup domain
- *
- * Needed when address of a named message must be looked up a second time
- * after a network hop.
- */
-static inline u32 addr_domain(u32 sc)
-{
-	if (likely(sc == TIPC_NODE_SCOPE))
-		return tipc_own_addr;
-	if (sc == TIPC_CLUSTER_SCOPE)
-		return tipc_cluster_mask(tipc_own_addr);
-	return tipc_zone_mask(tipc_own_addr);
-}
-
+u32 tipc_own_addr(struct net *net);
+int in_own_cluster(struct net *net, u32 addr);
+int in_own_cluster_exact(struct net *net, u32 addr);
+int in_own_node(struct net *net, u32 addr);
+u32 addr_domain(struct net *net, u32 sc);
 int tipc_addr_domain_valid(u32);
 int tipc_addr_node_valid(u32 addr);
 int tipc_in_scope(u32 domain, u32 addr);
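The masks kept in addr.h and the "(addr ^ own_addr) >> 12" test in the new in_own_cluster_exact() follow from the TIPC address layout: 8 zone bits, 12 cluster bits and 12 node bits packed into a u32. The stand-alone user-space example below is not part of the patch; the tipc_addr() helper is written locally for illustration of the arithmetic.

/*
 * Illustrative, self-contained example of the address arithmetic behind
 * TIPC_ZONE_MASK/TIPC_CLUSTER_MASK and the cluster test used above.
 */
#include <stdio.h>
#include <stdint.h>

#define TIPC_ZONE_MASK    0xff000000u
#define TIPC_CLUSTER_MASK 0xfffff000u

/* Local helper: compose a <zone.cluster.node> address as a 32-bit value. */
static uint32_t tipc_addr(uint32_t zone, uint32_t cluster, uint32_t node)
{
	return (zone << 24) | (cluster << 12) | node;
}

static int same_cluster(uint32_t a, uint32_t b)
{
	/* Equivalent to in_own_cluster_exact(): ignore the 12 node bits. */
	return !((a ^ b) >> 12);
}

int main(void)
{
	uint32_t own  = tipc_addr(1, 1, 10);	/* <1.1.10> */
	uint32_t peer = tipc_addr(1, 1, 11);	/* <1.1.11> */
	uint32_t far  = tipc_addr(1, 2, 10);	/* <1.2.10> */

	printf("zone bits of own:    0x%08x\n", own & TIPC_ZONE_MASK);
	printf("cluster bits of own: 0x%08x\n", own & TIPC_CLUSTER_MASK);
	printf("own/peer same cluster? %d\n", same_cluster(own, peer)); /* 1 */
	printf("own/far  same cluster? %d\n", same_cluster(own, far));  /* 0 */
	return 0;
}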
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a9e174fc0f91..c5cbdcb1f0b5 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/bcast.c: TIPC broadcast code | 2 | * net/tipc/bcast.c: TIPC broadcast code |
3 | * | 3 | * |
4 | * Copyright (c) 2004-2006, 2014, Ericsson AB | 4 | * Copyright (c) 2004-2006, 2014-2015, Ericsson AB |
5 | * Copyright (c) 2004, Intel Corporation. | 5 | * Copyright (c) 2004, Intel Corporation. |
6 | * Copyright (c) 2005, 2010-2011, Wind River Systems | 6 | * Copyright (c) 2005, 2010-2011, Wind River Systems |
7 | * All rights reserved. | 7 | * All rights reserved. |
@@ -35,77 +35,14 @@ | |||
35 | * POSSIBILITY OF SUCH DAMAGE. | 35 | * POSSIBILITY OF SUCH DAMAGE. |
36 | */ | 36 | */ |
37 | 37 | ||
38 | #include "core.h" | ||
39 | #include "link.h" | ||
40 | #include "socket.h" | 38 | #include "socket.h" |
41 | #include "msg.h" | 39 | #include "msg.h" |
42 | #include "bcast.h" | 40 | #include "bcast.h" |
43 | #include "name_distr.h" | 41 | #include "name_distr.h" |
42 | #include "core.h" | ||
44 | 43 | ||
45 | #define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */ | 44 | #define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */ |
46 | #define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */ | 45 | #define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */ |
47 | #define BCBEARER MAX_BEARERS | ||
48 | |||
49 | /** | ||
50 | * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link | ||
51 | * @primary: pointer to primary bearer | ||
52 | * @secondary: pointer to secondary bearer | ||
53 | * | ||
54 | * Bearers must have same priority and same set of reachable destinations | ||
55 | * to be paired. | ||
56 | */ | ||
57 | |||
58 | struct tipc_bcbearer_pair { | ||
59 | struct tipc_bearer *primary; | ||
60 | struct tipc_bearer *secondary; | ||
61 | }; | ||
62 | |||
63 | /** | ||
64 | * struct tipc_bcbearer - bearer used by broadcast link | ||
65 | * @bearer: (non-standard) broadcast bearer structure | ||
66 | * @media: (non-standard) broadcast media structure | ||
67 | * @bpairs: array of bearer pairs | ||
68 | * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort() | ||
69 | * @remains: temporary node map used by tipc_bcbearer_send() | ||
70 | * @remains_new: temporary node map used tipc_bcbearer_send() | ||
71 | * | ||
72 | * Note: The fields labelled "temporary" are incorporated into the bearer | ||
73 | * to avoid consuming potentially limited stack space through the use of | ||
74 | * large local variables within multicast routines. Concurrent access is | ||
75 | * prevented through use of the spinlock "bclink_lock". | ||
76 | */ | ||
77 | struct tipc_bcbearer { | ||
78 | struct tipc_bearer bearer; | ||
79 | struct tipc_media media; | ||
80 | struct tipc_bcbearer_pair bpairs[MAX_BEARERS]; | ||
81 | struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1]; | ||
82 | struct tipc_node_map remains; | ||
83 | struct tipc_node_map remains_new; | ||
84 | }; | ||
85 | |||
86 | /** | ||
87 | * struct tipc_bclink - link used for broadcast messages | ||
88 | * @lock: spinlock governing access to structure | ||
89 | * @link: (non-standard) broadcast link structure | ||
90 | * @node: (non-standard) node structure representing b'cast link's peer node | ||
91 | * @flags: represent bclink states | ||
92 | * @bcast_nodes: map of broadcast-capable nodes | ||
93 | * @retransmit_to: node that most recently requested a retransmit | ||
94 | * | ||
95 | * Handles sequence numbering, fragmentation, bundling, etc. | ||
96 | */ | ||
97 | struct tipc_bclink { | ||
98 | spinlock_t lock; | ||
99 | struct tipc_link link; | ||
100 | struct tipc_node node; | ||
101 | unsigned int flags; | ||
102 | struct tipc_node_map bcast_nodes; | ||
103 | struct tipc_node *retransmit_to; | ||
104 | }; | ||
105 | |||
106 | static struct tipc_bcbearer *bcbearer; | ||
107 | static struct tipc_bclink *bclink; | ||
108 | static struct tipc_link *bcl; | ||
109 | 46 | ||
110 | const char tipc_bclink_name[] = "broadcast-link"; | 47 | const char tipc_bclink_name[] = "broadcast-link"; |
111 | 48 | ||
@@ -115,28 +52,25 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a, | |||
115 | static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node); | 52 | static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node); |
116 | static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node); | 53 | static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node); |
117 | 54 | ||
118 | static void tipc_bclink_lock(void) | 55 | static void tipc_bclink_lock(struct net *net) |
119 | { | 56 | { |
120 | spin_lock_bh(&bclink->lock); | 57 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
58 | |||
59 | spin_lock_bh(&tn->bclink->lock); | ||
121 | } | 60 | } |
122 | 61 | ||
123 | static void tipc_bclink_unlock(void) | 62 | static void tipc_bclink_unlock(struct net *net) |
124 | { | 63 | { |
125 | struct tipc_node *node = NULL; | 64 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
126 | 65 | ||
127 | if (likely(!bclink->flags)) { | 66 | spin_unlock_bh(&tn->bclink->lock); |
128 | spin_unlock_bh(&bclink->lock); | 67 | } |
129 | return; | ||
130 | } | ||
131 | 68 | ||
132 | if (bclink->flags & TIPC_BCLINK_RESET) { | 69 | void tipc_bclink_input(struct net *net) |
133 | bclink->flags &= ~TIPC_BCLINK_RESET; | 70 | { |
134 | node = tipc_bclink_retransmit_to(); | 71 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
135 | } | ||
136 | spin_unlock_bh(&bclink->lock); | ||
137 | 72 | ||
138 | if (node) | 73 | tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq); |
139 | tipc_link_reset_all(node); | ||
140 | } | 74 | } |
141 | 75 | ||
142 | uint tipc_bclink_get_mtu(void) | 76 | uint tipc_bclink_get_mtu(void) |
@@ -144,11 +78,6 @@ uint tipc_bclink_get_mtu(void) | |||
144 | return MAX_PKT_DEFAULT_MCAST; | 78 | return MAX_PKT_DEFAULT_MCAST; |
145 | } | 79 | } |
146 | 80 | ||
147 | void tipc_bclink_set_flags(unsigned int flags) | ||
148 | { | ||
149 | bclink->flags |= flags; | ||
150 | } | ||
151 | |||
152 | static u32 bcbuf_acks(struct sk_buff *buf) | 81 | static u32 bcbuf_acks(struct sk_buff *buf) |
153 | { | 82 | { |
154 | return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle; | 83 | return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle; |
@@ -164,31 +93,41 @@ static void bcbuf_decr_acks(struct sk_buff *buf) | |||
164 | bcbuf_set_acks(buf, bcbuf_acks(buf) - 1); | 93 | bcbuf_set_acks(buf, bcbuf_acks(buf) - 1); |
165 | } | 94 | } |
166 | 95 | ||
167 | void tipc_bclink_add_node(u32 addr) | 96 | void tipc_bclink_add_node(struct net *net, u32 addr) |
168 | { | 97 | { |
169 | tipc_bclink_lock(); | 98 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
170 | tipc_nmap_add(&bclink->bcast_nodes, addr); | 99 | |
171 | tipc_bclink_unlock(); | 100 | tipc_bclink_lock(net); |
101 | tipc_nmap_add(&tn->bclink->bcast_nodes, addr); | ||
102 | tipc_bclink_unlock(net); | ||
172 | } | 103 | } |
173 | 104 | ||
174 | void tipc_bclink_remove_node(u32 addr) | 105 | void tipc_bclink_remove_node(struct net *net, u32 addr) |
175 | { | 106 | { |
176 | tipc_bclink_lock(); | 107 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
177 | tipc_nmap_remove(&bclink->bcast_nodes, addr); | 108 | |
178 | tipc_bclink_unlock(); | 109 | tipc_bclink_lock(net); |
110 | tipc_nmap_remove(&tn->bclink->bcast_nodes, addr); | ||
111 | tipc_bclink_unlock(net); | ||
179 | } | 112 | } |
180 | 113 | ||
181 | static void bclink_set_last_sent(void) | 114 | static void bclink_set_last_sent(struct net *net) |
182 | { | 115 | { |
183 | if (bcl->next_out) | 116 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
184 | bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1); | 117 | struct tipc_link *bcl = tn->bcl; |
118 | struct sk_buff *skb = skb_peek(&bcl->backlogq); | ||
119 | |||
120 | if (skb) | ||
121 | bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1); | ||
185 | else | 122 | else |
186 | bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1); | 123 | bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1); |
187 | } | 124 | } |
188 | 125 | ||
189 | u32 tipc_bclink_get_last_sent(void) | 126 | u32 tipc_bclink_get_last_sent(struct net *net) |
190 | { | 127 | { |
191 | return bcl->fsm_msg_cnt; | 128 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
129 | |||
130 | return tn->bcl->fsm_msg_cnt; | ||
192 | } | 131 | } |
193 | 132 | ||
194 | static void bclink_update_last_sent(struct tipc_node *node, u32 seqno) | 133 | static void bclink_update_last_sent(struct tipc_node *node, u32 seqno) |
@@ -197,15 +136,16 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno) | |||
197 | seqno : node->bclink.last_sent; | 136 | seqno : node->bclink.last_sent; |
198 | } | 137 | } |
199 | 138 | ||
200 | |||
201 | /** | 139 | /** |
202 | * tipc_bclink_retransmit_to - get most recent node to request retransmission | 140 | * tipc_bclink_retransmit_to - get most recent node to request retransmission |
203 | * | 141 | * |
204 | * Called with bclink_lock locked | 142 | * Called with bclink_lock locked |
205 | */ | 143 | */ |
206 | struct tipc_node *tipc_bclink_retransmit_to(void) | 144 | struct tipc_node *tipc_bclink_retransmit_to(struct net *net) |
207 | { | 145 | { |
208 | return bclink->retransmit_to; | 146 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
147 | |||
148 | return tn->bclink->retransmit_to; | ||
209 | } | 149 | } |
210 | 150 | ||
211 | /** | 151 | /** |
@@ -215,11 +155,12 @@ struct tipc_node *tipc_bclink_retransmit_to(void) | |||
215 | * | 155 | * |
216 | * Called with bclink_lock locked | 156 | * Called with bclink_lock locked |
217 | */ | 157 | */ |
218 | static void bclink_retransmit_pkt(u32 after, u32 to) | 158 | static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to) |
219 | { | 159 | { |
220 | struct sk_buff *skb; | 160 | struct sk_buff *skb; |
161 | struct tipc_link *bcl = tn->bcl; | ||
221 | 162 | ||
222 | skb_queue_walk(&bcl->outqueue, skb) { | 163 | skb_queue_walk(&bcl->transmq, skb) { |
223 | if (more(buf_seqno(skb), after)) { | 164 | if (more(buf_seqno(skb), after)) { |
224 | tipc_link_retransmit(bcl, skb, mod(to - after)); | 165 | tipc_link_retransmit(bcl, skb, mod(to - after)); |
225 | break; | 166 | break; |
@@ -232,13 +173,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to) | |||
232 | * | 173 | * |
233 | * Called with no locks taken | 174 | * Called with no locks taken |
234 | */ | 175 | */ |
235 | void tipc_bclink_wakeup_users(void) | 176 | void tipc_bclink_wakeup_users(struct net *net) |
236 | { | 177 | { |
237 | struct sk_buff *skb; | 178 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
238 | |||
239 | while ((skb = skb_dequeue(&bclink->link.waiting_sks))) | ||
240 | tipc_sk_rcv(skb); | ||
241 | 179 | ||
180 | tipc_sk_rcv(net, &tn->bclink->link.wakeupq); | ||
242 | } | 181 | } |
243 | 182 | ||
244 | /** | 183 | /** |
@@ -251,12 +190,17 @@ void tipc_bclink_wakeup_users(void) | |||
251 | void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) | 190 | void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) |
252 | { | 191 | { |
253 | struct sk_buff *skb, *tmp; | 192 | struct sk_buff *skb, *tmp; |
254 | struct sk_buff *next; | ||
255 | unsigned int released = 0; | 193 | unsigned int released = 0; |
194 | struct net *net = n_ptr->net; | ||
195 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
196 | |||
197 | if (unlikely(!n_ptr->bclink.recv_permitted)) | ||
198 | return; | ||
199 | |||
200 | tipc_bclink_lock(net); | ||
256 | 201 | ||
257 | tipc_bclink_lock(); | ||
258 | /* Bail out if tx queue is empty (no clean up is required) */ | 202 | /* Bail out if tx queue is empty (no clean up is required) */ |
259 | skb = skb_peek(&bcl->outqueue); | 203 | skb = skb_peek(&tn->bcl->transmq); |
260 | if (!skb) | 204 | if (!skb) |
261 | goto exit; | 205 | goto exit; |
262 | 206 | ||
@@ -267,43 +211,35 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) | |||
267 | * acknowledge sent messages only (if other nodes still exist) | 211 | * acknowledge sent messages only (if other nodes still exist) |
268 | * or both sent and unsent messages (otherwise) | 212 | * or both sent and unsent messages (otherwise) |
269 | */ | 213 | */ |
270 | if (bclink->bcast_nodes.count) | 214 | if (tn->bclink->bcast_nodes.count) |
271 | acked = bcl->fsm_msg_cnt; | 215 | acked = tn->bcl->fsm_msg_cnt; |
272 | else | 216 | else |
273 | acked = bcl->next_out_no; | 217 | acked = tn->bcl->next_out_no; |
274 | } else { | 218 | } else { |
275 | /* | 219 | /* |
276 | * Bail out if specified sequence number does not correspond | 220 | * Bail out if specified sequence number does not correspond |
277 | * to a message that has been sent and not yet acknowledged | 221 | * to a message that has been sent and not yet acknowledged |
278 | */ | 222 | */ |
279 | if (less(acked, buf_seqno(skb)) || | 223 | if (less(acked, buf_seqno(skb)) || |
280 | less(bcl->fsm_msg_cnt, acked) || | 224 | less(tn->bcl->fsm_msg_cnt, acked) || |
281 | less_eq(acked, n_ptr->bclink.acked)) | 225 | less_eq(acked, n_ptr->bclink.acked)) |
282 | goto exit; | 226 | goto exit; |
283 | } | 227 | } |
284 | 228 | ||
285 | /* Skip over packets that node has previously acknowledged */ | 229 | /* Skip over packets that node has previously acknowledged */ |
286 | skb_queue_walk(&bcl->outqueue, skb) { | 230 | skb_queue_walk(&tn->bcl->transmq, skb) { |
287 | if (more(buf_seqno(skb), n_ptr->bclink.acked)) | 231 | if (more(buf_seqno(skb), n_ptr->bclink.acked)) |
288 | break; | 232 | break; |
289 | } | 233 | } |
290 | 234 | ||
291 | /* Update packets that node is now acknowledging */ | 235 | /* Update packets that node is now acknowledging */ |
292 | skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) { | 236 | skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) { |
293 | if (more(buf_seqno(skb), acked)) | 237 | if (more(buf_seqno(skb), acked)) |
294 | break; | 238 | break; |
295 | 239 | bcbuf_decr_acks(skb); | |
296 | next = tipc_skb_queue_next(&bcl->outqueue, skb); | 240 | bclink_set_last_sent(net); |
297 | if (skb != bcl->next_out) { | ||
298 | bcbuf_decr_acks(skb); | ||
299 | } else { | ||
300 | bcbuf_set_acks(skb, 0); | ||
301 | bcl->next_out = next; | ||
302 | bclink_set_last_sent(); | ||
303 | } | ||
304 | |||
305 | if (bcbuf_acks(skb) == 0) { | 241 | if (bcbuf_acks(skb) == 0) { |
306 | __skb_unlink(skb, &bcl->outqueue); | 242 | __skb_unlink(skb, &tn->bcl->transmq); |
307 | kfree_skb(skb); | 243 | kfree_skb(skb); |
308 | released = 1; | 244 | released = 1; |
309 | } | 245 | } |
@@ -311,15 +247,14 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) | |||
311 | n_ptr->bclink.acked = acked; | 247 | n_ptr->bclink.acked = acked; |
312 | 248 | ||
313 | /* Try resolving broadcast link congestion, if necessary */ | 249 | /* Try resolving broadcast link congestion, if necessary */ |
314 | if (unlikely(bcl->next_out)) { | 250 | if (unlikely(skb_peek(&tn->bcl->backlogq))) { |
315 | tipc_link_push_packets(bcl); | 251 | tipc_link_push_packets(tn->bcl); |
316 | bclink_set_last_sent(); | 252 | bclink_set_last_sent(net); |
317 | } | 253 | } |
318 | if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks))) | 254 | if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq))) |
319 | n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS; | 255 | n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS; |
320 | |||
321 | exit: | 256 | exit: |
322 | tipc_bclink_unlock(); | 257 | tipc_bclink_unlock(net); |
323 | } | 258 | } |
324 | 259 | ||
325 | /** | 260 | /** |
@@ -327,9 +262,12 @@ exit: | |||
327 | * | 262 | * |
328 | * RCU and node lock set | 263 | * RCU and node lock set |
329 | */ | 264 | */ |
330 | void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) | 265 | void tipc_bclink_update_link_state(struct tipc_node *n_ptr, |
266 | u32 last_sent) | ||
331 | { | 267 | { |
332 | struct sk_buff *buf; | 268 | struct sk_buff *buf; |
269 | struct net *net = n_ptr->net; | ||
270 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
333 | 271 | ||
334 | /* Ignore "stale" link state info */ | 272 | /* Ignore "stale" link state info */ |
335 | if (less_eq(last_sent, n_ptr->bclink.last_in)) | 273 | if (less_eq(last_sent, n_ptr->bclink.last_in)) |
@@ -356,21 +294,21 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) | |||
356 | buf = tipc_buf_acquire(INT_H_SIZE); | 294 | buf = tipc_buf_acquire(INT_H_SIZE); |
357 | if (buf) { | 295 | if (buf) { |
358 | struct tipc_msg *msg = buf_msg(buf); | 296 | struct tipc_msg *msg = buf_msg(buf); |
359 | struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue); | 297 | struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq); |
360 | u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent; | 298 | u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent; |
361 | 299 | ||
362 | tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, | 300 | tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG, |
363 | INT_H_SIZE, n_ptr->addr); | 301 | INT_H_SIZE, n_ptr->addr); |
364 | msg_set_non_seq(msg, 1); | 302 | msg_set_non_seq(msg, 1); |
365 | msg_set_mc_netid(msg, tipc_net_id); | 303 | msg_set_mc_netid(msg, tn->net_id); |
366 | msg_set_bcast_ack(msg, n_ptr->bclink.last_in); | 304 | msg_set_bcast_ack(msg, n_ptr->bclink.last_in); |
367 | msg_set_bcgap_after(msg, n_ptr->bclink.last_in); | 305 | msg_set_bcgap_after(msg, n_ptr->bclink.last_in); |
368 | msg_set_bcgap_to(msg, to); | 306 | msg_set_bcgap_to(msg, to); |
369 | 307 | ||
370 | tipc_bclink_lock(); | 308 | tipc_bclink_lock(net); |
371 | tipc_bearer_send(MAX_BEARERS, buf, NULL); | 309 | tipc_bearer_send(net, MAX_BEARERS, buf, NULL); |
372 | bcl->stats.sent_nacks++; | 310 | tn->bcl->stats.sent_nacks++; |
373 | tipc_bclink_unlock(); | 311 | tipc_bclink_unlock(net); |
374 | kfree_skb(buf); | 312 | kfree_skb(buf); |
375 | 313 | ||
376 | n_ptr->bclink.oos_state++; | 314 | n_ptr->bclink.oos_state++; |
@@ -383,34 +321,39 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) | |||
383 | * Delay any upcoming NACK by this node if another node has already | 321 | * Delay any upcoming NACK by this node if another node has already |
384 | * requested the first message this node is going to ask for. | 322 | * requested the first message this node is going to ask for. |
385 | */ | 323 | */ |
386 | static void bclink_peek_nack(struct tipc_msg *msg) | 324 | static void bclink_peek_nack(struct net *net, struct tipc_msg *msg) |
387 | { | 325 | { |
388 | struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg)); | 326 | struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg)); |
389 | 327 | ||
390 | if (unlikely(!n_ptr)) | 328 | if (unlikely(!n_ptr)) |
391 | return; | 329 | return; |
392 | 330 | ||
393 | tipc_node_lock(n_ptr); | 331 | tipc_node_lock(n_ptr); |
394 | |||
395 | if (n_ptr->bclink.recv_permitted && | 332 | if (n_ptr->bclink.recv_permitted && |
396 | (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) && | 333 | (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) && |
397 | (n_ptr->bclink.last_in == msg_bcgap_after(msg))) | 334 | (n_ptr->bclink.last_in == msg_bcgap_after(msg))) |
398 | n_ptr->bclink.oos_state = 2; | 335 | n_ptr->bclink.oos_state = 2; |
399 | |||
400 | tipc_node_unlock(n_ptr); | 336 | tipc_node_unlock(n_ptr); |
337 | tipc_node_put(n_ptr); | ||
401 | } | 338 | } |
402 | 339 | ||
403 | /* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster | 340 | /* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster |
404 | * and to identified node local sockets | 341 | * and to identified node local sockets |
342 | * @net: the applicable net namespace | ||
405 | * @list: chain of buffers containing message | 343 | * @list: chain of buffers containing message |
406 | * Consumes the buffer chain, except when returning -ELINKCONG | 344 | * Consumes the buffer chain, except when returning -ELINKCONG |
407 | * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE | 345 | * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE |
408 | */ | 346 | */ |
409 | int tipc_bclink_xmit(struct sk_buff_head *list) | 347 | int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list) |
410 | { | 348 | { |
349 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
350 | struct tipc_link *bcl = tn->bcl; | ||
351 | struct tipc_bclink *bclink = tn->bclink; | ||
411 | int rc = 0; | 352 | int rc = 0; |
412 | int bc = 0; | 353 | int bc = 0; |
413 | struct sk_buff *skb; | 354 | struct sk_buff *skb; |
355 | struct sk_buff_head arrvq; | ||
356 | struct sk_buff_head inputq; | ||
414 | 357 | ||
415 | /* Prepare clone of message for local node */ | 358 | /* Prepare clone of message for local node */ |
416 | skb = tipc_msg_reassemble(list); | 359 | skb = tipc_msg_reassemble(list); |
@@ -418,33 +361,35 @@ int tipc_bclink_xmit(struct sk_buff_head *list) | |||
418 | __skb_queue_purge(list); | 361 | __skb_queue_purge(list); |
419 | return -EHOSTUNREACH; | 362 | return -EHOSTUNREACH; |
420 | } | 363 | } |
421 | 364 | /* Broadcast to all nodes */ | |
422 | /* Broadcast to all other nodes */ | ||
423 | if (likely(bclink)) { | 365 | if (likely(bclink)) { |
424 | tipc_bclink_lock(); | 366 | tipc_bclink_lock(net); |
425 | if (likely(bclink->bcast_nodes.count)) { | 367 | if (likely(bclink->bcast_nodes.count)) { |
426 | rc = __tipc_link_xmit(bcl, list); | 368 | rc = __tipc_link_xmit(net, bcl, list); |
427 | if (likely(!rc)) { | 369 | if (likely(!rc)) { |
428 | u32 len = skb_queue_len(&bcl->outqueue); | 370 | u32 len = skb_queue_len(&bcl->transmq); |
429 | 371 | ||
430 | bclink_set_last_sent(); | 372 | bclink_set_last_sent(net); |
431 | bcl->stats.queue_sz_counts++; | 373 | bcl->stats.queue_sz_counts++; |
432 | bcl->stats.accu_queue_sz += len; | 374 | bcl->stats.accu_queue_sz += len; |
433 | } | 375 | } |
434 | bc = 1; | 376 | bc = 1; |
435 | } | 377 | } |
436 | tipc_bclink_unlock(); | 378 | tipc_bclink_unlock(net); |
437 | } | 379 | } |
438 | 380 | ||
439 | if (unlikely(!bc)) | 381 | if (unlikely(!bc)) |
440 | __skb_queue_purge(list); | 382 | __skb_queue_purge(list); |
441 | 383 | ||
442 | /* Deliver message clone */ | 384 | if (unlikely(rc)) { |
443 | if (likely(!rc)) | ||
444 | tipc_sk_mcast_rcv(skb); | ||
445 | else | ||
446 | kfree_skb(skb); | 385 | kfree_skb(skb); |
447 | 386 | return rc; | |
387 | } | ||
388 | /* Deliver message clone */ | ||
389 | __skb_queue_head_init(&arrvq); | ||
390 | skb_queue_head_init(&inputq); | ||
391 | __skb_queue_tail(&arrvq, skb); | ||
392 | tipc_sk_mcast_rcv(net, &arrvq, &inputq); | ||
448 | return rc; | 393 | return rc; |
449 | } | 394 | } |
450 | 395 | ||
@@ -455,19 +400,21 @@ int tipc_bclink_xmit(struct sk_buff_head *list) | |||
455 | */ | 400 | */ |
456 | static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) | 401 | static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) |
457 | { | 402 | { |
403 | struct tipc_net *tn = net_generic(node->net, tipc_net_id); | ||
404 | |||
458 | bclink_update_last_sent(node, seqno); | 405 | bclink_update_last_sent(node, seqno); |
459 | node->bclink.last_in = seqno; | 406 | node->bclink.last_in = seqno; |
460 | node->bclink.oos_state = 0; | 407 | node->bclink.oos_state = 0; |
461 | bcl->stats.recv_info++; | 408 | tn->bcl->stats.recv_info++; |
462 | 409 | ||
463 | /* | 410 | /* |
464 | * Unicast an ACK periodically, ensuring that | 411 | * Unicast an ACK periodically, ensuring that |
465 | * all nodes in the cluster don't ACK at the same time | 412 | * all nodes in the cluster don't ACK at the same time |
466 | */ | 413 | */ |
467 | if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) { | 414 | if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) { |
468 | tipc_link_proto_xmit(node->active_links[node->addr & 1], | 415 | tipc_link_proto_xmit(node->active_links[node->addr & 1], |
469 | STATE_MSG, 0, 0, 0, 0, 0); | 416 | STATE_MSG, 0, 0, 0, 0); |
470 | bcl->stats.sent_acks++; | 417 | tn->bcl->stats.sent_acks++; |
471 | } | 418 | } |
472 | } | 419 | } |
473 | 420 | ||
@@ -476,19 +423,24 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) | |||
476 | * | 423 | * |
477 | * RCU is locked, no other locks set | 424 | * RCU is locked, no other locks set |
478 | */ | 425 | */ |
479 | void tipc_bclink_rcv(struct sk_buff *buf) | 426 | void tipc_bclink_rcv(struct net *net, struct sk_buff *buf) |
480 | { | 427 | { |
428 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
429 | struct tipc_link *bcl = tn->bcl; | ||
481 | struct tipc_msg *msg = buf_msg(buf); | 430 | struct tipc_msg *msg = buf_msg(buf); |
482 | struct tipc_node *node; | 431 | struct tipc_node *node; |
483 | u32 next_in; | 432 | u32 next_in; |
484 | u32 seqno; | 433 | u32 seqno; |
485 | int deferred = 0; | 434 | int deferred = 0; |
435 | int pos = 0; | ||
436 | struct sk_buff *iskb; | ||
437 | struct sk_buff_head *arrvq, *inputq; | ||
486 | 438 | ||
487 | /* Screen out unwanted broadcast messages */ | 439 | /* Screen out unwanted broadcast messages */ |
488 | if (msg_mc_netid(msg) != tipc_net_id) | 440 | if (msg_mc_netid(msg) != tn->net_id) |
489 | goto exit; | 441 | goto exit; |
490 | 442 | ||
491 | node = tipc_node_find(msg_prevnode(msg)); | 443 | node = tipc_node_find(net, msg_prevnode(msg)); |
492 | if (unlikely(!node)) | 444 | if (unlikely(!node)) |
493 | goto exit; | 445 | goto exit; |
494 | 446 | ||
@@ -500,71 +452,76 @@ void tipc_bclink_rcv(struct sk_buff *buf) | |||
500 | if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { | 452 | if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { |
501 | if (msg_type(msg) != STATE_MSG) | 453 | if (msg_type(msg) != STATE_MSG) |
502 | goto unlock; | 454 | goto unlock; |
503 | if (msg_destnode(msg) == tipc_own_addr) { | 455 | if (msg_destnode(msg) == tn->own_addr) { |
504 | tipc_bclink_acknowledge(node, msg_bcast_ack(msg)); | 456 | tipc_bclink_acknowledge(node, msg_bcast_ack(msg)); |
505 | tipc_node_unlock(node); | 457 | tipc_bclink_lock(net); |
506 | tipc_bclink_lock(); | ||
507 | bcl->stats.recv_nacks++; | 458 | bcl->stats.recv_nacks++; |
508 | bclink->retransmit_to = node; | 459 | tn->bclink->retransmit_to = node; |
509 | bclink_retransmit_pkt(msg_bcgap_after(msg), | 460 | bclink_retransmit_pkt(tn, msg_bcgap_after(msg), |
510 | msg_bcgap_to(msg)); | 461 | msg_bcgap_to(msg)); |
511 | tipc_bclink_unlock(); | 462 | tipc_bclink_unlock(net); |
463 | tipc_node_unlock(node); | ||
512 | } else { | 464 | } else { |
513 | tipc_node_unlock(node); | 465 | tipc_node_unlock(node); |
514 | bclink_peek_nack(msg); | 466 | bclink_peek_nack(net, msg); |
515 | } | 467 | } |
468 | tipc_node_put(node); | ||
516 | goto exit; | 469 | goto exit; |
517 | } | 470 | } |
518 | 471 | ||
519 | /* Handle in-sequence broadcast message */ | 472 | /* Handle in-sequence broadcast message */ |
520 | seqno = msg_seqno(msg); | 473 | seqno = msg_seqno(msg); |
521 | next_in = mod(node->bclink.last_in + 1); | 474 | next_in = mod(node->bclink.last_in + 1); |
475 | arrvq = &tn->bclink->arrvq; | ||
476 | inputq = &tn->bclink->inputq; | ||
522 | 477 | ||
523 | if (likely(seqno == next_in)) { | 478 | if (likely(seqno == next_in)) { |
524 | receive: | 479 | receive: |
525 | /* Deliver message to destination */ | 480 | /* Deliver message to destination */ |
526 | if (likely(msg_isdata(msg))) { | 481 | if (likely(msg_isdata(msg))) { |
527 | tipc_bclink_lock(); | 482 | tipc_bclink_lock(net); |
528 | bclink_accept_pkt(node, seqno); | 483 | bclink_accept_pkt(node, seqno); |
529 | tipc_bclink_unlock(); | 484 | spin_lock_bh(&inputq->lock); |
485 | __skb_queue_tail(arrvq, buf); | ||
486 | spin_unlock_bh(&inputq->lock); | ||
487 | node->action_flags |= TIPC_BCAST_MSG_EVT; | ||
488 | tipc_bclink_unlock(net); | ||
530 | tipc_node_unlock(node); | 489 | tipc_node_unlock(node); |
531 | if (likely(msg_mcast(msg))) | ||
532 | tipc_sk_mcast_rcv(buf); | ||
533 | else | ||
534 | kfree_skb(buf); | ||
535 | } else if (msg_user(msg) == MSG_BUNDLER) { | 490 | } else if (msg_user(msg) == MSG_BUNDLER) { |
536 | tipc_bclink_lock(); | 491 | tipc_bclink_lock(net); |
537 | bclink_accept_pkt(node, seqno); | 492 | bclink_accept_pkt(node, seqno); |
538 | bcl->stats.recv_bundles++; | 493 | bcl->stats.recv_bundles++; |
539 | bcl->stats.recv_bundled += msg_msgcnt(msg); | 494 | bcl->stats.recv_bundled += msg_msgcnt(msg); |
540 | tipc_bclink_unlock(); | 495 | pos = 0; |
496 | while (tipc_msg_extract(buf, &iskb, &pos)) { | ||
497 | spin_lock_bh(&inputq->lock); | ||
498 | __skb_queue_tail(arrvq, iskb); | ||
499 | spin_unlock_bh(&inputq->lock); | ||
500 | } | ||
501 | node->action_flags |= TIPC_BCAST_MSG_EVT; | ||
502 | tipc_bclink_unlock(net); | ||
541 | tipc_node_unlock(node); | 503 | tipc_node_unlock(node); |
542 | tipc_link_bundle_rcv(buf); | ||
543 | } else if (msg_user(msg) == MSG_FRAGMENTER) { | 504 | } else if (msg_user(msg) == MSG_FRAGMENTER) { |
505 | tipc_bclink_lock(net); | ||
506 | bclink_accept_pkt(node, seqno); | ||
544 | tipc_buf_append(&node->bclink.reasm_buf, &buf); | 507 | tipc_buf_append(&node->bclink.reasm_buf, &buf); |
545 | if (unlikely(!buf && !node->bclink.reasm_buf)) | 508 | if (unlikely(!buf && !node->bclink.reasm_buf)) { |
509 | tipc_bclink_unlock(net); | ||
546 | goto unlock; | 510 | goto unlock; |
547 | tipc_bclink_lock(); | 511 | } |
548 | bclink_accept_pkt(node, seqno); | ||
549 | bcl->stats.recv_fragments++; | 512 | bcl->stats.recv_fragments++; |
550 | if (buf) { | 513 | if (buf) { |
551 | bcl->stats.recv_fragmented++; | 514 | bcl->stats.recv_fragmented++; |
552 | msg = buf_msg(buf); | 515 | msg = buf_msg(buf); |
553 | tipc_bclink_unlock(); | 516 | tipc_bclink_unlock(net); |
554 | goto receive; | 517 | goto receive; |
555 | } | 518 | } |
556 | tipc_bclink_unlock(); | 519 | tipc_bclink_unlock(net); |
557 | tipc_node_unlock(node); | ||
558 | } else if (msg_user(msg) == NAME_DISTRIBUTOR) { | ||
559 | tipc_bclink_lock(); | ||
560 | bclink_accept_pkt(node, seqno); | ||
561 | tipc_bclink_unlock(); | ||
562 | tipc_node_unlock(node); | 520 | tipc_node_unlock(node); |
563 | tipc_named_rcv(buf); | ||
564 | } else { | 521 | } else { |
565 | tipc_bclink_lock(); | 522 | tipc_bclink_lock(net); |
566 | bclink_accept_pkt(node, seqno); | 523 | bclink_accept_pkt(node, seqno); |
567 | tipc_bclink_unlock(); | 524 | tipc_bclink_unlock(net); |
568 | tipc_node_unlock(node); | 525 | tipc_node_unlock(node); |
569 | kfree_skb(buf); | 526 | kfree_skb(buf); |
570 | } | 527 | } |
@@ -578,41 +535,42 @@ receive: | |||
578 | if (node->bclink.last_in == node->bclink.last_sent) | 535 | if (node->bclink.last_in == node->bclink.last_sent) |
579 | goto unlock; | 536 | goto unlock; |
580 | 537 | ||
581 | if (skb_queue_empty(&node->bclink.deferred_queue)) { | 538 | if (skb_queue_empty(&node->bclink.deferdq)) { |
582 | node->bclink.oos_state = 1; | 539 | node->bclink.oos_state = 1; |
583 | goto unlock; | 540 | goto unlock; |
584 | } | 541 | } |
585 | 542 | ||
586 | msg = buf_msg(skb_peek(&node->bclink.deferred_queue)); | 543 | msg = buf_msg(skb_peek(&node->bclink.deferdq)); |
587 | seqno = msg_seqno(msg); | 544 | seqno = msg_seqno(msg); |
588 | next_in = mod(next_in + 1); | 545 | next_in = mod(next_in + 1); |
589 | if (seqno != next_in) | 546 | if (seqno != next_in) |
590 | goto unlock; | 547 | goto unlock; |
591 | 548 | ||
592 | /* Take in-sequence message from deferred queue & deliver it */ | 549 | /* Take in-sequence message from deferred queue & deliver it */ |
593 | buf = __skb_dequeue(&node->bclink.deferred_queue); | 550 | buf = __skb_dequeue(&node->bclink.deferdq); |
594 | goto receive; | 551 | goto receive; |
595 | } | 552 | } |
596 | 553 | ||
597 | /* Handle out-of-sequence broadcast message */ | 554 | /* Handle out-of-sequence broadcast message */ |
598 | if (less(next_in, seqno)) { | 555 | if (less(next_in, seqno)) { |
599 | deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue, | 556 | deferred = tipc_link_defer_pkt(&node->bclink.deferdq, |
600 | buf); | 557 | buf); |
601 | bclink_update_last_sent(node, seqno); | 558 | bclink_update_last_sent(node, seqno); |
602 | buf = NULL; | 559 | buf = NULL; |
603 | } | 560 | } |
604 | 561 | ||
605 | tipc_bclink_lock(); | 562 | tipc_bclink_lock(net); |
606 | 563 | ||
607 | if (deferred) | 564 | if (deferred) |
608 | bcl->stats.deferred_recv++; | 565 | bcl->stats.deferred_recv++; |
609 | else | 566 | else |
610 | bcl->stats.duplicates++; | 567 | bcl->stats.duplicates++; |
611 | 568 | ||
612 | tipc_bclink_unlock(); | 569 | tipc_bclink_unlock(net); |
613 | 570 | ||
614 | unlock: | 571 | unlock: |
615 | tipc_node_unlock(node); | 572 | tipc_node_unlock(node); |
573 | tipc_node_put(node); | ||
616 | exit: | 574 | exit: |
617 | kfree_skb(buf); | 575 | kfree_skb(buf); |
618 | } | 576 | } |
@@ -620,7 +578,7 @@ exit: | |||
620 | u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) | 578 | u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) |
621 | { | 579 | { |
622 | return (n_ptr->bclink.recv_permitted && | 580 | return (n_ptr->bclink.recv_permitted && |
623 | (tipc_bclink_get_last_sent() != n_ptr->bclink.acked)); | 581 | (tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked)); |
624 | } | 582 | } |
625 | 583 | ||
626 | 584 | ||
@@ -633,11 +591,15 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) | |||
633 | * Returns 0 (packet sent successfully) under all circumstances, | 591 | * Returns 0 (packet sent successfully) under all circumstances, |
634 | * since the broadcast link's pseudo-bearer never blocks | 592 | * since the broadcast link's pseudo-bearer never blocks |
635 | */ | 593 | */ |
636 | static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1, | 594 | static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf, |
595 | struct tipc_bearer *unused1, | ||
637 | struct tipc_media_addr *unused2) | 596 | struct tipc_media_addr *unused2) |
638 | { | 597 | { |
639 | int bp_index; | 598 | int bp_index; |
640 | struct tipc_msg *msg = buf_msg(buf); | 599 | struct tipc_msg *msg = buf_msg(buf); |
600 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
601 | struct tipc_bcbearer *bcbearer = tn->bcbearer; | ||
602 | struct tipc_bclink *bclink = tn->bclink; | ||
641 | 603 | ||
642 | /* Prepare broadcast link message for reliable transmission, | 604 | /* Prepare broadcast link message for reliable transmission, |
643 | * if first time trying to send it; | 605 | * if first time trying to send it; |
@@ -647,9 +609,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1, | |||
647 | if (likely(!msg_non_seq(buf_msg(buf)))) { | 609 | if (likely(!msg_non_seq(buf_msg(buf)))) { |
648 | bcbuf_set_acks(buf, bclink->bcast_nodes.count); | 610 | bcbuf_set_acks(buf, bclink->bcast_nodes.count); |
649 | msg_set_non_seq(msg, 1); | 611 | msg_set_non_seq(msg, 1); |
650 | msg_set_mc_netid(msg, tipc_net_id); | 612 | msg_set_mc_netid(msg, tn->net_id); |
651 | bcl->stats.sent_info++; | 613 | tn->bcl->stats.sent_info++; |
652 | |||
653 | if (WARN_ON(!bclink->bcast_nodes.count)) { | 614 | if (WARN_ON(!bclink->bcast_nodes.count)) { |
654 | dump_stack(); | 615 | dump_stack(); |
655 | return 0; | 616 | return 0; |
@@ -677,13 +638,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1, | |||
677 | 638 | ||
678 | if (bp_index == 0) { | 639 | if (bp_index == 0) { |
679 | /* Use original buffer for first bearer */ | 640 | /* Use original buffer for first bearer */ |
680 | tipc_bearer_send(b->identity, buf, &b->bcast_addr); | 641 | tipc_bearer_send(net, b->identity, buf, &b->bcast_addr); |
681 | } else { | 642 | } else { |
682 | /* Avoid concurrent buffer access */ | 643 | /* Avoid concurrent buffer access */ |
683 | tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC); | 644 | tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC); |
684 | if (!tbuf) | 645 | if (!tbuf) |
685 | break; | 646 | break; |
686 | tipc_bearer_send(b->identity, tbuf, &b->bcast_addr); | 647 | tipc_bearer_send(net, b->identity, tbuf, |
648 | &b->bcast_addr); | ||
687 | kfree_skb(tbuf); /* Bearer keeps a clone */ | 649 | kfree_skb(tbuf); /* Bearer keeps a clone */ |
688 | } | 650 | } |
689 | if (bcbearer->remains_new.count == 0) | 651 | if (bcbearer->remains_new.count == 0) |
@@ -698,15 +660,18 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1, | |||
698 | /** | 660 | /** |
699 | * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer | 661 | * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer |
700 | */ | 662 | */ |
701 | void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action) | 663 | void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr, |
664 | u32 node, bool action) | ||
702 | { | 665 | { |
666 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
667 | struct tipc_bcbearer *bcbearer = tn->bcbearer; | ||
703 | struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp; | 668 | struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp; |
704 | struct tipc_bcbearer_pair *bp_curr; | 669 | struct tipc_bcbearer_pair *bp_curr; |
705 | struct tipc_bearer *b; | 670 | struct tipc_bearer *b; |
706 | int b_index; | 671 | int b_index; |
707 | int pri; | 672 | int pri; |
708 | 673 | ||
709 | tipc_bclink_lock(); | 674 | tipc_bclink_lock(net); |
710 | 675 | ||
711 | if (action) | 676 | if (action) |
712 | tipc_nmap_add(nm_ptr, node); | 677 | tipc_nmap_add(nm_ptr, node); |
@@ -718,7 +683,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action) | |||
718 | 683 | ||
719 | rcu_read_lock(); | 684 | rcu_read_lock(); |
720 | for (b_index = 0; b_index < MAX_BEARERS; b_index++) { | 685 | for (b_index = 0; b_index < MAX_BEARERS; b_index++) { |
721 | b = rcu_dereference_rtnl(bearer_list[b_index]); | 686 | b = rcu_dereference_rtnl(tn->bearer_list[b_index]); |
722 | if (!b || !b->nodes.count) | 687 | if (!b || !b->nodes.count) |
723 | continue; | 688 | continue; |
724 | 689 | ||
@@ -753,7 +718,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action) | |||
753 | bp_curr++; | 718 | bp_curr++; |
754 | } | 719 | } |
755 | 720 | ||
756 | tipc_bclink_unlock(); | 721 | tipc_bclink_unlock(net); |
757 | } | 722 | } |
758 | 723 | ||
759 | static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb, | 724 | static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb, |
@@ -807,19 +772,21 @@ msg_full: | |||
807 | return -EMSGSIZE; | 772 | return -EMSGSIZE; |
808 | } | 773 | } |
809 | 774 | ||
810 | int tipc_nl_add_bc_link(struct tipc_nl_msg *msg) | 775 | int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg) |
811 | { | 776 | { |
812 | int err; | 777 | int err; |
813 | void *hdr; | 778 | void *hdr; |
814 | struct nlattr *attrs; | 779 | struct nlattr *attrs; |
815 | struct nlattr *prop; | 780 | struct nlattr *prop; |
781 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
782 | struct tipc_link *bcl = tn->bcl; | ||
816 | 783 | ||
817 | if (!bcl) | 784 | if (!bcl) |
818 | return 0; | 785 | return 0; |
819 | 786 | ||
820 | tipc_bclink_lock(); | 787 | tipc_bclink_lock(net); |
821 | 788 | ||
822 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, | 789 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, |
823 | NLM_F_MULTI, TIPC_NL_LINK_GET); | 790 | NLM_F_MULTI, TIPC_NL_LINK_GET); |
824 | if (!hdr) | 791 | if (!hdr) |
825 | return -EMSGSIZE; | 792 | return -EMSGSIZE; |
@@ -844,7 +811,7 @@ int tipc_nl_add_bc_link(struct tipc_nl_msg *msg) | |||
844 | prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); | 811 | prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); |
845 | if (!prop) | 812 | if (!prop) |
846 | goto attr_msg_full; | 813 | goto attr_msg_full; |
847 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0])) | 814 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window)) |
848 | goto prop_msg_full; | 815 | goto prop_msg_full; |
849 | nla_nest_end(msg->skb, prop); | 816 | nla_nest_end(msg->skb, prop); |
850 | 817 | ||
@@ -852,7 +819,7 @@ int tipc_nl_add_bc_link(struct tipc_nl_msg *msg) | |||
852 | if (err) | 819 | if (err) |
853 | goto attr_msg_full; | 820 | goto attr_msg_full; |
854 | 821 | ||
855 | tipc_bclink_unlock(); | 822 | tipc_bclink_unlock(net); |
856 | nla_nest_end(msg->skb, attrs); | 823 | nla_nest_end(msg->skb, attrs); |
857 | genlmsg_end(msg->skb, hdr); | 824 | genlmsg_end(msg->skb, hdr); |
858 | 825 | ||
@@ -863,79 +830,49 @@ prop_msg_full: | |||
863 | attr_msg_full: | 830 | attr_msg_full: |
864 | nla_nest_cancel(msg->skb, attrs); | 831 | nla_nest_cancel(msg->skb, attrs); |
865 | msg_full: | 832 | msg_full: |
866 | tipc_bclink_unlock(); | 833 | tipc_bclink_unlock(net); |
867 | genlmsg_cancel(msg->skb, hdr); | 834 | genlmsg_cancel(msg->skb, hdr); |
868 | 835 | ||
869 | return -EMSGSIZE; | 836 | return -EMSGSIZE; |
870 | } | 837 | } |
871 | 838 | ||
872 | int tipc_bclink_stats(char *buf, const u32 buf_size) | 839 | int tipc_bclink_reset_stats(struct net *net) |
873 | { | 840 | { |
874 | int ret; | 841 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
875 | struct tipc_stats *s; | 842 | struct tipc_link *bcl = tn->bcl; |
876 | 843 | ||
877 | if (!bcl) | 844 | if (!bcl) |
878 | return 0; | ||
879 | |||
880 | tipc_bclink_lock(); | ||
881 | |||
882 | s = &bcl->stats; | ||
883 | |||
884 | ret = tipc_snprintf(buf, buf_size, "Link <%s>\n" | ||
885 | " Window:%u packets\n", | ||
886 | bcl->name, bcl->queue_limit[0]); | ||
887 | ret += tipc_snprintf(buf + ret, buf_size - ret, | ||
888 | " RX packets:%u fragments:%u/%u bundles:%u/%u\n", | ||
889 | s->recv_info, s->recv_fragments, | ||
890 | s->recv_fragmented, s->recv_bundles, | ||
891 | s->recv_bundled); | ||
892 | ret += tipc_snprintf(buf + ret, buf_size - ret, | ||
893 | " TX packets:%u fragments:%u/%u bundles:%u/%u\n", | ||
894 | s->sent_info, s->sent_fragments, | ||
895 | s->sent_fragmented, s->sent_bundles, | ||
896 | s->sent_bundled); | ||
897 | ret += tipc_snprintf(buf + ret, buf_size - ret, | ||
898 | " RX naks:%u defs:%u dups:%u\n", | ||
899 | s->recv_nacks, s->deferred_recv, s->duplicates); | ||
900 | ret += tipc_snprintf(buf + ret, buf_size - ret, | ||
901 | " TX naks:%u acks:%u dups:%u\n", | ||
902 | s->sent_nacks, s->sent_acks, s->retransmitted); | ||
903 | ret += tipc_snprintf(buf + ret, buf_size - ret, | ||
904 | " Congestion link:%u Send queue max:%u avg:%u\n", | ||
905 | s->link_congs, s->max_queue_sz, | ||
906 | s->queue_sz_counts ? | ||
907 | (s->accu_queue_sz / s->queue_sz_counts) : 0); | ||
908 | |||
909 | tipc_bclink_unlock(); | ||
910 | return ret; | ||
911 | } | ||
912 | |||
913 | int tipc_bclink_reset_stats(void) | ||
914 | { | ||
915 | if (!bcl) | ||
916 | return -ENOPROTOOPT; | 845 | return -ENOPROTOOPT; |
917 | 846 | ||
918 | tipc_bclink_lock(); | 847 | tipc_bclink_lock(net); |
919 | memset(&bcl->stats, 0, sizeof(bcl->stats)); | 848 | memset(&bcl->stats, 0, sizeof(bcl->stats)); |
920 | tipc_bclink_unlock(); | 849 | tipc_bclink_unlock(net); |
921 | return 0; | 850 | return 0; |
922 | } | 851 | } |
923 | 852 | ||
924 | int tipc_bclink_set_queue_limits(u32 limit) | 853 | int tipc_bclink_set_queue_limits(struct net *net, u32 limit) |
925 | { | 854 | { |
855 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
856 | struct tipc_link *bcl = tn->bcl; | ||
857 | |||
926 | if (!bcl) | 858 | if (!bcl) |
927 | return -ENOPROTOOPT; | 859 | return -ENOPROTOOPT; |
928 | if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN)) | 860 | if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN)) |
929 | return -EINVAL; | 861 | return -EINVAL; |
930 | 862 | ||
931 | tipc_bclink_lock(); | 863 | tipc_bclink_lock(net); |
932 | tipc_link_set_queue_limits(bcl, limit); | 864 | tipc_link_set_queue_limits(bcl, limit); |
933 | tipc_bclink_unlock(); | 865 | tipc_bclink_unlock(net); |
934 | return 0; | 866 | return 0; |
935 | } | 867 | } |
936 | 868 | ||
937 | int tipc_bclink_init(void) | 869 | int tipc_bclink_init(struct net *net) |
938 | { | 870 | { |
871 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
872 | struct tipc_bcbearer *bcbearer; | ||
873 | struct tipc_bclink *bclink; | ||
874 | struct tipc_link *bcl; | ||
875 | |||
939 | bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC); | 876 | bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC); |
940 | if (!bcbearer) | 877 | if (!bcbearer) |
941 | return -ENOMEM; | 878 | return -ENOMEM; |
@@ -952,32 +889,42 @@ int tipc_bclink_init(void) | |||
952 | sprintf(bcbearer->media.name, "tipc-broadcast"); | 889 | sprintf(bcbearer->media.name, "tipc-broadcast"); |
953 | 890 | ||
954 | spin_lock_init(&bclink->lock); | 891 | spin_lock_init(&bclink->lock); |
955 | __skb_queue_head_init(&bcl->outqueue); | 892 | __skb_queue_head_init(&bcl->transmq); |
956 | __skb_queue_head_init(&bcl->deferred_queue); | 893 | __skb_queue_head_init(&bcl->backlogq); |
957 | skb_queue_head_init(&bcl->waiting_sks); | 894 | __skb_queue_head_init(&bcl->deferdq); |
895 | skb_queue_head_init(&bcl->wakeupq); | ||
958 | bcl->next_out_no = 1; | 896 | bcl->next_out_no = 1; |
959 | spin_lock_init(&bclink->node.lock); | 897 | spin_lock_init(&bclink->node.lock); |
960 | __skb_queue_head_init(&bclink->node.waiting_sks); | 898 | __skb_queue_head_init(&bclink->arrvq); |
899 | skb_queue_head_init(&bclink->inputq); | ||
961 | bcl->owner = &bclink->node; | 900 | bcl->owner = &bclink->node; |
962 | bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; | 901 | bcl->owner->net = net; |
902 | bcl->mtu = MAX_PKT_DEFAULT_MCAST; | ||
963 | tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); | 903 | tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); |
964 | bcl->bearer_id = MAX_BEARERS; | 904 | bcl->bearer_id = MAX_BEARERS; |
965 | rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer); | 905 | rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer); |
966 | bcl->state = WORKING_WORKING; | 906 | bcl->state = WORKING_WORKING; |
907 | bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg; | ||
908 | msg_set_prevnode(bcl->pmsg, tn->own_addr); | ||
967 | strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME); | 909 | strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME); |
910 | tn->bcbearer = bcbearer; | ||
911 | tn->bclink = bclink; | ||
912 | tn->bcl = bcl; | ||
968 | return 0; | 913 | return 0; |
969 | } | 914 | } |
970 | 915 | ||
971 | void tipc_bclink_stop(void) | 916 | void tipc_bclink_stop(struct net *net) |
972 | { | 917 | { |
973 | tipc_bclink_lock(); | 918 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
974 | tipc_link_purge_queues(bcl); | 919 | |
975 | tipc_bclink_unlock(); | 920 | tipc_bclink_lock(net); |
921 | tipc_link_purge_queues(tn->bcl); | ||
922 | tipc_bclink_unlock(net); | ||
976 | 923 | ||
977 | RCU_INIT_POINTER(bearer_list[BCBEARER], NULL); | 924 | RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL); |
978 | synchronize_net(); | 925 | synchronize_net(); |
979 | kfree(bcbearer); | 926 | kfree(tn->bcbearer); |
980 | kfree(bclink); | 927 | kfree(tn->bclink); |
981 | } | 928 | } |
982 | 929 | ||
983 | /** | 930 | /** |
@@ -1037,50 +984,3 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a, | |||
1037 | } | 984 | } |
1038 | } | 985 | } |
1039 | } | 986 | } |
1040 | |||
1041 | /** | ||
1042 | * tipc_port_list_add - add a port to a port list, ensuring no duplicates | ||
1043 | */ | ||
1044 | void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port) | ||
1045 | { | ||
1046 | struct tipc_port_list *item = pl_ptr; | ||
1047 | int i; | ||
1048 | int item_sz = PLSIZE; | ||
1049 | int cnt = pl_ptr->count; | ||
1050 | |||
1051 | for (; ; cnt -= item_sz, item = item->next) { | ||
1052 | if (cnt < PLSIZE) | ||
1053 | item_sz = cnt; | ||
1054 | for (i = 0; i < item_sz; i++) | ||
1055 | if (item->ports[i] == port) | ||
1056 | return; | ||
1057 | if (i < PLSIZE) { | ||
1058 | item->ports[i] = port; | ||
1059 | pl_ptr->count++; | ||
1060 | return; | ||
1061 | } | ||
1062 | if (!item->next) { | ||
1063 | item->next = kmalloc(sizeof(*item), GFP_ATOMIC); | ||
1064 | if (!item->next) { | ||
1065 | pr_warn("Incomplete multicast delivery, no memory\n"); | ||
1066 | return; | ||
1067 | } | ||
1068 | item->next->next = NULL; | ||
1069 | } | ||
1070 | } | ||
1071 | } | ||
1072 | |||
1073 | /** | ||
1074 | * tipc_port_list_free - free dynamically created entries in port_list chain | ||
1075 | * | ||
1076 | */ | ||
1077 | void tipc_port_list_free(struct tipc_port_list *pl_ptr) | ||
1078 | { | ||
1079 | struct tipc_port_list *item; | ||
1080 | struct tipc_port_list *next; | ||
1081 | |||
1082 | for (item = pl_ptr->next; item; item = next) { | ||
1083 | next = item->next; | ||
1084 | kfree(item); | ||
1085 | } | ||
1086 | } | ||
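The bcast.c changes above keep, for every buffer on the broadcast transmit queue, a count of peers that still owe an acknowledgement (bcbuf_acks()/bcbuf_decr_acks()); tipc_bclink_acknowledge() unlinks and frees a buffer once that count reaches zero. The simplified user-space model below is not taken from the patch; the names and list layout are invented to illustrate the bookkeeping only.

/*
 * Simplified model of per-buffer ack counting on a broadcast transmit
 * queue: each packet remembers how many peers have not acked yet and is
 * released when the count drops to zero.
 */
#include <stdio.h>
#include <stdlib.h>

struct bcast_pkt {
	unsigned int seqno;
	unsigned int acks_remaining;	/* peers that have not acked yet */
	struct bcast_pkt *next;
};

/* Acknowledge every packet with seqno <= acked on behalf of one peer. */
static void bclink_acknowledge(struct bcast_pkt **txq, unsigned int acked)
{
	struct bcast_pkt **p = txq;

	while (*p && (*p)->seqno <= acked) {
		struct bcast_pkt *pkt = *p;

		if (--pkt->acks_remaining == 0) {
			*p = pkt->next;		/* all peers acked: release */
			printf("released seqno %u\n", pkt->seqno);
			free(pkt);
		} else {
			p = &pkt->next;
		}
	}
}

int main(void)
{
	struct bcast_pkt *txq = NULL, **tail = &txq;
	unsigned int peers = 2;

	for (unsigned int s = 1; s <= 3; s++) {
		struct bcast_pkt *pkt = calloc(1, sizeof(*pkt));

		pkt->seqno = s;
		pkt->acks_remaining = peers;
		*tail = pkt;
		tail = &pkt->next;
	}
	bclink_acknowledge(&txq, 2);	/* peer A acked 1-2: nothing freed */
	bclink_acknowledge(&txq, 2);	/* peer B acked 1-2: 1 and 2 freed */
	bclink_acknowledge(&txq, 3);	/* still waiting for one ack on 3 */
	return 0;
}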
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 644d79129fba..4bdc12277d33 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/bcast.h: Include file for TIPC broadcast code | 2 | * net/tipc/bcast.h: Include file for TIPC broadcast code |
3 | * | 3 | * |
4 | * Copyright (c) 2003-2006, 2014, Ericsson AB | 4 | * Copyright (c) 2003-2006, 2014-2015, Ericsson AB |
5 | * Copyright (c) 2005, 2010-2011, Wind River Systems | 5 | * Copyright (c) 2005, 2010-2011, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
@@ -37,39 +37,70 @@ | |||
37 | #ifndef _TIPC_BCAST_H | 37 | #ifndef _TIPC_BCAST_H |
38 | #define _TIPC_BCAST_H | 38 | #define _TIPC_BCAST_H |
39 | 39 | ||
40 | #include "netlink.h" | 40 | #include <linux/tipc_config.h> |
41 | 41 | #include "link.h" | |
42 | #define MAX_NODES 4096 | 42 | #include "node.h" |
43 | #define WSIZE 32 | ||
44 | #define TIPC_BCLINK_RESET 1 | ||
45 | 43 | ||
46 | /** | 44 | /** |
47 | * struct tipc_node_map - set of node identifiers | 45 | * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link |
48 | * @count: # of nodes in set | 46 | * @primary: pointer to primary bearer |
49 | * @map: bitmap of node identifiers that are in the set | 47 | * @secondary: pointer to secondary bearer |
48 | * | ||
49 | * Bearers must have same priority and same set of reachable destinations | ||
50 | * to be paired. | ||
50 | */ | 51 | */ |
51 | struct tipc_node_map { | 52 | |
52 | u32 count; | 53 | struct tipc_bcbearer_pair { |
53 | u32 map[MAX_NODES / WSIZE]; | 54 | struct tipc_bearer *primary; |
55 | struct tipc_bearer *secondary; | ||
54 | }; | 56 | }; |
55 | 57 | ||
56 | #define PLSIZE 32 | 58 | #define BCBEARER MAX_BEARERS |
57 | 59 | ||
58 | /** | 60 | /** |
59 | * struct tipc_port_list - set of node local destination ports | 61 | * struct tipc_bcbearer - bearer used by broadcast link |
60 | * @count: # of ports in set (only valid for first entry in list) | 62 | * @bearer: (non-standard) broadcast bearer structure |
61 | * @next: pointer to next entry in list | 63 | * @media: (non-standard) broadcast media structure |
62 | * @ports: array of port references | 64 | * @bpairs: array of bearer pairs |
65 | * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort() | ||
66 | * @remains: temporary node map used by tipc_bcbearer_send() | ||
67 | * @remains_new: temporary node map used tipc_bcbearer_send() | ||
68 | * | ||
69 | * Note: The fields labelled "temporary" are incorporated into the bearer | ||
70 | * to avoid consuming potentially limited stack space through the use of | ||
71 | * large local variables within multicast routines. Concurrent access is | ||
72 | * prevented through use of the spinlock "bclink_lock". | ||
63 | */ | 73 | */ |
64 | struct tipc_port_list { | 74 | struct tipc_bcbearer { |
65 | int count; | 75 | struct tipc_bearer bearer; |
66 | struct tipc_port_list *next; | 76 | struct tipc_media media; |
67 | u32 ports[PLSIZE]; | 77 | struct tipc_bcbearer_pair bpairs[MAX_BEARERS]; |
78 | struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1]; | ||
79 | struct tipc_node_map remains; | ||
80 | struct tipc_node_map remains_new; | ||
68 | }; | 81 | }; |
69 | 82 | ||
83 | /** | ||
84 | * struct tipc_bclink - link used for broadcast messages | ||
85 | * @lock: spinlock governing access to structure | ||
86 | * @link: (non-standard) broadcast link structure | ||
87 | * @node: (non-standard) node structure representing b'cast link's peer node | ||
88 | * @bcast_nodes: map of broadcast-capable nodes | ||
89 | * @retransmit_to: node that most recently requested a retransmit | ||
90 | * | ||
91 | * Handles sequence numbering, fragmentation, bundling, etc. | ||
92 | */ | ||
93 | struct tipc_bclink { | ||
94 | spinlock_t lock; | ||
95 | struct tipc_link link; | ||
96 | struct tipc_node node; | ||
97 | struct sk_buff_head arrvq; | ||
98 | struct sk_buff_head inputq; | ||
99 | struct tipc_node_map bcast_nodes; | ||
100 | struct tipc_node *retransmit_to; | ||
101 | }; | ||
70 | 102 | ||
71 | struct tipc_node; | 103 | struct tipc_node; |
72 | |||
73 | extern const char tipc_bclink_name[]; | 104 | extern const char tipc_bclink_name[]; |
74 | 105 | ||
75 | /** | 106 | /** |
@@ -81,27 +112,25 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, | |||
81 | return !memcmp(nm_a, nm_b, sizeof(*nm_a)); | 112 | return !memcmp(nm_a, nm_b, sizeof(*nm_a)); |
82 | } | 113 | } |
83 | 114 | ||
84 | void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port); | 115 | int tipc_bclink_init(struct net *net); |
85 | void tipc_port_list_free(struct tipc_port_list *pl_ptr); | 116 | void tipc_bclink_stop(struct net *net); |
86 | 117 | void tipc_bclink_add_node(struct net *net, u32 addr); | |
87 | int tipc_bclink_init(void); | 118 | void tipc_bclink_remove_node(struct net *net, u32 addr); |
88 | void tipc_bclink_stop(void); | 119 | struct tipc_node *tipc_bclink_retransmit_to(struct net *tn); |
89 | void tipc_bclink_set_flags(unsigned int flags); | ||
90 | void tipc_bclink_add_node(u32 addr); | ||
91 | void tipc_bclink_remove_node(u32 addr); | ||
92 | struct tipc_node *tipc_bclink_retransmit_to(void); | ||
93 | void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked); | 120 | void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked); |
94 | void tipc_bclink_rcv(struct sk_buff *buf); | 121 | void tipc_bclink_rcv(struct net *net, struct sk_buff *buf); |
95 | u32 tipc_bclink_get_last_sent(void); | 122 | u32 tipc_bclink_get_last_sent(struct net *net); |
96 | u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr); | 123 | u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr); |
97 | void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent); | 124 | void tipc_bclink_update_link_state(struct tipc_node *node, |
98 | int tipc_bclink_stats(char *stats_buf, const u32 buf_size); | 125 | u32 last_sent); |
99 | int tipc_bclink_reset_stats(void); | 126 | int tipc_bclink_reset_stats(struct net *net); |
100 | int tipc_bclink_set_queue_limits(u32 limit); | 127 | int tipc_bclink_set_queue_limits(struct net *net, u32 limit); |
101 | void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action); | 128 | void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr, |
129 | u32 node, bool action); | ||
102 | uint tipc_bclink_get_mtu(void); | 130 | uint tipc_bclink_get_mtu(void); |
103 | int tipc_bclink_xmit(struct sk_buff_head *list); | 131 | int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list); |
104 | void tipc_bclink_wakeup_users(void); | 132 | void tipc_bclink_wakeup_users(struct net *net); |
105 | int tipc_nl_add_bc_link(struct tipc_nl_msg *msg); | 133 | int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg); |
134 | void tipc_bclink_input(struct net *net); | ||
106 | 135 | ||
107 | #endif | 136 | #endif |
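The tipc_node_map referenced here (and declared in bearer.h below) is a plain count-plus-bitmap set of MAX_NODES/WSIZE words. The in-tree helpers (tipc_nmap_add, tipc_nmap_remove, tipc_nmap_diff) are not part of these hunks; the following is only a sketch of the bit arithmetic such a map implies, assuming node identifiers below MAX_NODES:

	#include <linux/types.h>

	#define MAX_NODES 4096
	#define WSIZE     32

	struct tipc_node_map {
		u32 count;			/* number of nodes currently in the set */
		u32 map[MAX_NODES / WSIZE];	/* one bit per node identifier */
	};

	/* Hypothetical helpers; callers must pass node < MAX_NODES. */
	static inline int nmap_contains(const struct tipc_node_map *nm, u32 node)
	{
		return (nm->map[node / WSIZE] >> (node % WSIZE)) & 1;
	}

	static inline void nmap_add(struct tipc_node_map *nm, u32 node)
	{
		if (!nmap_contains(nm, node)) {
			nm->map[node / WSIZE] |= 1u << (node % WSIZE);
			nm->count++;
		}
	}

	static inline void nmap_remove(struct tipc_node_map *nm, u32 node)
	{
		if (nmap_contains(nm, node)) {
			nm->map[node / WSIZE] &= ~(1u << (node % WSIZE));
			nm->count--;
		}
	}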
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 463db5b15b8b..3613e72e858e 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -34,11 +34,12 @@ | |||
34 | * POSSIBILITY OF SUCH DAMAGE. | 34 | * POSSIBILITY OF SUCH DAMAGE. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include <net/sock.h> | ||
37 | #include "core.h" | 38 | #include "core.h" |
38 | #include "config.h" | ||
39 | #include "bearer.h" | 39 | #include "bearer.h" |
40 | #include "link.h" | 40 | #include "link.h" |
41 | #include "discover.h" | 41 | #include "discover.h" |
42 | #include "bcast.h" | ||
42 | 43 | ||
43 | #define MAX_ADDR_STR 60 | 44 | #define MAX_ADDR_STR 60 |
44 | 45 | ||
@@ -47,6 +48,9 @@ static struct tipc_media * const media_info_array[] = { | |||
47 | #ifdef CONFIG_TIPC_MEDIA_IB | 48 | #ifdef CONFIG_TIPC_MEDIA_IB |
48 | &ib_media_info, | 49 | &ib_media_info, |
49 | #endif | 50 | #endif |
51 | #ifdef CONFIG_TIPC_MEDIA_UDP | ||
52 | &udp_media_info, | ||
53 | #endif | ||
50 | NULL | 54 | NULL |
51 | }; | 55 | }; |
52 | 56 | ||
@@ -67,9 +71,8 @@ static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = { | |||
67 | [TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED } | 71 | [TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED } |
68 | }; | 72 | }; |
69 | 73 | ||
70 | struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1]; | 74 | static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr, |
71 | 75 | bool shutting_down); | |
72 | static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down); | ||
73 | 76 | ||
74 | /** | 77 | /** |
75 | * tipc_media_find - locates specified media object by name | 78 | * tipc_media_find - locates specified media object by name |
@@ -111,38 +114,18 @@ void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a) | |||
111 | m_ptr = media_find_id(a->media_id); | 114 | m_ptr = media_find_id(a->media_id); |
112 | 115 | ||
113 | if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str))) | 116 | if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str))) |
114 | ret = tipc_snprintf(buf, len, "%s(%s)", m_ptr->name, addr_str); | 117 | ret = scnprintf(buf, len, "%s(%s)", m_ptr->name, addr_str); |
115 | else { | 118 | else { |
116 | u32 i; | 119 | u32 i; |
117 | 120 | ||
118 | ret = tipc_snprintf(buf, len, "UNKNOWN(%u)", a->media_id); | 121 | ret = scnprintf(buf, len, "UNKNOWN(%u)", a->media_id); |
119 | for (i = 0; i < sizeof(a->value); i++) | 122 | for (i = 0; i < sizeof(a->value); i++) |
120 | ret += tipc_snprintf(buf - ret, len + ret, | 123 | ret += scnprintf(buf - ret, len + ret, |
121 | "-%02x", a->value[i]); | 124 | "-%02x", a->value[i]); |
122 | } | 125 | } |
123 | } | 126 | } |
124 | 127 | ||
125 | /** | 128 | /** |
126 | * tipc_media_get_names - record names of registered media in buffer | ||
127 | */ | ||
128 | struct sk_buff *tipc_media_get_names(void) | ||
129 | { | ||
130 | struct sk_buff *buf; | ||
131 | int i; | ||
132 | |||
133 | buf = tipc_cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME)); | ||
134 | if (!buf) | ||
135 | return NULL; | ||
136 | |||
137 | for (i = 0; media_info_array[i] != NULL; i++) { | ||
138 | tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, | ||
139 | media_info_array[i]->name, | ||
140 | strlen(media_info_array[i]->name) + 1); | ||
141 | } | ||
142 | return buf; | ||
143 | } | ||
144 | |||
145 | /** | ||
146 | * bearer_name_validate - validate & (optionally) deconstruct bearer name | 129 | * bearer_name_validate - validate & (optionally) deconstruct bearer name |
147 | * @name: ptr to bearer name string | 130 | * @name: ptr to bearer name string |
148 | * @name_parts: ptr to area for bearer name components (or NULL if not needed) | 131 | * @name_parts: ptr to area for bearer name components (or NULL if not needed) |
@@ -190,68 +173,43 @@ static int bearer_name_validate(const char *name, | |||
190 | /** | 173 | /** |
191 | * tipc_bearer_find - locates bearer object with matching bearer name | 174 | * tipc_bearer_find - locates bearer object with matching bearer name |
192 | */ | 175 | */ |
193 | struct tipc_bearer *tipc_bearer_find(const char *name) | 176 | struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name) |
194 | { | 177 | { |
178 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
195 | struct tipc_bearer *b_ptr; | 179 | struct tipc_bearer *b_ptr; |
196 | u32 i; | 180 | u32 i; |
197 | 181 | ||
198 | for (i = 0; i < MAX_BEARERS; i++) { | 182 | for (i = 0; i < MAX_BEARERS; i++) { |
199 | b_ptr = rtnl_dereference(bearer_list[i]); | 183 | b_ptr = rtnl_dereference(tn->bearer_list[i]); |
200 | if (b_ptr && (!strcmp(b_ptr->name, name))) | 184 | if (b_ptr && (!strcmp(b_ptr->name, name))) |
201 | return b_ptr; | 185 | return b_ptr; |
202 | } | 186 | } |
203 | return NULL; | 187 | return NULL; |
204 | } | 188 | } |
205 | 189 | ||
206 | /** | 190 | void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest) |
207 | * tipc_bearer_get_names - record names of bearers in buffer | ||
208 | */ | ||
209 | struct sk_buff *tipc_bearer_get_names(void) | ||
210 | { | ||
211 | struct sk_buff *buf; | ||
212 | struct tipc_bearer *b; | ||
213 | int i, j; | ||
214 | |||
215 | buf = tipc_cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME)); | ||
216 | if (!buf) | ||
217 | return NULL; | ||
218 | |||
219 | for (i = 0; media_info_array[i] != NULL; i++) { | ||
220 | for (j = 0; j < MAX_BEARERS; j++) { | ||
221 | b = rtnl_dereference(bearer_list[j]); | ||
222 | if (!b) | ||
223 | continue; | ||
224 | if (b->media == media_info_array[i]) { | ||
225 | tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME, | ||
226 | b->name, | ||
227 | strlen(b->name) + 1); | ||
228 | } | ||
229 | } | ||
230 | } | ||
231 | return buf; | ||
232 | } | ||
233 | |||
234 | void tipc_bearer_add_dest(u32 bearer_id, u32 dest) | ||
235 | { | 191 | { |
192 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
236 | struct tipc_bearer *b_ptr; | 193 | struct tipc_bearer *b_ptr; |
237 | 194 | ||
238 | rcu_read_lock(); | 195 | rcu_read_lock(); |
239 | b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]); | 196 | b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); |
240 | if (b_ptr) { | 197 | if (b_ptr) { |
241 | tipc_bcbearer_sort(&b_ptr->nodes, dest, true); | 198 | tipc_bcbearer_sort(net, &b_ptr->nodes, dest, true); |
242 | tipc_disc_add_dest(b_ptr->link_req); | 199 | tipc_disc_add_dest(b_ptr->link_req); |
243 | } | 200 | } |
244 | rcu_read_unlock(); | 201 | rcu_read_unlock(); |
245 | } | 202 | } |
246 | 203 | ||
247 | void tipc_bearer_remove_dest(u32 bearer_id, u32 dest) | 204 | void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest) |
248 | { | 205 | { |
206 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
249 | struct tipc_bearer *b_ptr; | 207 | struct tipc_bearer *b_ptr; |
250 | 208 | ||
251 | rcu_read_lock(); | 209 | rcu_read_lock(); |
252 | b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]); | 210 | b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); |
253 | if (b_ptr) { | 211 | if (b_ptr) { |
254 | tipc_bcbearer_sort(&b_ptr->nodes, dest, false); | 212 | tipc_bcbearer_sort(net, &b_ptr->nodes, dest, false); |
255 | tipc_disc_remove_dest(b_ptr->link_req); | 213 | tipc_disc_remove_dest(b_ptr->link_req); |
256 | } | 214 | } |
257 | rcu_read_unlock(); | 215 | rcu_read_unlock(); |
@@ -260,8 +218,11 @@ void tipc_bearer_remove_dest(u32 bearer_id, u32 dest) | |||
260 | /** | 218 | /** |
261 | * tipc_enable_bearer - enable bearer with the given name | 219 | * tipc_enable_bearer - enable bearer with the given name |
262 | */ | 220 | */ |
263 | int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) | 221 | static int tipc_enable_bearer(struct net *net, const char *name, |
222 | u32 disc_domain, u32 priority, | ||
223 | struct nlattr *attr[]) | ||
264 | { | 224 | { |
225 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
265 | struct tipc_bearer *b_ptr; | 226 | struct tipc_bearer *b_ptr; |
266 | struct tipc_media *m_ptr; | 227 | struct tipc_media *m_ptr; |
267 | struct tipc_bearer_names b_names; | 228 | struct tipc_bearer_names b_names; |
@@ -271,7 +232,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) | |||
271 | u32 i; | 232 | u32 i; |
272 | int res = -EINVAL; | 233 | int res = -EINVAL; |
273 | 234 | ||
274 | if (!tipc_own_addr) { | 235 | if (!tn->own_addr) { |
275 | pr_warn("Bearer <%s> rejected, not supported in standalone mode\n", | 236 | pr_warn("Bearer <%s> rejected, not supported in standalone mode\n", |
276 | name); | 237 | name); |
277 | return -ENOPROTOOPT; | 238 | return -ENOPROTOOPT; |
@@ -281,11 +242,11 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) | |||
281 | return -EINVAL; | 242 | return -EINVAL; |
282 | } | 243 | } |
283 | if (tipc_addr_domain_valid(disc_domain) && | 244 | if (tipc_addr_domain_valid(disc_domain) && |
284 | (disc_domain != tipc_own_addr)) { | 245 | (disc_domain != tn->own_addr)) { |
285 | if (tipc_in_scope(disc_domain, tipc_own_addr)) { | 246 | if (tipc_in_scope(disc_domain, tn->own_addr)) { |
286 | disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK; | 247 | disc_domain = tn->own_addr & TIPC_CLUSTER_MASK; |
287 | res = 0; /* accept any node in own cluster */ | 248 | res = 0; /* accept any node in own cluster */ |
288 | } else if (in_own_cluster_exact(disc_domain)) | 249 | } else if (in_own_cluster_exact(net, disc_domain)) |
289 | res = 0; /* accept specified node in own cluster */ | 250 | res = 0; /* accept specified node in own cluster */ |
290 | } | 251 | } |
291 | if (res) { | 252 | if (res) { |
@@ -313,7 +274,7 @@ restart: | |||
313 | bearer_id = MAX_BEARERS; | 274 | bearer_id = MAX_BEARERS; |
314 | with_this_prio = 1; | 275 | with_this_prio = 1; |
315 | for (i = MAX_BEARERS; i-- != 0; ) { | 276 | for (i = MAX_BEARERS; i-- != 0; ) { |
316 | b_ptr = rtnl_dereference(bearer_list[i]); | 277 | b_ptr = rtnl_dereference(tn->bearer_list[i]); |
317 | if (!b_ptr) { | 278 | if (!b_ptr) { |
318 | bearer_id = i; | 279 | bearer_id = i; |
319 | continue; | 280 | continue; |
@@ -347,7 +308,7 @@ restart: | |||
347 | 308 | ||
348 | strcpy(b_ptr->name, name); | 309 | strcpy(b_ptr->name, name); |
349 | b_ptr->media = m_ptr; | 310 | b_ptr->media = m_ptr; |
350 | res = m_ptr->enable_media(b_ptr); | 311 | res = m_ptr->enable_media(net, b_ptr, attr); |
351 | if (res) { | 312 | if (res) { |
352 | pr_warn("Bearer <%s> rejected, enable failure (%d)\n", | 313 | pr_warn("Bearer <%s> rejected, enable failure (%d)\n", |
353 | name, -res); | 314 | name, -res); |
@@ -361,15 +322,15 @@ restart: | |||
361 | b_ptr->net_plane = bearer_id + 'A'; | 322 | b_ptr->net_plane = bearer_id + 'A'; |
362 | b_ptr->priority = priority; | 323 | b_ptr->priority = priority; |
363 | 324 | ||
364 | res = tipc_disc_create(b_ptr, &b_ptr->bcast_addr); | 325 | res = tipc_disc_create(net, b_ptr, &b_ptr->bcast_addr); |
365 | if (res) { | 326 | if (res) { |
366 | bearer_disable(b_ptr, false); | 327 | bearer_disable(net, b_ptr, false); |
367 | pr_warn("Bearer <%s> rejected, discovery object creation failed\n", | 328 | pr_warn("Bearer <%s> rejected, discovery object creation failed\n", |
368 | name); | 329 | name); |
369 | return -EINVAL; | 330 | return -EINVAL; |
370 | } | 331 | } |
371 | 332 | ||
372 | rcu_assign_pointer(bearer_list[bearer_id], b_ptr); | 333 | rcu_assign_pointer(tn->bearer_list[bearer_id], b_ptr); |
373 | 334 | ||
374 | pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", | 335 | pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", |
375 | name, | 336 | name, |
@@ -380,11 +341,11 @@ restart: | |||
380 | /** | 341 | /** |
381 | * tipc_reset_bearer - Reset all links established over this bearer | 342 | * tipc_reset_bearer - Reset all links established over this bearer |
382 | */ | 343 | */ |
383 | static int tipc_reset_bearer(struct tipc_bearer *b_ptr) | 344 | static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr) |
384 | { | 345 | { |
385 | pr_info("Resetting bearer <%s>\n", b_ptr->name); | 346 | pr_info("Resetting bearer <%s>\n", b_ptr->name); |
386 | tipc_link_reset_list(b_ptr->identity); | 347 | tipc_link_reset_list(net, b_ptr->identity); |
387 | tipc_disc_reset(b_ptr); | 348 | tipc_disc_reset(net, b_ptr); |
388 | return 0; | 349 | return 0; |
389 | } | 350 | } |
390 | 351 | ||
@@ -393,49 +354,36 @@ static int tipc_reset_bearer(struct tipc_bearer *b_ptr) | |||
393 | * | 354 | * |
394 | * Note: This routine assumes caller holds RTNL lock. | 355 | * Note: This routine assumes caller holds RTNL lock. |
395 | */ | 356 | */ |
396 | static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down) | 357 | static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr, |
358 | bool shutting_down) | ||
397 | { | 359 | { |
360 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
398 | u32 i; | 361 | u32 i; |
399 | 362 | ||
400 | pr_info("Disabling bearer <%s>\n", b_ptr->name); | 363 | pr_info("Disabling bearer <%s>\n", b_ptr->name); |
401 | b_ptr->media->disable_media(b_ptr); | 364 | b_ptr->media->disable_media(b_ptr); |
402 | 365 | ||
403 | tipc_link_delete_list(b_ptr->identity, shutting_down); | 366 | tipc_link_delete_list(net, b_ptr->identity, shutting_down); |
404 | if (b_ptr->link_req) | 367 | if (b_ptr->link_req) |
405 | tipc_disc_delete(b_ptr->link_req); | 368 | tipc_disc_delete(b_ptr->link_req); |
406 | 369 | ||
407 | for (i = 0; i < MAX_BEARERS; i++) { | 370 | for (i = 0; i < MAX_BEARERS; i++) { |
408 | if (b_ptr == rtnl_dereference(bearer_list[i])) { | 371 | if (b_ptr == rtnl_dereference(tn->bearer_list[i])) { |
409 | RCU_INIT_POINTER(bearer_list[i], NULL); | 372 | RCU_INIT_POINTER(tn->bearer_list[i], NULL); |
410 | break; | 373 | break; |
411 | } | 374 | } |
412 | } | 375 | } |
413 | kfree_rcu(b_ptr, rcu); | 376 | kfree_rcu(b_ptr, rcu); |
414 | } | 377 | } |
415 | 378 | ||
416 | int tipc_disable_bearer(const char *name) | 379 | int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, |
417 | { | 380 | struct nlattr *attr[]) |
418 | struct tipc_bearer *b_ptr; | ||
419 | int res; | ||
420 | |||
421 | b_ptr = tipc_bearer_find(name); | ||
422 | if (b_ptr == NULL) { | ||
423 | pr_warn("Attempt to disable unknown bearer <%s>\n", name); | ||
424 | res = -EINVAL; | ||
425 | } else { | ||
426 | bearer_disable(b_ptr, false); | ||
427 | res = 0; | ||
428 | } | ||
429 | return res; | ||
430 | } | ||
431 | |||
432 | int tipc_enable_l2_media(struct tipc_bearer *b) | ||
433 | { | 381 | { |
434 | struct net_device *dev; | 382 | struct net_device *dev; |
435 | char *driver_name = strchr((const char *)b->name, ':') + 1; | 383 | char *driver_name = strchr((const char *)b->name, ':') + 1; |
436 | 384 | ||
437 | /* Find device with specified name */ | 385 | /* Find device with specified name */ |
438 | dev = dev_get_by_name(&init_net, driver_name); | 386 | dev = dev_get_by_name(net, driver_name); |
439 | if (!dev) | 387 | if (!dev) |
440 | return -ENODEV; | 388 | return -ENODEV; |
441 | 389 | ||
@@ -474,8 +422,8 @@ void tipc_disable_l2_media(struct tipc_bearer *b) | |||
474 | * @b_ptr: the bearer through which the packet is to be sent | 422 | * @b_ptr: the bearer through which the packet is to be sent |
475 | * @dest: peer destination address | 423 | * @dest: peer destination address |
476 | */ | 424 | */ |
477 | int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b, | 425 | int tipc_l2_send_msg(struct net *net, struct sk_buff *buf, |
478 | struct tipc_media_addr *dest) | 426 | struct tipc_bearer *b, struct tipc_media_addr *dest) |
479 | { | 427 | { |
480 | struct sk_buff *clone; | 428 | struct sk_buff *clone; |
481 | struct net_device *dev; | 429 | struct net_device *dev; |
@@ -511,15 +459,16 @@ int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b, | |||
511 | * The media send routine must not alter the buffer being passed in | 459 | * The media send routine must not alter the buffer being passed in |
512 | * as it may be needed for later retransmission! | 460 | * as it may be needed for later retransmission! |
513 | */ | 461 | */ |
514 | void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf, | 462 | void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf, |
515 | struct tipc_media_addr *dest) | 463 | struct tipc_media_addr *dest) |
516 | { | 464 | { |
465 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
517 | struct tipc_bearer *b_ptr; | 466 | struct tipc_bearer *b_ptr; |
518 | 467 | ||
519 | rcu_read_lock(); | 468 | rcu_read_lock(); |
520 | b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]); | 469 | b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); |
521 | if (likely(b_ptr)) | 470 | if (likely(b_ptr)) |
522 | b_ptr->media->send_msg(buf, b_ptr, dest); | 471 | b_ptr->media->send_msg(net, buf, b_ptr, dest); |
523 | rcu_read_unlock(); | 472 | rcu_read_unlock(); |
524 | } | 473 | } |
525 | 474 | ||
@@ -539,17 +488,12 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev, | |||
539 | { | 488 | { |
540 | struct tipc_bearer *b_ptr; | 489 | struct tipc_bearer *b_ptr; |
541 | 490 | ||
542 | if (!net_eq(dev_net(dev), &init_net)) { | ||
543 | kfree_skb(buf); | ||
544 | return NET_RX_DROP; | ||
545 | } | ||
546 | |||
547 | rcu_read_lock(); | 491 | rcu_read_lock(); |
548 | b_ptr = rcu_dereference_rtnl(dev->tipc_ptr); | 492 | b_ptr = rcu_dereference_rtnl(dev->tipc_ptr); |
549 | if (likely(b_ptr)) { | 493 | if (likely(b_ptr)) { |
550 | if (likely(buf->pkt_type <= PACKET_BROADCAST)) { | 494 | if (likely(buf->pkt_type <= PACKET_BROADCAST)) { |
551 | buf->next = NULL; | 495 | buf->next = NULL; |
552 | tipc_rcv(buf, b_ptr); | 496 | tipc_rcv(dev_net(dev), buf, b_ptr); |
553 | rcu_read_unlock(); | 497 | rcu_read_unlock(); |
554 | return NET_RX_SUCCESS; | 498 | return NET_RX_SUCCESS; |
555 | } | 499 | } |
@@ -572,11 +516,9 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev, | |||
572 | static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, | 516 | static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, |
573 | void *ptr) | 517 | void *ptr) |
574 | { | 518 | { |
575 | struct tipc_bearer *b_ptr; | ||
576 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); | 519 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
577 | 520 | struct net *net = dev_net(dev); | |
578 | if (!net_eq(dev_net(dev), &init_net)) | 521 | struct tipc_bearer *b_ptr; |
579 | return NOTIFY_DONE; | ||
580 | 522 | ||
581 | b_ptr = rtnl_dereference(dev->tipc_ptr); | 523 | b_ptr = rtnl_dereference(dev->tipc_ptr); |
582 | if (!b_ptr) | 524 | if (!b_ptr) |
@@ -590,16 +532,16 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, | |||
590 | break; | 532 | break; |
591 | case NETDEV_DOWN: | 533 | case NETDEV_DOWN: |
592 | case NETDEV_CHANGEMTU: | 534 | case NETDEV_CHANGEMTU: |
593 | tipc_reset_bearer(b_ptr); | 535 | tipc_reset_bearer(net, b_ptr); |
594 | break; | 536 | break; |
595 | case NETDEV_CHANGEADDR: | 537 | case NETDEV_CHANGEADDR: |
596 | b_ptr->media->raw2addr(b_ptr, &b_ptr->addr, | 538 | b_ptr->media->raw2addr(b_ptr, &b_ptr->addr, |
597 | (char *)dev->dev_addr); | 539 | (char *)dev->dev_addr); |
598 | tipc_reset_bearer(b_ptr); | 540 | tipc_reset_bearer(net, b_ptr); |
599 | break; | 541 | break; |
600 | case NETDEV_UNREGISTER: | 542 | case NETDEV_UNREGISTER: |
601 | case NETDEV_CHANGENAME: | 543 | case NETDEV_CHANGENAME: |
602 | bearer_disable(b_ptr, false); | 544 | bearer_disable(dev_net(dev), b_ptr, false); |
603 | break; | 545 | break; |
604 | } | 546 | } |
605 | return NOTIFY_OK; | 547 | return NOTIFY_OK; |
@@ -632,16 +574,17 @@ void tipc_bearer_cleanup(void) | |||
632 | dev_remove_pack(&tipc_packet_type); | 574 | dev_remove_pack(&tipc_packet_type); |
633 | } | 575 | } |
634 | 576 | ||
635 | void tipc_bearer_stop(void) | 577 | void tipc_bearer_stop(struct net *net) |
636 | { | 578 | { |
579 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
637 | struct tipc_bearer *b_ptr; | 580 | struct tipc_bearer *b_ptr; |
638 | u32 i; | 581 | u32 i; |
639 | 582 | ||
640 | for (i = 0; i < MAX_BEARERS; i++) { | 583 | for (i = 0; i < MAX_BEARERS; i++) { |
641 | b_ptr = rtnl_dereference(bearer_list[i]); | 584 | b_ptr = rtnl_dereference(tn->bearer_list[i]); |
642 | if (b_ptr) { | 585 | if (b_ptr) { |
643 | bearer_disable(b_ptr, true); | 586 | bearer_disable(net, b_ptr, true); |
644 | bearer_list[i] = NULL; | 587 | tn->bearer_list[i] = NULL; |
645 | } | 588 | } |
646 | } | 589 | } |
647 | } | 590 | } |
@@ -654,7 +597,7 @@ static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg, | |||
654 | struct nlattr *attrs; | 597 | struct nlattr *attrs; |
655 | struct nlattr *prop; | 598 | struct nlattr *prop; |
656 | 599 | ||
657 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, | 600 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, |
658 | NLM_F_MULTI, TIPC_NL_BEARER_GET); | 601 | NLM_F_MULTI, TIPC_NL_BEARER_GET); |
659 | if (!hdr) | 602 | if (!hdr) |
660 | return -EMSGSIZE; | 603 | return -EMSGSIZE; |
@@ -698,6 +641,8 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
698 | int i = cb->args[0]; | 641 | int i = cb->args[0]; |
699 | struct tipc_bearer *bearer; | 642 | struct tipc_bearer *bearer; |
700 | struct tipc_nl_msg msg; | 643 | struct tipc_nl_msg msg; |
644 | struct net *net = sock_net(skb->sk); | ||
645 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
701 | 646 | ||
702 | if (i == MAX_BEARERS) | 647 | if (i == MAX_BEARERS) |
703 | return 0; | 648 | return 0; |
@@ -708,7 +653,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
708 | 653 | ||
709 | rtnl_lock(); | 654 | rtnl_lock(); |
710 | for (i = 0; i < MAX_BEARERS; i++) { | 655 | for (i = 0; i < MAX_BEARERS; i++) { |
711 | bearer = rtnl_dereference(bearer_list[i]); | 656 | bearer = rtnl_dereference(tn->bearer_list[i]); |
712 | if (!bearer) | 657 | if (!bearer) |
713 | continue; | 658 | continue; |
714 | 659 | ||
@@ -730,6 +675,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info) | |||
730 | struct tipc_bearer *bearer; | 675 | struct tipc_bearer *bearer; |
731 | struct tipc_nl_msg msg; | 676 | struct tipc_nl_msg msg; |
732 | struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; | 677 | struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; |
678 | struct net *net = genl_info_net(info); | ||
733 | 679 | ||
734 | if (!info->attrs[TIPC_NLA_BEARER]) | 680 | if (!info->attrs[TIPC_NLA_BEARER]) |
735 | return -EINVAL; | 681 | return -EINVAL; |
@@ -753,7 +699,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info) | |||
753 | msg.seq = info->snd_seq; | 699 | msg.seq = info->snd_seq; |
754 | 700 | ||
755 | rtnl_lock(); | 701 | rtnl_lock(); |
756 | bearer = tipc_bearer_find(name); | 702 | bearer = tipc_bearer_find(net, name); |
757 | if (!bearer) { | 703 | if (!bearer) { |
758 | err = -EINVAL; | 704 | err = -EINVAL; |
759 | goto err_out; | 705 | goto err_out; |
@@ -778,6 +724,7 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) | |||
778 | char *name; | 724 | char *name; |
779 | struct tipc_bearer *bearer; | 725 | struct tipc_bearer *bearer; |
780 | struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; | 726 | struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; |
727 | struct net *net = sock_net(skb->sk); | ||
781 | 728 | ||
782 | if (!info->attrs[TIPC_NLA_BEARER]) | 729 | if (!info->attrs[TIPC_NLA_BEARER]) |
783 | return -EINVAL; | 730 | return -EINVAL; |
@@ -794,13 +741,13 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) | |||
794 | name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); | 741 | name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); |
795 | 742 | ||
796 | rtnl_lock(); | 743 | rtnl_lock(); |
797 | bearer = tipc_bearer_find(name); | 744 | bearer = tipc_bearer_find(net, name); |
798 | if (!bearer) { | 745 | if (!bearer) { |
799 | rtnl_unlock(); | 746 | rtnl_unlock(); |
800 | return -EINVAL; | 747 | return -EINVAL; |
801 | } | 748 | } |
802 | 749 | ||
803 | bearer_disable(bearer, false); | 750 | bearer_disable(net, bearer, false); |
804 | rtnl_unlock(); | 751 | rtnl_unlock(); |
805 | 752 | ||
806 | return 0; | 753 | return 0; |
@@ -811,11 +758,13 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) | |||
811 | int err; | 758 | int err; |
812 | char *bearer; | 759 | char *bearer; |
813 | struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; | 760 | struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; |
761 | struct net *net = sock_net(skb->sk); | ||
762 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
814 | u32 domain; | 763 | u32 domain; |
815 | u32 prio; | 764 | u32 prio; |
816 | 765 | ||
817 | prio = TIPC_MEDIA_LINK_PRI; | 766 | prio = TIPC_MEDIA_LINK_PRI; |
818 | domain = tipc_own_addr & TIPC_CLUSTER_MASK; | 767 | domain = tn->own_addr & TIPC_CLUSTER_MASK; |
819 | 768 | ||
820 | if (!info->attrs[TIPC_NLA_BEARER]) | 769 | if (!info->attrs[TIPC_NLA_BEARER]) |
821 | return -EINVAL; | 770 | return -EINVAL; |
@@ -847,7 +796,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) | |||
847 | } | 796 | } |
848 | 797 | ||
849 | rtnl_lock(); | 798 | rtnl_lock(); |
850 | err = tipc_enable_bearer(bearer, domain, prio); | 799 | err = tipc_enable_bearer(net, bearer, domain, prio, attrs); |
851 | if (err) { | 800 | if (err) { |
852 | rtnl_unlock(); | 801 | rtnl_unlock(); |
853 | return err; | 802 | return err; |
@@ -863,6 +812,7 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) | |||
863 | char *name; | 812 | char *name; |
864 | struct tipc_bearer *b; | 813 | struct tipc_bearer *b; |
865 | struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; | 814 | struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; |
815 | struct net *net = genl_info_net(info); | ||
866 | 816 | ||
867 | if (!info->attrs[TIPC_NLA_BEARER]) | 817 | if (!info->attrs[TIPC_NLA_BEARER]) |
868 | return -EINVAL; | 818 | return -EINVAL; |
@@ -878,7 +828,7 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) | |||
878 | name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); | 828 | name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); |
879 | 829 | ||
880 | rtnl_lock(); | 830 | rtnl_lock(); |
881 | b = tipc_bearer_find(name); | 831 | b = tipc_bearer_find(net, name); |
882 | if (!b) { | 832 | if (!b) { |
883 | rtnl_unlock(); | 833 | rtnl_unlock(); |
884 | return -EINVAL; | 834 | return -EINVAL; |
@@ -913,7 +863,7 @@ static int __tipc_nl_add_media(struct tipc_nl_msg *msg, | |||
913 | struct nlattr *attrs; | 863 | struct nlattr *attrs; |
914 | struct nlattr *prop; | 864 | struct nlattr *prop; |
915 | 865 | ||
916 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, | 866 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, |
917 | NLM_F_MULTI, TIPC_NL_MEDIA_GET); | 867 | NLM_F_MULTI, TIPC_NL_MEDIA_GET); |
918 | if (!hdr) | 868 | if (!hdr) |
919 | return -EMSGSIZE; | 869 | return -EMSGSIZE; |
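The bearer table is now per namespace (tn->bearer_list) but keeps the same protection scheme: readers run under rcu_read_lock(), while writers hold RTNL and publish or retire entries with rcu_assign_pointer() and kfree_rcu(), as tipc_enable_bearer() and bearer_disable() above do. A condensed sketch of the reader side, with an abbreviated per-net structure assumed purely for illustration:

	#include <linux/types.h>
	#include <linux/printk.h>
	#include <linux/rcupdate.h>

	#define MAX_BEARERS 2

	struct tipc_bearer;		/* opaque here; the real struct carries name, media, etc. */

	struct tipc_net_sketch {	/* abbreviated stand-in for struct tipc_net */
		struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
	};

	/* Reader side: packet paths look a bearer up by id and may only use
	 * it inside the RCU read-side critical section, mirroring
	 * tipc_bearer_send() above.
	 */
	static void bearer_peek_sketch(struct tipc_net_sketch *tn, u32 bearer_id)
	{
		struct tipc_bearer *b;

		rcu_read_lock();
		b = rcu_dereference(tn->bearer_list[bearer_id]);
		if (b)
			pr_debug("bearer slot %u is active\n", bearer_id);
		rcu_read_unlock();
	}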
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 2c1230ac5dfe..5cad243ee8fc 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h | |||
@@ -37,26 +37,39 @@ | |||
37 | #ifndef _TIPC_BEARER_H | 37 | #ifndef _TIPC_BEARER_H |
38 | #define _TIPC_BEARER_H | 38 | #define _TIPC_BEARER_H |
39 | 39 | ||
40 | #include "bcast.h" | ||
41 | #include "netlink.h" | 40 | #include "netlink.h" |
42 | #include <net/genetlink.h> | 41 | #include <net/genetlink.h> |
43 | 42 | ||
44 | #define MAX_BEARERS 2 | 43 | #define MAX_BEARERS 2 |
45 | #define MAX_MEDIA 2 | 44 | #define MAX_MEDIA 3 |
45 | #define MAX_NODES 4096 | ||
46 | #define WSIZE 32 | ||
46 | 47 | ||
47 | /* Identifiers associated with TIPC message header media address info | 48 | /* Identifiers associated with TIPC message header media address info |
48 | * - address info field is 32 bytes long | 49 | * - address info field is 32 bytes long |
49 | * - the field's actual content and length is defined per media | 50 | * - the field's actual content and length is defined per media |
50 | * - remaining unused bytes in the field are set to zero | 51 | * - remaining unused bytes in the field are set to zero |
51 | */ | 52 | */ |
52 | #define TIPC_MEDIA_ADDR_SIZE 32 | 53 | #define TIPC_MEDIA_INFO_SIZE 32 |
53 | #define TIPC_MEDIA_TYPE_OFFSET 3 | 54 | #define TIPC_MEDIA_TYPE_OFFSET 3 |
55 | #define TIPC_MEDIA_ADDR_OFFSET 4 | ||
54 | 56 | ||
55 | /* | 57 | /* |
56 | * Identifiers of supported TIPC media types | 58 | * Identifiers of supported TIPC media types |
57 | */ | 59 | */ |
58 | #define TIPC_MEDIA_TYPE_ETH 1 | 60 | #define TIPC_MEDIA_TYPE_ETH 1 |
59 | #define TIPC_MEDIA_TYPE_IB 2 | 61 | #define TIPC_MEDIA_TYPE_IB 2 |
62 | #define TIPC_MEDIA_TYPE_UDP 3 | ||
63 | |||
64 | /** | ||
65 | * struct tipc_node_map - set of node identifiers | ||
66 | * @count: # of nodes in set | ||
67 | * @map: bitmap of node identifiers that are in the set | ||
68 | */ | ||
69 | struct tipc_node_map { | ||
70 | u32 count; | ||
71 | u32 map[MAX_NODES / WSIZE]; | ||
72 | }; | ||
60 | 73 | ||
61 | /** | 74 | /** |
62 | * struct tipc_media_addr - destination address used by TIPC bearers | 75 | * struct tipc_media_addr - destination address used by TIPC bearers |
@@ -65,7 +78,7 @@ | |||
65 | * @broadcast: non-zero if address is a broadcast address | 78 | * @broadcast: non-zero if address is a broadcast address |
66 | */ | 79 | */ |
67 | struct tipc_media_addr { | 80 | struct tipc_media_addr { |
68 | u8 value[TIPC_MEDIA_ADDR_SIZE]; | 81 | u8 value[TIPC_MEDIA_INFO_SIZE]; |
69 | u8 media_id; | 82 | u8 media_id; |
70 | u8 broadcast; | 83 | u8 broadcast; |
71 | }; | 84 | }; |
@@ -89,10 +102,11 @@ struct tipc_bearer; | |||
89 | * @name: media name | 102 | * @name: media name |
90 | */ | 103 | */ |
91 | struct tipc_media { | 104 | struct tipc_media { |
92 | int (*send_msg)(struct sk_buff *buf, | 105 | int (*send_msg)(struct net *net, struct sk_buff *buf, |
93 | struct tipc_bearer *b_ptr, | 106 | struct tipc_bearer *b_ptr, |
94 | struct tipc_media_addr *dest); | 107 | struct tipc_media_addr *dest); |
95 | int (*enable_media)(struct tipc_bearer *b_ptr); | 108 | int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr, |
109 | struct nlattr *attr[]); | ||
96 | void (*disable_media)(struct tipc_bearer *b_ptr); | 110 | void (*disable_media)(struct tipc_bearer *b_ptr); |
97 | int (*addr2str)(struct tipc_media_addr *addr, | 111 | int (*addr2str)(struct tipc_media_addr *addr, |
98 | char *strbuf, | 112 | char *strbuf, |
@@ -157,17 +171,11 @@ struct tipc_bearer_names { | |||
157 | char if_name[TIPC_MAX_IF_NAME]; | 171 | char if_name[TIPC_MAX_IF_NAME]; |
158 | }; | 172 | }; |
159 | 173 | ||
160 | struct tipc_link; | ||
161 | |||
162 | extern struct tipc_bearer __rcu *bearer_list[]; | ||
163 | |||
164 | /* | 174 | /* |
165 | * TIPC routines available to supported media types | 175 | * TIPC routines available to supported media types |
166 | */ | 176 | */ |
167 | 177 | ||
168 | void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *tb_ptr); | 178 | void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr); |
169 | int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority); | ||
170 | int tipc_disable_bearer(const char *name); | ||
171 | 179 | ||
172 | /* | 180 | /* |
173 | * Routines made available to TIPC by supported media types | 181 | * Routines made available to TIPC by supported media types |
@@ -177,6 +185,9 @@ extern struct tipc_media eth_media_info; | |||
177 | #ifdef CONFIG_TIPC_MEDIA_IB | 185 | #ifdef CONFIG_TIPC_MEDIA_IB |
178 | extern struct tipc_media ib_media_info; | 186 | extern struct tipc_media ib_media_info; |
179 | #endif | 187 | #endif |
188 | #ifdef CONFIG_TIPC_MEDIA_UDP | ||
189 | extern struct tipc_media udp_media_info; | ||
190 | #endif | ||
180 | 191 | ||
181 | int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); | 192 | int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); |
182 | int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); | 193 | int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); |
@@ -191,21 +202,20 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info); | |||
191 | int tipc_media_set_priority(const char *name, u32 new_value); | 202 | int tipc_media_set_priority(const char *name, u32 new_value); |
192 | int tipc_media_set_window(const char *name, u32 new_value); | 203 | int tipc_media_set_window(const char *name, u32 new_value); |
193 | void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a); | 204 | void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a); |
194 | struct sk_buff *tipc_media_get_names(void); | 205 | int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, |
195 | int tipc_enable_l2_media(struct tipc_bearer *b); | 206 | struct nlattr *attrs[]); |
196 | void tipc_disable_l2_media(struct tipc_bearer *b); | 207 | void tipc_disable_l2_media(struct tipc_bearer *b); |
197 | int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b, | 208 | int tipc_l2_send_msg(struct net *net, struct sk_buff *buf, |
198 | struct tipc_media_addr *dest); | 209 | struct tipc_bearer *b, struct tipc_media_addr *dest); |
199 | 210 | ||
200 | struct sk_buff *tipc_bearer_get_names(void); | 211 | void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest); |
201 | void tipc_bearer_add_dest(u32 bearer_id, u32 dest); | 212 | void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest); |
202 | void tipc_bearer_remove_dest(u32 bearer_id, u32 dest); | 213 | struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name); |
203 | struct tipc_bearer *tipc_bearer_find(const char *name); | ||
204 | struct tipc_media *tipc_media_find(const char *name); | 214 | struct tipc_media *tipc_media_find(const char *name); |
205 | int tipc_bearer_setup(void); | 215 | int tipc_bearer_setup(void); |
206 | void tipc_bearer_cleanup(void); | 216 | void tipc_bearer_cleanup(void); |
207 | void tipc_bearer_stop(void); | 217 | void tipc_bearer_stop(struct net *net); |
208 | void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf, | 218 | void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf, |
209 | struct tipc_media_addr *dest); | 219 | struct tipc_media_addr *dest); |
210 | 220 | ||
211 | #endif /* _TIPC_BEARER_H */ | 221 | #endif /* _TIPC_BEARER_H */ |
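For context on the renamed TIPC_MEDIA_INFO_SIZE and the new TIPC_MEDIA_ADDR_OFFSET: the 32-byte media address info field in the message header carries the media type at byte 3 and the media-specific address from byte 4 onward. The sketch below shows one plausible packing for an Ethernet MAC; the authoritative converters are the addr2msg()/msg2addr() callbacks in eth_media.c, which are not part of this diff.

	#include <linux/string.h>

	#define TIPC_MEDIA_INFO_SIZE	32
	#define TIPC_MEDIA_TYPE_OFFSET	3
	#define TIPC_MEDIA_ADDR_OFFSET	4
	#define TIPC_MEDIA_TYPE_ETH	1
	#define ETH_ALEN		6

	/* Illustrative only: zero the whole info field, tag the media type,
	 * then copy the 6-byte MAC at the address offset.
	 */
	static void eth_addr2msg_sketch(char *msg, const unsigned char *mac)
	{
		memset(msg, 0, TIPC_MEDIA_INFO_SIZE);
		msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
		memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, mac, ETH_ALEN);
	}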
diff --git a/net/tipc/config.c b/net/tipc/config.c deleted file mode 100644 index 876f4c6a2631..000000000000 --- a/net/tipc/config.c +++ /dev/null | |||
@@ -1,342 +0,0 @@ | |||
1 | /* | ||
2 | * net/tipc/config.c: TIPC configuration management code | ||
3 | * | ||
4 | * Copyright (c) 2002-2006, Ericsson AB | ||
5 | * Copyright (c) 2004-2007, 2010-2013, Wind River Systems | ||
6 | * All rights reserved. | ||
7 | * | ||
8 | * Redistribution and use in source and binary forms, with or without | ||
9 | * modification, are permitted provided that the following conditions are met: | ||
10 | * | ||
11 | * 1. Redistributions of source code must retain the above copyright | ||
12 | * notice, this list of conditions and the following disclaimer. | ||
13 | * 2. Redistributions in binary form must reproduce the above copyright | ||
14 | * notice, this list of conditions and the following disclaimer in the | ||
15 | * documentation and/or other materials provided with the distribution. | ||
16 | * 3. Neither the names of the copyright holders nor the names of its | ||
17 | * contributors may be used to endorse or promote products derived from | ||
18 | * this software without specific prior written permission. | ||
19 | * | ||
20 | * Alternatively, this software may be distributed under the terms of the | ||
21 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
22 | * Software Foundation. | ||
23 | * | ||
24 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
25 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
28 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
29 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
32 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
33 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
34 | * POSSIBILITY OF SUCH DAMAGE. | ||
35 | */ | ||
36 | |||
37 | #include "core.h" | ||
38 | #include "socket.h" | ||
39 | #include "name_table.h" | ||
40 | #include "config.h" | ||
41 | #include "server.h" | ||
42 | |||
43 | #define REPLY_TRUNCATED "<truncated>\n" | ||
44 | |||
45 | static const void *req_tlv_area; /* request message TLV area */ | ||
46 | static int req_tlv_space; /* request message TLV area size */ | ||
47 | static int rep_headroom; /* reply message headroom to use */ | ||
48 | |||
49 | struct sk_buff *tipc_cfg_reply_alloc(int payload_size) | ||
50 | { | ||
51 | struct sk_buff *buf; | ||
52 | |||
53 | buf = alloc_skb(rep_headroom + payload_size, GFP_ATOMIC); | ||
54 | if (buf) | ||
55 | skb_reserve(buf, rep_headroom); | ||
56 | return buf; | ||
57 | } | ||
58 | |||
59 | int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type, | ||
60 | void *tlv_data, int tlv_data_size) | ||
61 | { | ||
62 | struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(buf); | ||
63 | int new_tlv_space = TLV_SPACE(tlv_data_size); | ||
64 | |||
65 | if (skb_tailroom(buf) < new_tlv_space) | ||
66 | return 0; | ||
67 | skb_put(buf, new_tlv_space); | ||
68 | tlv->tlv_type = htons(tlv_type); | ||
69 | tlv->tlv_len = htons(TLV_LENGTH(tlv_data_size)); | ||
70 | if (tlv_data_size && tlv_data) | ||
71 | memcpy(TLV_DATA(tlv), tlv_data, tlv_data_size); | ||
72 | return 1; | ||
73 | } | ||
74 | |||
75 | static struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value) | ||
76 | { | ||
77 | struct sk_buff *buf; | ||
78 | __be32 value_net; | ||
79 | |||
80 | buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value))); | ||
81 | if (buf) { | ||
82 | value_net = htonl(value); | ||
83 | tipc_cfg_append_tlv(buf, tlv_type, &value_net, | ||
84 | sizeof(value_net)); | ||
85 | } | ||
86 | return buf; | ||
87 | } | ||
88 | |||
89 | static struct sk_buff *tipc_cfg_reply_unsigned(u32 value) | ||
90 | { | ||
91 | return tipc_cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value); | ||
92 | } | ||
93 | |||
94 | struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string) | ||
95 | { | ||
96 | struct sk_buff *buf; | ||
97 | int string_len = strlen(string) + 1; | ||
98 | |||
99 | buf = tipc_cfg_reply_alloc(TLV_SPACE(string_len)); | ||
100 | if (buf) | ||
101 | tipc_cfg_append_tlv(buf, tlv_type, string, string_len); | ||
102 | return buf; | ||
103 | } | ||
104 | |||
105 | static struct sk_buff *tipc_show_stats(void) | ||
106 | { | ||
107 | struct sk_buff *buf; | ||
108 | struct tlv_desc *rep_tlv; | ||
109 | char *pb; | ||
110 | int pb_len; | ||
111 | int str_len; | ||
112 | u32 value; | ||
113 | |||
114 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | ||
115 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
116 | |||
117 | value = ntohl(*(u32 *)TLV_DATA(req_tlv_area)); | ||
118 | if (value != 0) | ||
119 | return tipc_cfg_reply_error_string("unsupported argument"); | ||
120 | |||
121 | buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN)); | ||
122 | if (buf == NULL) | ||
123 | return NULL; | ||
124 | |||
125 | rep_tlv = (struct tlv_desc *)buf->data; | ||
126 | pb = TLV_DATA(rep_tlv); | ||
127 | pb_len = ULTRA_STRING_MAX_LEN; | ||
128 | |||
129 | str_len = tipc_snprintf(pb, pb_len, "TIPC version " TIPC_MOD_VER "\n"); | ||
130 | str_len += 1; /* for "\0" */ | ||
131 | skb_put(buf, TLV_SPACE(str_len)); | ||
132 | TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); | ||
133 | |||
134 | return buf; | ||
135 | } | ||
136 | |||
137 | static struct sk_buff *cfg_enable_bearer(void) | ||
138 | { | ||
139 | struct tipc_bearer_config *args; | ||
140 | |||
141 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG)) | ||
142 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
143 | |||
144 | args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area); | ||
145 | if (tipc_enable_bearer(args->name, | ||
146 | ntohl(args->disc_domain), | ||
147 | ntohl(args->priority))) | ||
148 | return tipc_cfg_reply_error_string("unable to enable bearer"); | ||
149 | |||
150 | return tipc_cfg_reply_none(); | ||
151 | } | ||
152 | |||
153 | static struct sk_buff *cfg_disable_bearer(void) | ||
154 | { | ||
155 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME)) | ||
156 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
157 | |||
158 | if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area))) | ||
159 | return tipc_cfg_reply_error_string("unable to disable bearer"); | ||
160 | |||
161 | return tipc_cfg_reply_none(); | ||
162 | } | ||
163 | |||
164 | static struct sk_buff *cfg_set_own_addr(void) | ||
165 | { | ||
166 | u32 addr; | ||
167 | |||
168 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) | ||
169 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
170 | |||
171 | addr = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); | ||
172 | if (addr == tipc_own_addr) | ||
173 | return tipc_cfg_reply_none(); | ||
174 | if (!tipc_addr_node_valid(addr)) | ||
175 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | ||
176 | " (node address)"); | ||
177 | if (tipc_own_addr) | ||
178 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | ||
179 | " (cannot change node address once assigned)"); | ||
180 | if (!tipc_net_start(addr)) | ||
181 | return tipc_cfg_reply_none(); | ||
182 | |||
183 | return tipc_cfg_reply_error_string("cannot change to network mode"); | ||
184 | } | ||
185 | |||
186 | static struct sk_buff *cfg_set_max_ports(void) | ||
187 | { | ||
188 | u32 value; | ||
189 | |||
190 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | ||
191 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
192 | value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); | ||
193 | if (value == tipc_max_ports) | ||
194 | return tipc_cfg_reply_none(); | ||
195 | if (value < 127 || value > 65535) | ||
196 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | ||
197 | " (max ports must be 127-65535)"); | ||
198 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | ||
199 | " (cannot change max ports while TIPC is active)"); | ||
200 | } | ||
201 | |||
202 | static struct sk_buff *cfg_set_netid(void) | ||
203 | { | ||
204 | u32 value; | ||
205 | |||
206 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | ||
207 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
208 | value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); | ||
209 | if (value == tipc_net_id) | ||
210 | return tipc_cfg_reply_none(); | ||
211 | if (value < 1 || value > 9999) | ||
212 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | ||
213 | " (network id must be 1-9999)"); | ||
214 | if (tipc_own_addr) | ||
215 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | ||
216 | " (cannot change network id once TIPC has joined a network)"); | ||
217 | tipc_net_id = value; | ||
218 | return tipc_cfg_reply_none(); | ||
219 | } | ||
220 | |||
221 | struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area, | ||
222 | int request_space, int reply_headroom) | ||
223 | { | ||
224 | struct sk_buff *rep_tlv_buf; | ||
225 | |||
226 | rtnl_lock(); | ||
227 | |||
228 | /* Save request and reply details in a well-known location */ | ||
229 | req_tlv_area = request_area; | ||
230 | req_tlv_space = request_space; | ||
231 | rep_headroom = reply_headroom; | ||
232 | |||
233 | /* Check command authorization */ | ||
234 | if (likely(in_own_node(orig_node))) { | ||
235 | /* command is permitted */ | ||
236 | } else { | ||
237 | rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | ||
238 | " (cannot be done remotely)"); | ||
239 | goto exit; | ||
240 | } | ||
241 | |||
242 | /* Call appropriate processing routine */ | ||
243 | switch (cmd) { | ||
244 | case TIPC_CMD_NOOP: | ||
245 | rep_tlv_buf = tipc_cfg_reply_none(); | ||
246 | break; | ||
247 | case TIPC_CMD_GET_NODES: | ||
248 | rep_tlv_buf = tipc_node_get_nodes(req_tlv_area, req_tlv_space); | ||
249 | break; | ||
250 | case TIPC_CMD_GET_LINKS: | ||
251 | rep_tlv_buf = tipc_node_get_links(req_tlv_area, req_tlv_space); | ||
252 | break; | ||
253 | case TIPC_CMD_SHOW_LINK_STATS: | ||
254 | rep_tlv_buf = tipc_link_cmd_show_stats(req_tlv_area, req_tlv_space); | ||
255 | break; | ||
256 | case TIPC_CMD_RESET_LINK_STATS: | ||
257 | rep_tlv_buf = tipc_link_cmd_reset_stats(req_tlv_area, req_tlv_space); | ||
258 | break; | ||
259 | case TIPC_CMD_SHOW_NAME_TABLE: | ||
260 | rep_tlv_buf = tipc_nametbl_get(req_tlv_area, req_tlv_space); | ||
261 | break; | ||
262 | case TIPC_CMD_GET_BEARER_NAMES: | ||
263 | rep_tlv_buf = tipc_bearer_get_names(); | ||
264 | break; | ||
265 | case TIPC_CMD_GET_MEDIA_NAMES: | ||
266 | rep_tlv_buf = tipc_media_get_names(); | ||
267 | break; | ||
268 | case TIPC_CMD_SHOW_PORTS: | ||
269 | rep_tlv_buf = tipc_sk_socks_show(); | ||
270 | break; | ||
271 | case TIPC_CMD_SHOW_STATS: | ||
272 | rep_tlv_buf = tipc_show_stats(); | ||
273 | break; | ||
274 | case TIPC_CMD_SET_LINK_TOL: | ||
275 | case TIPC_CMD_SET_LINK_PRI: | ||
276 | case TIPC_CMD_SET_LINK_WINDOW: | ||
277 | rep_tlv_buf = tipc_link_cmd_config(req_tlv_area, req_tlv_space, cmd); | ||
278 | break; | ||
279 | case TIPC_CMD_ENABLE_BEARER: | ||
280 | rep_tlv_buf = cfg_enable_bearer(); | ||
281 | break; | ||
282 | case TIPC_CMD_DISABLE_BEARER: | ||
283 | rep_tlv_buf = cfg_disable_bearer(); | ||
284 | break; | ||
285 | case TIPC_CMD_SET_NODE_ADDR: | ||
286 | rep_tlv_buf = cfg_set_own_addr(); | ||
287 | break; | ||
288 | case TIPC_CMD_SET_MAX_PORTS: | ||
289 | rep_tlv_buf = cfg_set_max_ports(); | ||
290 | break; | ||
291 | case TIPC_CMD_SET_NETID: | ||
292 | rep_tlv_buf = cfg_set_netid(); | ||
293 | break; | ||
294 | case TIPC_CMD_GET_MAX_PORTS: | ||
295 | rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports); | ||
296 | break; | ||
297 | case TIPC_CMD_GET_NETID: | ||
298 | rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id); | ||
299 | break; | ||
300 | case TIPC_CMD_NOT_NET_ADMIN: | ||
301 | rep_tlv_buf = | ||
302 | tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN); | ||
303 | break; | ||
304 | case TIPC_CMD_SET_MAX_ZONES: | ||
305 | case TIPC_CMD_GET_MAX_ZONES: | ||
306 | case TIPC_CMD_SET_MAX_SLAVES: | ||
307 | case TIPC_CMD_GET_MAX_SLAVES: | ||
308 | case TIPC_CMD_SET_MAX_CLUSTERS: | ||
309 | case TIPC_CMD_GET_MAX_CLUSTERS: | ||
310 | case TIPC_CMD_SET_MAX_NODES: | ||
311 | case TIPC_CMD_GET_MAX_NODES: | ||
312 | case TIPC_CMD_SET_MAX_SUBSCR: | ||
313 | case TIPC_CMD_GET_MAX_SUBSCR: | ||
314 | case TIPC_CMD_SET_MAX_PUBL: | ||
315 | case TIPC_CMD_GET_MAX_PUBL: | ||
316 | case TIPC_CMD_SET_LOG_SIZE: | ||
317 | case TIPC_CMD_SET_REMOTE_MNG: | ||
318 | case TIPC_CMD_GET_REMOTE_MNG: | ||
319 | case TIPC_CMD_DUMP_LOG: | ||
320 | rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | ||
321 | " (obsolete command)"); | ||
322 | break; | ||
323 | default: | ||
324 | rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | ||
325 | " (unknown command)"); | ||
326 | break; | ||
327 | } | ||
328 | |||
329 | WARN_ON(rep_tlv_buf->len > TLV_SPACE(ULTRA_STRING_MAX_LEN)); | ||
330 | |||
331 | /* Append an error message if we cannot return all requested data */ | ||
332 | if (rep_tlv_buf->len == TLV_SPACE(ULTRA_STRING_MAX_LEN)) { | ||
333 | if (*(rep_tlv_buf->data + ULTRA_STRING_MAX_LEN) != '\0') | ||
334 | sprintf(rep_tlv_buf->data + rep_tlv_buf->len - | ||
335 | sizeof(REPLY_TRUNCATED) - 1, REPLY_TRUNCATED); | ||
336 | } | ||
337 | |||
338 | /* Return reply buffer */ | ||
339 | exit: | ||
340 | rtnl_unlock(); | ||
341 | return rep_tlv_buf; | ||
342 | } | ||
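The deleted file above also removes the TLV reply helpers the old configuration service was built on. Purely as a usage illustration (mirroring tipc_cfg_reply_string_type() in the removed code), this is how a command handler produced a single-string reply; note that tipc_cfg_append_tlv() returns 0 when the buffer lacks tailroom:

	#include <linux/printk.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>
	#include <linux/tipc_config.h>	/* TLV_SPACE(), TIPC_TLV_* */

	/* Prototypes of the removed helpers, reproduced from config.h above. */
	struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
	int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
				void *tlv_data, int tlv_data_size);

	/* Hypothetical caller: allocate a reply sized for one TLV, then
	 * append the string including its terminating NUL.
	 */
	static struct sk_buff *cfg_reply_example(char *string)
	{
		int len = strlen(string) + 1;
		struct sk_buff *buf = tipc_cfg_reply_alloc(TLV_SPACE(len));

		if (buf && !tipc_cfg_append_tlv(buf, TIPC_TLV_ERROR_STRING,
						string, len))
			pr_warn("config reply truncated, no tailroom\n");
		return buf;
	}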
diff --git a/net/tipc/config.h b/net/tipc/config.h deleted file mode 100644 index 47b1bf181612..000000000000 --- a/net/tipc/config.h +++ /dev/null | |||
@@ -1,67 +0,0 @@ | |||
1 | /* | ||
2 | * net/tipc/config.h: Include file for TIPC configuration service code | ||
3 | * | ||
4 | * Copyright (c) 2003-2006, Ericsson AB | ||
5 | * Copyright (c) 2005, Wind River Systems | ||
6 | * All rights reserved. | ||
7 | * | ||
8 | * Redistribution and use in source and binary forms, with or without | ||
9 | * modification, are permitted provided that the following conditions are met: | ||
10 | * | ||
11 | * 1. Redistributions of source code must retain the above copyright | ||
12 | * notice, this list of conditions and the following disclaimer. | ||
13 | * 2. Redistributions in binary form must reproduce the above copyright | ||
14 | * notice, this list of conditions and the following disclaimer in the | ||
15 | * documentation and/or other materials provided with the distribution. | ||
16 | * 3. Neither the names of the copyright holders nor the names of its | ||
17 | * contributors may be used to endorse or promote products derived from | ||
18 | * this software without specific prior written permission. | ||
19 | * | ||
20 | * Alternatively, this software may be distributed under the terms of the | ||
21 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
22 | * Software Foundation. | ||
23 | * | ||
24 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
25 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
28 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
29 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
32 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
33 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
34 | * POSSIBILITY OF SUCH DAMAGE. | ||
35 | */ | ||
36 | |||
37 | #ifndef _TIPC_CONFIG_H | ||
38 | #define _TIPC_CONFIG_H | ||
39 | |||
40 | /* ---------------------------------------------------------------------- */ | ||
41 | |||
42 | #include "link.h" | ||
43 | |||
44 | struct sk_buff *tipc_cfg_reply_alloc(int payload_size); | ||
45 | int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type, | ||
46 | void *tlv_data, int tlv_data_size); | ||
47 | struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string); | ||
48 | |||
49 | static inline struct sk_buff *tipc_cfg_reply_none(void) | ||
50 | { | ||
51 | return tipc_cfg_reply_alloc(0); | ||
52 | } | ||
53 | |||
54 | static inline struct sk_buff *tipc_cfg_reply_error_string(char *string) | ||
55 | { | ||
56 | return tipc_cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string); | ||
57 | } | ||
58 | |||
59 | static inline struct sk_buff *tipc_cfg_reply_ultra_string(char *string) | ||
60 | { | ||
61 | return tipc_cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string); | ||
62 | } | ||
63 | |||
64 | struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, | ||
65 | const void *req_tlv_area, int req_tlv_space, | ||
66 | int headroom); | ||
67 | #endif | ||
diff --git a/net/tipc/core.c b/net/tipc/core.c index a5737b8407dd..be1c9fa60b09 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
@@ -34,82 +34,88 @@ | |||
34 | * POSSIBILITY OF SUCH DAMAGE. | 34 | * POSSIBILITY OF SUCH DAMAGE. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
38 | |||
37 | #include "core.h" | 39 | #include "core.h" |
38 | #include "name_table.h" | 40 | #include "name_table.h" |
39 | #include "subscr.h" | 41 | #include "subscr.h" |
40 | #include "config.h" | 42 | #include "bearer.h" |
43 | #include "net.h" | ||
41 | #include "socket.h" | 44 | #include "socket.h" |
42 | 45 | ||
43 | #include <linux/module.h> | 46 | #include <linux/module.h> |
44 | 47 | ||
45 | /* global variables used by multiple sub-systems within TIPC */ | ||
46 | int tipc_random __read_mostly; | ||
47 | |||
48 | /* configurable TIPC parameters */ | 48 | /* configurable TIPC parameters */ |
49 | u32 tipc_own_addr __read_mostly; | ||
50 | int tipc_max_ports __read_mostly; | ||
51 | int tipc_net_id __read_mostly; | 49 | int tipc_net_id __read_mostly; |
52 | int sysctl_tipc_rmem[3] __read_mostly; /* min/default/max */ | 50 | int sysctl_tipc_rmem[3] __read_mostly; /* min/default/max */ |
53 | 51 | ||
54 | /** | 52 | static int __net_init tipc_init_net(struct net *net) |
55 | * tipc_buf_acquire - creates a TIPC message buffer | ||
56 | * @size: message size (including TIPC header) | ||
57 | * | ||
58 | * Returns a new buffer with data pointers set to the specified size. | ||
59 | * | ||
60 | * NOTE: Headroom is reserved to allow prepending of a data link header. | ||
61 | * There may also be unrequested tailroom present at the buffer's end. | ||
62 | */ | ||
63 | struct sk_buff *tipc_buf_acquire(u32 size) | ||
64 | { | 53 | { |
65 | struct sk_buff *skb; | 54 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
66 | unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; | 55 | int err; |
67 | 56 | ||
68 | skb = alloc_skb_fclone(buf_size, GFP_ATOMIC); | 57 | tn->net_id = 4711; |
69 | if (skb) { | 58 | tn->own_addr = 0; |
70 | skb_reserve(skb, BUF_HEADROOM); | 59 | get_random_bytes(&tn->random, sizeof(int)); |
71 | skb_put(skb, size); | 60 | INIT_LIST_HEAD(&tn->node_list); |
72 | skb->next = NULL; | 61 | spin_lock_init(&tn->node_list_lock); |
73 | } | 62 | |
74 | return skb; | 63 | err = tipc_sk_rht_init(net); |
64 | if (err) | ||
65 | goto out_sk_rht; | ||
66 | |||
67 | err = tipc_nametbl_init(net); | ||
68 | if (err) | ||
69 | goto out_nametbl; | ||
70 | |||
71 | err = tipc_subscr_start(net); | ||
72 | if (err) | ||
73 | goto out_subscr; | ||
74 | return 0; | ||
75 | |||
76 | out_subscr: | ||
77 | tipc_nametbl_stop(net); | ||
78 | out_nametbl: | ||
79 | tipc_sk_rht_destroy(net); | ||
80 | out_sk_rht: | ||
81 | return err; | ||
75 | } | 82 | } |
76 | 83 | ||
77 | /** | 84 | static void __net_exit tipc_exit_net(struct net *net) |
78 | * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode | ||
79 | */ | ||
80 | static void tipc_core_stop(void) | ||
81 | { | 85 | { |
82 | tipc_net_stop(); | 86 | tipc_subscr_stop(net); |
83 | tipc_bearer_cleanup(); | 87 | tipc_net_stop(net); |
84 | tipc_netlink_stop(); | 88 | tipc_nametbl_stop(net); |
85 | tipc_subscr_stop(); | 89 | tipc_sk_rht_destroy(net); |
86 | tipc_nametbl_stop(); | ||
87 | tipc_sk_ref_table_stop(); | ||
88 | tipc_socket_stop(); | ||
89 | tipc_unregister_sysctl(); | ||
90 | } | 90 | } |
91 | 91 | ||
92 | /** | 92 | static struct pernet_operations tipc_net_ops = { |
93 | * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode | 93 | .init = tipc_init_net, |
94 | */ | 94 | .exit = tipc_exit_net, |
95 | static int tipc_core_start(void) | 95 | .id = &tipc_net_id, |
96 | .size = sizeof(struct tipc_net), | ||
97 | }; | ||
98 | |||
99 | static int __init tipc_init(void) | ||
96 | { | 100 | { |
97 | int err; | 101 | int err; |
98 | 102 | ||
99 | get_random_bytes(&tipc_random, sizeof(tipc_random)); | 103 | pr_info("Activated (version " TIPC_MOD_VER ")\n"); |
100 | |||
101 | err = tipc_sk_ref_table_init(tipc_max_ports, tipc_random); | ||
102 | if (err) | ||
103 | goto out_reftbl; | ||
104 | 104 | ||
105 | err = tipc_nametbl_init(); | 105 | sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 << |
106 | if (err) | 106 | TIPC_LOW_IMPORTANCE; |
107 | goto out_nametbl; | 107 | sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 << |
108 | TIPC_CRITICAL_IMPORTANCE; | ||
109 | sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT; | ||
108 | 110 | ||
109 | err = tipc_netlink_start(); | 111 | err = tipc_netlink_start(); |
110 | if (err) | 112 | if (err) |
111 | goto out_netlink; | 113 | goto out_netlink; |
112 | 114 | ||
115 | err = tipc_netlink_compat_start(); | ||
116 | if (err) | ||
117 | goto out_netlink_compat; | ||
118 | |||
113 | err = tipc_socket_init(); | 119 | err = tipc_socket_init(); |
114 | if (err) | 120 | if (err) |
115 | goto out_socket; | 121 | goto out_socket; |
@@ -118,58 +124,40 @@ static int tipc_core_start(void) | |||
118 | if (err) | 124 | if (err) |
119 | goto out_sysctl; | 125 | goto out_sysctl; |
120 | 126 | ||
121 | err = tipc_subscr_start(); | 127 | err = register_pernet_subsys(&tipc_net_ops); |
122 | if (err) | 128 | if (err) |
123 | goto out_subscr; | 129 | goto out_pernet; |
124 | 130 | ||
125 | err = tipc_bearer_setup(); | 131 | err = tipc_bearer_setup(); |
126 | if (err) | 132 | if (err) |
127 | goto out_bearer; | 133 | goto out_bearer; |
128 | 134 | ||
135 | pr_info("Started in single node mode\n"); | ||
129 | return 0; | 136 | return 0; |
130 | out_bearer: | 137 | out_bearer: |
131 | tipc_subscr_stop(); | 138 | unregister_pernet_subsys(&tipc_net_ops); |
132 | out_subscr: | 139 | out_pernet: |
133 | tipc_unregister_sysctl(); | 140 | tipc_unregister_sysctl(); |
134 | out_sysctl: | 141 | out_sysctl: |
135 | tipc_socket_stop(); | 142 | tipc_socket_stop(); |
136 | out_socket: | 143 | out_socket: |
144 | tipc_netlink_compat_stop(); | ||
145 | out_netlink_compat: | ||
137 | tipc_netlink_stop(); | 146 | tipc_netlink_stop(); |
138 | out_netlink: | 147 | out_netlink: |
139 | tipc_nametbl_stop(); | 148 | pr_err("Unable to start in single node mode\n"); |
140 | out_nametbl: | ||
141 | tipc_sk_ref_table_stop(); | ||
142 | out_reftbl: | ||
143 | return err; | 149 | return err; |
144 | } | 150 | } |
145 | 151 | ||
146 | static int __init tipc_init(void) | ||
147 | { | ||
148 | int res; | ||
149 | |||
150 | pr_info("Activated (version " TIPC_MOD_VER ")\n"); | ||
151 | |||
152 | tipc_own_addr = 0; | ||
153 | tipc_max_ports = CONFIG_TIPC_PORTS; | ||
154 | tipc_net_id = 4711; | ||
155 | |||
156 | sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 << | ||
157 | TIPC_LOW_IMPORTANCE; | ||
158 | sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 << | ||
159 | TIPC_CRITICAL_IMPORTANCE; | ||
160 | sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT; | ||
161 | |||
162 | res = tipc_core_start(); | ||
163 | if (res) | ||
164 | pr_err("Unable to start in single node mode\n"); | ||
165 | else | ||
166 | pr_info("Started in single node mode\n"); | ||
167 | return res; | ||
168 | } | ||
169 | |||
170 | static void __exit tipc_exit(void) | 152 | static void __exit tipc_exit(void) |
171 | { | 153 | { |
172 | tipc_core_stop(); | 154 | tipc_bearer_cleanup(); |
155 | unregister_pernet_subsys(&tipc_net_ops); | ||
156 | tipc_netlink_stop(); | ||
157 | tipc_netlink_compat_stop(); | ||
158 | tipc_socket_stop(); | ||
159 | tipc_unregister_sysctl(); | ||
160 | |||
173 | pr_info("Deactivated\n"); | 161 | pr_info("Deactivated\n"); |
174 | } | 162 | } |
175 | 163 | ||
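Editor's note: the core.c hunks above convert TIPC's formerly global state into per-network-namespace state. A rough sketch of that pernet pattern (names are hypothetical, not the TIPC ones): the state lives in a struct allocated per namespace, is reached via net_generic(), and init/exit hooks are registered with register_pernet_subsys().

#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct foo_net {			/* hypothetical per-netns state */
	u32 own_addr;
	int net_id;
};

static int foo_net_id;

static int __net_init foo_init_net(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	fn->own_addr = 0;		/* runs once for every namespace */
	fn->net_id = 4711;
	return 0;
}

static void __net_exit foo_exit_net(struct net *net)
{
	/* tear down whatever foo_init_net() set up for this namespace */
}

static struct pernet_operations foo_net_ops = {
	.init = foo_init_net,
	.exit = foo_exit_net,
	.id   = &foo_net_id,		/* filled in on registration */
	.size = sizeof(struct foo_net),	/* allocated per namespace */
};

/* In module init: err = register_pernet_subsys(&foo_net_ops); */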
diff --git a/net/tipc/core.h b/net/tipc/core.h index 84602137ce20..3dc68c7a966d 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h | |||
@@ -37,8 +37,6 @@ | |||
37 | #ifndef _TIPC_CORE_H | 37 | #ifndef _TIPC_CORE_H |
38 | #define _TIPC_CORE_H | 38 | #define _TIPC_CORE_H |
39 | 39 | ||
40 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
41 | |||
42 | #include <linux/tipc.h> | 40 | #include <linux/tipc.h> |
43 | #include <linux/tipc_config.h> | 41 | #include <linux/tipc_config.h> |
44 | #include <linux/tipc_netlink.h> | 42 | #include <linux/tipc_netlink.h> |
@@ -59,47 +57,54 @@ | |||
59 | #include <linux/vmalloc.h> | 57 | #include <linux/vmalloc.h> |
60 | #include <linux/rtnetlink.h> | 58 | #include <linux/rtnetlink.h> |
61 | #include <linux/etherdevice.h> | 59 | #include <linux/etherdevice.h> |
60 | #include <net/netns/generic.h> | ||
61 | #include <linux/rhashtable.h> | ||
62 | |||
63 | #include "node.h" | ||
64 | #include "bearer.h" | ||
65 | #include "bcast.h" | ||
66 | #include "netlink.h" | ||
67 | #include "link.h" | ||
68 | #include "node.h" | ||
69 | #include "msg.h" | ||
62 | 70 | ||
63 | #define TIPC_MOD_VER "2.0.0" | 71 | #define TIPC_MOD_VER "2.0.0" |
64 | 72 | ||
65 | #define ULTRA_STRING_MAX_LEN 32768 | 73 | extern int tipc_net_id __read_mostly; |
66 | #define TIPC_MAX_SUBSCRIPTIONS 65535 | 74 | extern int sysctl_tipc_rmem[3] __read_mostly; |
67 | #define TIPC_MAX_PUBLICATIONS 65535 | 75 | extern int sysctl_tipc_named_timeout __read_mostly; |
68 | 76 | ||
69 | struct tipc_msg; /* msg.h */ | 77 | struct tipc_net { |
78 | u32 own_addr; | ||
79 | int net_id; | ||
80 | int random; | ||
70 | 81 | ||
71 | int tipc_snprintf(char *buf, int len, const char *fmt, ...); | 82 | /* Node table and node list */ |
83 | spinlock_t node_list_lock; | ||
84 | struct hlist_head node_htable[NODE_HTABLE_SIZE]; | ||
85 | struct list_head node_list; | ||
86 | u32 num_nodes; | ||
87 | u32 num_links; | ||
72 | 88 | ||
73 | /* | 89 | /* Bearer list */ |
74 | * TIPC-specific error codes | 90 | struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1]; |
75 | */ | ||
76 | #define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */ | ||
77 | 91 | ||
78 | /* | 92 | /* Broadcast link */ |
79 | * Global configuration variables | 93 | struct tipc_bcbearer *bcbearer; |
80 | */ | 94 | struct tipc_bclink *bclink; |
81 | extern u32 tipc_own_addr __read_mostly; | 95 | struct tipc_link *bcl; |
82 | extern int tipc_max_ports __read_mostly; | ||
83 | extern int tipc_net_id __read_mostly; | ||
84 | extern int sysctl_tipc_rmem[3] __read_mostly; | ||
85 | extern int sysctl_tipc_named_timeout __read_mostly; | ||
86 | 96 | ||
87 | /* | 97 | /* Socket hash table */ |
88 | * Other global variables | 98 | struct rhashtable sk_rht; |
89 | */ | ||
90 | extern int tipc_random __read_mostly; | ||
91 | 99 | ||
92 | /* | 100 | /* Name table */ |
93 | * Routines available to privileged subsystems | 101 | spinlock_t nametbl_lock; |
94 | */ | 102 | struct name_table *nametbl; |
95 | int tipc_netlink_start(void); | 103 | |
96 | void tipc_netlink_stop(void); | 104 | /* Topology subscription server */ |
97 | int tipc_socket_init(void); | 105 | struct tipc_server *topsrv; |
98 | void tipc_socket_stop(void); | 106 | atomic_t subscription_count; |
99 | int tipc_sock_create_local(int type, struct socket **res); | 107 | }; |
100 | void tipc_sock_release_local(struct socket *sock); | ||
101 | int tipc_sock_accept_local(struct socket *sock, struct socket **newsock, | ||
102 | int flags); | ||
103 | 108 | ||
104 | #ifdef CONFIG_SYSCTL | 109 | #ifdef CONFIG_SYSCTL |
105 | int tipc_register_sysctl(void); | 110 | int tipc_register_sysctl(void); |
@@ -108,102 +113,4 @@ void tipc_unregister_sysctl(void); | |||
108 | #define tipc_register_sysctl() 0 | 113 | #define tipc_register_sysctl() 0 |
109 | #define tipc_unregister_sysctl() | 114 | #define tipc_unregister_sysctl() |
110 | #endif | 115 | #endif |
111 | |||
112 | /* | ||
113 | * TIPC timer code | ||
114 | */ | ||
115 | typedef void (*Handler) (unsigned long); | ||
116 | |||
117 | /** | ||
118 | * k_init_timer - initialize a timer | ||
119 | * @timer: pointer to timer structure | ||
120 | * @routine: pointer to routine to invoke when timer expires | ||
121 | * @argument: value to pass to routine when timer expires | ||
122 | * | ||
123 | * Timer must be initialized before use (and terminated when no longer needed). | ||
124 | */ | ||
125 | static inline void k_init_timer(struct timer_list *timer, Handler routine, | ||
126 | unsigned long argument) | ||
127 | { | ||
128 | setup_timer(timer, routine, argument); | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * k_start_timer - start a timer | ||
133 | * @timer: pointer to timer structure | ||
134 | * @msec: time to delay (in ms) | ||
135 | * | ||
136 | * Schedules a previously initialized timer for later execution. | ||
137 | * If timer is already running, the new timeout overrides the previous request. | ||
138 | * | ||
139 | * To ensure the timer doesn't expire before the specified delay elapses, | ||
140 | * the amount of delay is rounded up when converting to the jiffies | ||
141 | * then an additional jiffy is added to account for the fact that | ||
142 | * the starting time may be in the middle of the current jiffy. | ||
143 | */ | ||
144 | static inline void k_start_timer(struct timer_list *timer, unsigned long msec) | ||
145 | { | ||
146 | mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1); | ||
147 | } | ||
148 | |||
149 | /** | ||
150 | * k_cancel_timer - cancel a timer | ||
151 | * @timer: pointer to timer structure | ||
152 | * | ||
153 | * Cancels a previously initialized timer. | ||
154 | * Can be called safely even if the timer is already inactive. | ||
155 | * | ||
156 | * WARNING: Must not be called when holding locks required by the timer's | ||
157 | * timeout routine, otherwise deadlock can occur on SMP systems! | ||
158 | */ | ||
159 | static inline void k_cancel_timer(struct timer_list *timer) | ||
160 | { | ||
161 | del_timer_sync(timer); | ||
162 | } | ||
163 | |||
164 | /** | ||
165 | * k_term_timer - terminate a timer | ||
166 | * @timer: pointer to timer structure | ||
167 | * | ||
168 | * Prevents further use of a previously initialized timer. | ||
169 | * | ||
170 | * WARNING: Caller must ensure timer isn't currently running. | ||
171 | * | ||
172 | * (Do not "enhance" this routine to automatically cancel an active timer, | ||
173 | * otherwise deadlock can arise when a timeout routine calls k_term_timer.) | ||
174 | */ | ||
175 | static inline void k_term_timer(struct timer_list *timer) | ||
176 | { | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * TIPC message buffer code | ||
181 | * | ||
182 | * TIPC message buffer headroom reserves space for the worst-case | ||
183 | * link-level device header (in case the message is sent off-node). | ||
184 | * | ||
185 | * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields | ||
186 | * are word aligned for quicker access | ||
187 | */ | ||
188 | #define BUF_HEADROOM LL_MAX_HEADER | ||
189 | |||
190 | struct tipc_skb_cb { | ||
191 | void *handle; | ||
192 | struct sk_buff *tail; | ||
193 | bool deferred; | ||
194 | bool wakeup_pending; | ||
195 | bool bundling; | ||
196 | u16 chain_sz; | ||
197 | u16 chain_imp; | ||
198 | }; | ||
199 | |||
200 | #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) | ||
201 | |||
202 | static inline struct tipc_msg *buf_msg(struct sk_buff *skb) | ||
203 | { | ||
204 | return (struct tipc_msg *)skb->data; | ||
205 | } | ||
206 | |||
207 | struct sk_buff *tipc_buf_acquire(u32 size); | ||
208 | |||
209 | #endif | 116 | #endif |
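Editor's note: core.h above drops the k_init/k_start/k_cancel/k_term_timer() shims; the later hunks (discover.c, link.c) call the timer API of this kernel generation directly. A hedged sketch of the direct pattern, with hypothetical names and an interval now kept in jiffies:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo_req {
	struct timer_list timer;
	unsigned long timer_intv;	/* interval in jiffies, not ms */
};

static void foo_timeout(unsigned long data)
{
	struct foo_req *req = (struct foo_req *)data;

	/* ... periodic work ... */
	mod_timer(&req->timer, jiffies + req->timer_intv);	/* re-arm */
}

static void foo_start(struct foo_req *req)
{
	req->timer_intv = msecs_to_jiffies(125);
	setup_timer(&req->timer, foo_timeout, (unsigned long)req);
	mod_timer(&req->timer, jiffies + req->timer_intv);
}

static void foo_stop(struct foo_req *req)
{
	del_timer_sync(&req->timer);	/* replaces k_cancel/k_term_timer */
}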
diff --git a/net/tipc/discover.c b/net/tipc/discover.c index aa722a42ef8b..967e292f53c8 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/discover.c | 2 | * net/tipc/discover.c |
3 | * | 3 | * |
4 | * Copyright (c) 2003-2006, 2014, Ericsson AB | 4 | * Copyright (c) 2003-2006, 2014-2015, Ericsson AB |
5 | * Copyright (c) 2005-2006, 2010-2011, Wind River Systems | 5 | * Copyright (c) 2005-2006, 2010-2011, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
@@ -38,15 +38,19 @@ | |||
38 | #include "link.h" | 38 | #include "link.h" |
39 | #include "discover.h" | 39 | #include "discover.h" |
40 | 40 | ||
41 | #define TIPC_LINK_REQ_INIT 125 /* min delay during bearer start up */ | 41 | /* min delay during bearer start up */ |
42 | #define TIPC_LINK_REQ_FAST 1000 /* max delay if bearer has no links */ | 42 | #define TIPC_LINK_REQ_INIT msecs_to_jiffies(125) |
43 | #define TIPC_LINK_REQ_SLOW 60000 /* max delay if bearer has links */ | 43 | /* max delay if bearer has no links */ |
44 | #define TIPC_LINK_REQ_INACTIVE 0xffffffff /* indicates no timer in use */ | 44 | #define TIPC_LINK_REQ_FAST msecs_to_jiffies(1000) |
45 | 45 | /* max delay if bearer has links */ | |
46 | #define TIPC_LINK_REQ_SLOW msecs_to_jiffies(60000) | ||
47 | /* indicates no timer in use */ | ||
48 | #define TIPC_LINK_REQ_INACTIVE 0xffffffff | ||
46 | 49 | ||
47 | /** | 50 | /** |
48 | * struct tipc_link_req - information about an ongoing link setup request | 51 | * struct tipc_link_req - information about an ongoing link setup request |
49 | * @bearer_id: identity of bearer issuing requests | 52 | * @bearer_id: identity of bearer issuing requests |
53 | * @net: network namespace instance | ||
50 | * @dest: destination address for request messages | 54 | * @dest: destination address for request messages |
51 | * @domain: network domain to which links can be established | 55 | * @domain: network domain to which links can be established |
52 | * @num_nodes: number of nodes currently discovered (i.e. with an active link) | 56 | * @num_nodes: number of nodes currently discovered (i.e. with an active link) |
@@ -58,31 +62,36 @@ | |||
58 | struct tipc_link_req { | 62 | struct tipc_link_req { |
59 | u32 bearer_id; | 63 | u32 bearer_id; |
60 | struct tipc_media_addr dest; | 64 | struct tipc_media_addr dest; |
65 | struct net *net; | ||
61 | u32 domain; | 66 | u32 domain; |
62 | int num_nodes; | 67 | int num_nodes; |
63 | spinlock_t lock; | 68 | spinlock_t lock; |
64 | struct sk_buff *buf; | 69 | struct sk_buff *buf; |
65 | struct timer_list timer; | 70 | struct timer_list timer; |
66 | unsigned int timer_intv; | 71 | unsigned long timer_intv; |
67 | }; | 72 | }; |
68 | 73 | ||
69 | /** | 74 | /** |
70 | * tipc_disc_init_msg - initialize a link setup message | 75 | * tipc_disc_init_msg - initialize a link setup message |
76 | * @net: the applicable net namespace | ||
71 | * @type: message type (request or response) | 77 | * @type: message type (request or response) |
72 | * @b_ptr: ptr to bearer issuing message | 78 | * @b_ptr: ptr to bearer issuing message |
73 | */ | 79 | */ |
74 | static void tipc_disc_init_msg(struct sk_buff *buf, u32 type, | 80 | static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type, |
75 | struct tipc_bearer *b_ptr) | 81 | struct tipc_bearer *b_ptr) |
76 | { | 82 | { |
83 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
77 | struct tipc_msg *msg; | 84 | struct tipc_msg *msg; |
78 | u32 dest_domain = b_ptr->domain; | 85 | u32 dest_domain = b_ptr->domain; |
79 | 86 | ||
80 | msg = buf_msg(buf); | 87 | msg = buf_msg(buf); |
81 | tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain); | 88 | tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type, |
89 | MAX_H_SIZE, dest_domain); | ||
82 | msg_set_non_seq(msg, 1); | 90 | msg_set_non_seq(msg, 1); |
83 | msg_set_node_sig(msg, tipc_random); | 91 | msg_set_node_sig(msg, tn->random); |
92 | msg_set_node_capabilities(msg, 0); | ||
84 | msg_set_dest_domain(msg, dest_domain); | 93 | msg_set_dest_domain(msg, dest_domain); |
85 | msg_set_bc_netid(msg, tipc_net_id); | 94 | msg_set_bc_netid(msg, tn->net_id); |
86 | b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr); | 95 | b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr); |
87 | } | 96 | } |
88 | 97 | ||
@@ -107,11 +116,14 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr, | |||
107 | 116 | ||
108 | /** | 117 | /** |
109 | * tipc_disc_rcv - handle incoming discovery message (request or response) | 118 | * tipc_disc_rcv - handle incoming discovery message (request or response) |
119 | * @net: the applicable net namespace | ||
110 | * @buf: buffer containing message | 120 | * @buf: buffer containing message |
111 | * @bearer: bearer that message arrived on | 121 | * @bearer: bearer that message arrived on |
112 | */ | 122 | */ |
113 | void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer) | 123 | void tipc_disc_rcv(struct net *net, struct sk_buff *buf, |
124 | struct tipc_bearer *bearer) | ||
114 | { | 125 | { |
126 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
115 | struct tipc_node *node; | 127 | struct tipc_node *node; |
116 | struct tipc_link *link; | 128 | struct tipc_link *link; |
117 | struct tipc_media_addr maddr; | 129 | struct tipc_media_addr maddr; |
@@ -122,6 +134,7 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer) | |||
122 | u32 net_id = msg_bc_netid(msg); | 134 | u32 net_id = msg_bc_netid(msg); |
123 | u32 mtyp = msg_type(msg); | 135 | u32 mtyp = msg_type(msg); |
124 | u32 signature = msg_node_sig(msg); | 136 | u32 signature = msg_node_sig(msg); |
137 | u16 caps = msg_node_capabilities(msg); | ||
125 | bool addr_match = false; | 138 | bool addr_match = false; |
126 | bool sign_match = false; | 139 | bool sign_match = false; |
127 | bool link_up = false; | 140 | bool link_up = false; |
@@ -133,7 +146,7 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer) | |||
133 | kfree_skb(buf); | 146 | kfree_skb(buf); |
134 | 147 | ||
135 | /* Ensure message from node is valid and communication is permitted */ | 148 | /* Ensure message from node is valid and communication is permitted */ |
136 | if (net_id != tipc_net_id) | 149 | if (net_id != tn->net_id) |
137 | return; | 150 | return; |
138 | if (maddr.broadcast) | 151 | if (maddr.broadcast) |
139 | return; | 152 | return; |
@@ -142,24 +155,21 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer) | |||
142 | if (!tipc_addr_node_valid(onode)) | 155 | if (!tipc_addr_node_valid(onode)) |
143 | return; | 156 | return; |
144 | 157 | ||
145 | if (in_own_node(onode)) { | 158 | if (in_own_node(net, onode)) { |
146 | if (memcmp(&maddr, &bearer->addr, sizeof(maddr))) | 159 | if (memcmp(&maddr, &bearer->addr, sizeof(maddr))) |
147 | disc_dupl_alert(bearer, tipc_own_addr, &maddr); | 160 | disc_dupl_alert(bearer, tn->own_addr, &maddr); |
148 | return; | 161 | return; |
149 | } | 162 | } |
150 | if (!tipc_in_scope(ddom, tipc_own_addr)) | 163 | if (!tipc_in_scope(ddom, tn->own_addr)) |
151 | return; | 164 | return; |
152 | if (!tipc_in_scope(bearer->domain, onode)) | 165 | if (!tipc_in_scope(bearer->domain, onode)) |
153 | return; | 166 | return; |
154 | 167 | ||
155 | /* Locate, or if necessary, create, node: */ | 168 | node = tipc_node_create(net, onode); |
156 | node = tipc_node_find(onode); | ||
157 | if (!node) | ||
158 | node = tipc_node_create(onode); | ||
159 | if (!node) | 169 | if (!node) |
160 | return; | 170 | return; |
161 | |||
162 | tipc_node_lock(node); | 171 | tipc_node_lock(node); |
172 | node->capabilities = caps; | ||
163 | link = node->links[bearer->identity]; | 173 | link = node->links[bearer->identity]; |
164 | 174 | ||
165 | /* Prepare to validate requesting node's signature and media address */ | 175 | /* Prepare to validate requesting node's signature and media address */ |
@@ -242,14 +252,15 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer) | |||
242 | 252 | ||
243 | /* Send response, if necessary */ | 253 | /* Send response, if necessary */ |
244 | if (respond && (mtyp == DSC_REQ_MSG)) { | 254 | if (respond && (mtyp == DSC_REQ_MSG)) { |
245 | rbuf = tipc_buf_acquire(INT_H_SIZE); | 255 | rbuf = tipc_buf_acquire(MAX_H_SIZE); |
246 | if (rbuf) { | 256 | if (rbuf) { |
247 | tipc_disc_init_msg(rbuf, DSC_RESP_MSG, bearer); | 257 | tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer); |
248 | tipc_bearer_send(bearer->identity, rbuf, &maddr); | 258 | tipc_bearer_send(net, bearer->identity, rbuf, &maddr); |
249 | kfree_skb(rbuf); | 259 | kfree_skb(rbuf); |
250 | } | 260 | } |
251 | } | 261 | } |
252 | tipc_node_unlock(node); | 262 | tipc_node_unlock(node); |
263 | tipc_node_put(node); | ||
253 | } | 264 | } |
254 | 265 | ||
255 | /** | 266 | /** |
@@ -265,7 +276,7 @@ static void disc_update(struct tipc_link_req *req) | |||
265 | if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) || | 276 | if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) || |
266 | (req->timer_intv > TIPC_LINK_REQ_FAST)) { | 277 | (req->timer_intv > TIPC_LINK_REQ_FAST)) { |
267 | req->timer_intv = TIPC_LINK_REQ_INIT; | 278 | req->timer_intv = TIPC_LINK_REQ_INIT; |
268 | k_start_timer(&req->timer, req->timer_intv); | 279 | mod_timer(&req->timer, jiffies + req->timer_intv); |
269 | } | 280 | } |
270 | } | 281 | } |
271 | } | 282 | } |
@@ -295,12 +306,13 @@ void tipc_disc_remove_dest(struct tipc_link_req *req) | |||
295 | 306 | ||
296 | /** | 307 | /** |
297 | * disc_timeout - send a periodic link setup request | 308 | * disc_timeout - send a periodic link setup request |
298 | * @req: ptr to link request structure | 309 | * @data: ptr to link request structure |
299 | * | 310 | * |
300 | * Called whenever a link setup request timer associated with a bearer expires. | 311 | * Called whenever a link setup request timer associated with a bearer expires. |
301 | */ | 312 | */ |
302 | static void disc_timeout(struct tipc_link_req *req) | 313 | static void disc_timeout(unsigned long data) |
303 | { | 314 | { |
315 | struct tipc_link_req *req = (struct tipc_link_req *)data; | ||
304 | int max_delay; | 316 | int max_delay; |
305 | 317 | ||
306 | spin_lock_bh(&req->lock); | 318 | spin_lock_bh(&req->lock); |
@@ -318,7 +330,7 @@ static void disc_timeout(struct tipc_link_req *req) | |||
318 | * hold at fast polling rate if don't have any associated nodes, | 330 | * hold at fast polling rate if don't have any associated nodes, |
319 | * otherwise hold at slow polling rate | 331 | * otherwise hold at slow polling rate |
320 | */ | 332 | */ |
321 | tipc_bearer_send(req->bearer_id, req->buf, &req->dest); | 333 | tipc_bearer_send(req->net, req->bearer_id, req->buf, &req->dest); |
322 | 334 | ||
323 | 335 | ||
324 | req->timer_intv *= 2; | 336 | req->timer_intv *= 2; |
@@ -329,44 +341,46 @@ static void disc_timeout(struct tipc_link_req *req) | |||
329 | if (req->timer_intv > max_delay) | 341 | if (req->timer_intv > max_delay) |
330 | req->timer_intv = max_delay; | 342 | req->timer_intv = max_delay; |
331 | 343 | ||
332 | k_start_timer(&req->timer, req->timer_intv); | 344 | mod_timer(&req->timer, jiffies + req->timer_intv); |
333 | exit: | 345 | exit: |
334 | spin_unlock_bh(&req->lock); | 346 | spin_unlock_bh(&req->lock); |
335 | } | 347 | } |
336 | 348 | ||
337 | /** | 349 | /** |
338 | * tipc_disc_create - create object to send periodic link setup requests | 350 | * tipc_disc_create - create object to send periodic link setup requests |
351 | * @net: the applicable net namespace | ||
339 | * @b_ptr: ptr to bearer issuing requests | 352 | * @b_ptr: ptr to bearer issuing requests |
340 | * @dest: destination address for request messages | 353 | * @dest: destination address for request messages |
341 | * @dest_domain: network domain to which links can be established | 354 | * @dest_domain: network domain to which links can be established |
342 | * | 355 | * |
343 | * Returns 0 if successful, otherwise -errno. | 356 | * Returns 0 if successful, otherwise -errno. |
344 | */ | 357 | */ |
345 | int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest) | 358 | int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr, |
359 | struct tipc_media_addr *dest) | ||
346 | { | 360 | { |
347 | struct tipc_link_req *req; | 361 | struct tipc_link_req *req; |
348 | 362 | ||
349 | req = kmalloc(sizeof(*req), GFP_ATOMIC); | 363 | req = kmalloc(sizeof(*req), GFP_ATOMIC); |
350 | if (!req) | 364 | if (!req) |
351 | return -ENOMEM; | 365 | return -ENOMEM; |
352 | 366 | req->buf = tipc_buf_acquire(MAX_H_SIZE); | |
353 | req->buf = tipc_buf_acquire(INT_H_SIZE); | ||
354 | if (!req->buf) { | 367 | if (!req->buf) { |
355 | kfree(req); | 368 | kfree(req); |
356 | return -ENOMEM; | 369 | return -ENOMEM; |
357 | } | 370 | } |
358 | 371 | ||
359 | tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr); | 372 | tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr); |
360 | memcpy(&req->dest, dest, sizeof(*dest)); | 373 | memcpy(&req->dest, dest, sizeof(*dest)); |
374 | req->net = net; | ||
361 | req->bearer_id = b_ptr->identity; | 375 | req->bearer_id = b_ptr->identity; |
362 | req->domain = b_ptr->domain; | 376 | req->domain = b_ptr->domain; |
363 | req->num_nodes = 0; | 377 | req->num_nodes = 0; |
364 | req->timer_intv = TIPC_LINK_REQ_INIT; | 378 | req->timer_intv = TIPC_LINK_REQ_INIT; |
365 | spin_lock_init(&req->lock); | 379 | spin_lock_init(&req->lock); |
366 | k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req); | 380 | setup_timer(&req->timer, disc_timeout, (unsigned long)req); |
367 | k_start_timer(&req->timer, req->timer_intv); | 381 | mod_timer(&req->timer, jiffies + req->timer_intv); |
368 | b_ptr->link_req = req; | 382 | b_ptr->link_req = req; |
369 | tipc_bearer_send(req->bearer_id, req->buf, &req->dest); | 383 | tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest); |
370 | return 0; | 384 | return 0; |
371 | } | 385 | } |
372 | 386 | ||
@@ -376,28 +390,29 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest) | |||
376 | */ | 390 | */ |
377 | void tipc_disc_delete(struct tipc_link_req *req) | 391 | void tipc_disc_delete(struct tipc_link_req *req) |
378 | { | 392 | { |
379 | k_cancel_timer(&req->timer); | 393 | del_timer_sync(&req->timer); |
380 | k_term_timer(&req->timer); | ||
381 | kfree_skb(req->buf); | 394 | kfree_skb(req->buf); |
382 | kfree(req); | 395 | kfree(req); |
383 | } | 396 | } |
384 | 397 | ||
385 | /** | 398 | /** |
386 | * tipc_disc_reset - reset object to send periodic link setup requests | 399 | * tipc_disc_reset - reset object to send periodic link setup requests |
400 | * @net: the applicable net namespace | ||
387 | * @b_ptr: ptr to bearer issuing requests | 401 | * @b_ptr: ptr to bearer issuing requests |
388 | * @dest_domain: network domain to which links can be established | 402 | * @dest_domain: network domain to which links can be established |
389 | */ | 403 | */ |
390 | void tipc_disc_reset(struct tipc_bearer *b_ptr) | 404 | void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr) |
391 | { | 405 | { |
392 | struct tipc_link_req *req = b_ptr->link_req; | 406 | struct tipc_link_req *req = b_ptr->link_req; |
393 | 407 | ||
394 | spin_lock_bh(&req->lock); | 408 | spin_lock_bh(&req->lock); |
395 | tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr); | 409 | tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr); |
410 | req->net = net; | ||
396 | req->bearer_id = b_ptr->identity; | 411 | req->bearer_id = b_ptr->identity; |
397 | req->domain = b_ptr->domain; | 412 | req->domain = b_ptr->domain; |
398 | req->num_nodes = 0; | 413 | req->num_nodes = 0; |
399 | req->timer_intv = TIPC_LINK_REQ_INIT; | 414 | req->timer_intv = TIPC_LINK_REQ_INIT; |
400 | k_start_timer(&req->timer, req->timer_intv); | 415 | mod_timer(&req->timer, jiffies + req->timer_intv); |
401 | tipc_bearer_send(req->bearer_id, req->buf, &req->dest); | 416 | tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest); |
402 | spin_unlock_bh(&req->lock); | 417 | spin_unlock_bh(&req->lock); |
403 | } | 418 | } |
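Editor's note: disc_timeout() above doubles the probe interval on every expiry and clamps it at a cap that depends on whether the bearer already has discovered nodes. A small user-space sketch of that backoff, using the interval constants from the hunk (125 ms initial, 1 s fast cap, 60 s slow cap):

#include <stdio.h>

#define REQ_INIT 125		/* ms, first probe */
#define REQ_FAST 1000		/* ms, cap while no links exist */
#define REQ_SLOW 60000		/* ms, cap once links are up */

static unsigned long next_interval(unsigned long intv, int have_links)
{
	unsigned long max_delay = have_links ? REQ_SLOW : REQ_FAST;

	intv *= 2;			/* exponential backoff */
	if (intv > max_delay)
		intv = max_delay;	/* never probe slower than the cap */
	return intv;
}

int main(void)
{
	unsigned long intv = REQ_INIT;
	int i;

	for (i = 0; i < 6; i++) {
		printf("probe interval: %lu ms\n", intv);
		intv = next_interval(intv, 0);	/* 125, 250, 500, 1000, ... */
	}
	return 0;
}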
diff --git a/net/tipc/discover.h b/net/tipc/discover.h index 515b57392f4d..c9b12770c5ed 100644 --- a/net/tipc/discover.h +++ b/net/tipc/discover.h | |||
@@ -39,11 +39,13 @@ | |||
39 | 39 | ||
40 | struct tipc_link_req; | 40 | struct tipc_link_req; |
41 | 41 | ||
42 | int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest); | 42 | int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr, |
43 | struct tipc_media_addr *dest); | ||
43 | void tipc_disc_delete(struct tipc_link_req *req); | 44 | void tipc_disc_delete(struct tipc_link_req *req); |
44 | void tipc_disc_reset(struct tipc_bearer *b_ptr); | 45 | void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr); |
45 | void tipc_disc_add_dest(struct tipc_link_req *req); | 46 | void tipc_disc_add_dest(struct tipc_link_req *req); |
46 | void tipc_disc_remove_dest(struct tipc_link_req *req); | 47 | void tipc_disc_remove_dest(struct tipc_link_req *req); |
47 | void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr); | 48 | void tipc_disc_rcv(struct net *net, struct sk_buff *buf, |
49 | struct tipc_bearer *b_ptr); | ||
48 | 50 | ||
49 | #endif | 51 | #endif |
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index 5e1426f1751f..f69a2fde9f4a 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c | |||
@@ -37,8 +37,6 @@ | |||
37 | #include "core.h" | 37 | #include "core.h" |
38 | #include "bearer.h" | 38 | #include "bearer.h" |
39 | 39 | ||
40 | #define ETH_ADDR_OFFSET 4 /* MAC addr position inside address field */ | ||
41 | |||
42 | /* Convert Ethernet address (media address format) to string */ | 40 | /* Convert Ethernet address (media address format) to string */ |
43 | static int tipc_eth_addr2str(struct tipc_media_addr *addr, | 41 | static int tipc_eth_addr2str(struct tipc_media_addr *addr, |
44 | char *strbuf, int bufsz) | 42 | char *strbuf, int bufsz) |
@@ -53,9 +51,9 @@ static int tipc_eth_addr2str(struct tipc_media_addr *addr, | |||
53 | /* Convert from media address format to discovery message addr format */ | 51 | /* Convert from media address format to discovery message addr format */ |
54 | static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr) | 52 | static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr) |
55 | { | 53 | { |
56 | memset(msg, 0, TIPC_MEDIA_ADDR_SIZE); | 54 | memset(msg, 0, TIPC_MEDIA_INFO_SIZE); |
57 | msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH; | 55 | msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH; |
58 | memcpy(msg + ETH_ADDR_OFFSET, addr->value, ETH_ALEN); | 56 | memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, addr->value, ETH_ALEN); |
59 | return 0; | 57 | return 0; |
60 | } | 58 | } |
61 | 59 | ||
@@ -79,7 +77,7 @@ static int tipc_eth_msg2addr(struct tipc_bearer *b, | |||
79 | char *msg) | 77 | char *msg) |
80 | { | 78 | { |
81 | /* Skip past preamble: */ | 79 | /* Skip past preamble: */ |
82 | msg += ETH_ADDR_OFFSET; | 80 | msg += TIPC_MEDIA_ADDR_OFFSET; |
83 | return tipc_eth_raw2addr(b, addr, msg); | 81 | return tipc_eth_raw2addr(b, addr, msg); |
84 | } | 82 | } |
85 | 83 | ||
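Editor's note: the eth_media.c hunks replace the driver-local ETH_ADDR_OFFSET with the generic TIPC_MEDIA_ADDR_OFFSET when packing a MAC address into a discovery message. A standalone sketch of that packing; the field size and the type-byte offset are assumptions for illustration, only the MAC offset of 4 is taken from the patch:

#include <string.h>
#include <stdint.h>

#define MEDIA_INFO_SIZE   32	/* assumed size of the on-wire address field */
#define MEDIA_TYPE_OFFSET 3	/* assumed position of the media type byte */
#define MEDIA_ADDR_OFFSET 4	/* MAC position, matching the hunk above */
#define MEDIA_TYPE_ETH    1

static void eth_addr2msg(char *msg, const uint8_t mac[6])
{
	memset(msg, 0, MEDIA_INFO_SIZE);	/* zero the whole field */
	msg[MEDIA_TYPE_OFFSET] = MEDIA_TYPE_ETH;
	memcpy(msg + MEDIA_ADDR_OFFSET, mac, 6);
}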
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c index 8522eef9c136..e8c16718e3fa 100644 --- a/net/tipc/ib_media.c +++ b/net/tipc/ib_media.c | |||
@@ -57,7 +57,7 @@ static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf, | |||
57 | /* Convert from media address format to discovery message addr format */ | 57 | /* Convert from media address format to discovery message addr format */ |
58 | static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr) | 58 | static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr) |
59 | { | 59 | { |
60 | memset(msg, 0, TIPC_MEDIA_ADDR_SIZE); | 60 | memset(msg, 0, TIPC_MEDIA_INFO_SIZE); |
61 | memcpy(msg, addr->value, INFINIBAND_ALEN); | 61 | memcpy(msg, addr->value, INFINIBAND_ALEN); |
62 | return 0; | 62 | return 0; |
63 | } | 63 | } |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 23bcc1132365..a6b30df6ec02 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/link.c: TIPC link code | 2 | * net/tipc/link.c: TIPC link code |
3 | * | 3 | * |
4 | * Copyright (c) 1996-2007, 2012-2014, Ericsson AB | 4 | * Copyright (c) 1996-2007, 2012-2015, Ericsson AB |
5 | * Copyright (c) 2004-2007, 2010-2013, Wind River Systems | 5 | * Copyright (c) 2004-2007, 2010-2013, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
@@ -35,12 +35,12 @@ | |||
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include "core.h" | 37 | #include "core.h" |
38 | #include "subscr.h" | ||
38 | #include "link.h" | 39 | #include "link.h" |
39 | #include "bcast.h" | 40 | #include "bcast.h" |
40 | #include "socket.h" | 41 | #include "socket.h" |
41 | #include "name_distr.h" | 42 | #include "name_distr.h" |
42 | #include "discover.h" | 43 | #include "discover.h" |
43 | #include "config.h" | ||
44 | #include "netlink.h" | 44 | #include "netlink.h" |
45 | 45 | ||
46 | #include <linux/pkt_sched.h> | 46 | #include <linux/pkt_sched.h> |
@@ -89,32 +89,23 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = { | |||
89 | #define TIMEOUT_EVT 560817u /* link timer expired */ | 89 | #define TIMEOUT_EVT 560817u /* link timer expired */ |
90 | 90 | ||
91 | /* | 91 | /* |
92 | * The following two 'message types' is really just implementation | 92 | * State value stored in 'failover_pkts' |
93 | * data conveniently stored in the message header. | ||
94 | * They must not be considered part of the protocol | ||
95 | */ | 93 | */ |
96 | #define OPEN_MSG 0 | 94 | #define FIRST_FAILOVER 0xffffu |
97 | #define CLOSED_MSG 1 | ||
98 | 95 | ||
99 | /* | 96 | static void link_handle_out_of_seq_msg(struct tipc_link *link, |
100 | * State value stored in 'exp_msg_count' | 97 | struct sk_buff *skb); |
101 | */ | 98 | static void tipc_link_proto_rcv(struct tipc_link *link, |
102 | #define START_CHANGEOVER 100000u | 99 | struct sk_buff *skb); |
103 | 100 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol); | |
104 | static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, | ||
105 | struct sk_buff *buf); | ||
106 | static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf); | ||
107 | static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr, | ||
108 | struct sk_buff **buf); | ||
109 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance); | ||
110 | static void link_state_event(struct tipc_link *l_ptr, u32 event); | 101 | static void link_state_event(struct tipc_link *l_ptr, u32 event); |
111 | static void link_reset_statistics(struct tipc_link *l_ptr); | 102 | static void link_reset_statistics(struct tipc_link *l_ptr); |
112 | static void link_print(struct tipc_link *l_ptr, const char *str); | 103 | static void link_print(struct tipc_link *l_ptr, const char *str); |
113 | static void tipc_link_sync_xmit(struct tipc_link *l); | 104 | static void tipc_link_sync_xmit(struct tipc_link *l); |
114 | static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf); | 105 | static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf); |
115 | static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf); | 106 | static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb); |
116 | static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf); | 107 | static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb); |
117 | 108 | static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb); | |
118 | /* | 109 | /* |
119 | * Simple link routines | 110 | * Simple link routines |
120 | */ | 111 | */ |
@@ -123,30 +114,26 @@ static unsigned int align(unsigned int i) | |||
123 | return (i + 3) & ~3u; | 114 | return (i + 3) & ~3u; |
124 | } | 115 | } |
125 | 116 | ||
126 | static void link_init_max_pkt(struct tipc_link *l_ptr) | 117 | static void tipc_link_release(struct kref *kref) |
127 | { | 118 | { |
128 | struct tipc_bearer *b_ptr; | 119 | kfree(container_of(kref, struct tipc_link, ref)); |
129 | u32 max_pkt; | 120 | } |
130 | |||
131 | rcu_read_lock(); | ||
132 | b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]); | ||
133 | if (!b_ptr) { | ||
134 | rcu_read_unlock(); | ||
135 | return; | ||
136 | } | ||
137 | max_pkt = (b_ptr->mtu & ~3); | ||
138 | rcu_read_unlock(); | ||
139 | 121 | ||
140 | if (max_pkt > MAX_MSG_SIZE) | 122 | static void tipc_link_get(struct tipc_link *l_ptr) |
141 | max_pkt = MAX_MSG_SIZE; | 123 | { |
124 | kref_get(&l_ptr->ref); | ||
125 | } | ||
142 | 126 | ||
143 | l_ptr->max_pkt_target = max_pkt; | 127 | static void tipc_link_put(struct tipc_link *l_ptr) |
144 | if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT) | 128 | { |
145 | l_ptr->max_pkt = l_ptr->max_pkt_target; | 129 | kref_put(&l_ptr->ref, tipc_link_release); |
146 | else | 130 | } |
147 | l_ptr->max_pkt = MAX_PKT_DEFAULT; | ||
148 | 131 | ||
149 | l_ptr->max_pkt_probes = 0; | 132 | static struct tipc_link *tipc_parallel_link(struct tipc_link *l) |
133 | { | ||
134 | if (l->owner->active_links[0] != l) | ||
135 | return l->owner->active_links[0]; | ||
136 | return l->owner->active_links[1]; | ||
150 | } | 137 | } |
151 | 138 | ||
152 | /* | 139 | /* |
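Editor's note: the hunk above moves link lifetime management onto a kref: the link embeds a reference count, the running timer holds a reference, and the last put frees the object. A generic sketch of that pattern with hypothetical names (not the TIPC identifiers):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/kernel.h>

struct foo_link {
	struct kref ref;
	/* ... link state ... */
};

static struct foo_link *foo_link_create(void)
{
	struct foo_link *l = kzalloc(sizeof(*l), GFP_ATOMIC);

	if (l)
		kref_init(&l->ref);	/* refcount starts at 1 (creator) */
	return l;
}

static void foo_link_release(struct kref *kref)
{
	kfree(container_of(kref, struct foo_link, ref));
}

static void foo_link_get(struct foo_link *l)
{
	kref_get(&l->ref);		/* e.g. taken when the timer is armed */
}

static void foo_link_put(struct foo_link *l)
{
	/* frees the link once the last holder drops its reference */
	kref_put(&l->ref, foo_link_release);
}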
@@ -169,17 +156,18 @@ int tipc_link_is_active(struct tipc_link *l_ptr) | |||
169 | * link_timeout - handle expiration of link timer | 156 | * link_timeout - handle expiration of link timer |
170 | * @l_ptr: pointer to link | 157 | * @l_ptr: pointer to link |
171 | */ | 158 | */ |
172 | static void link_timeout(struct tipc_link *l_ptr) | 159 | static void link_timeout(unsigned long data) |
173 | { | 160 | { |
161 | struct tipc_link *l_ptr = (struct tipc_link *)data; | ||
174 | struct sk_buff *skb; | 162 | struct sk_buff *skb; |
175 | 163 | ||
176 | tipc_node_lock(l_ptr->owner); | 164 | tipc_node_lock(l_ptr->owner); |
177 | 165 | ||
178 | /* update counters used in statistical profiling of send traffic */ | 166 | /* update counters used in statistical profiling of send traffic */ |
179 | l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue); | 167 | l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq); |
180 | l_ptr->stats.queue_sz_counts++; | 168 | l_ptr->stats.queue_sz_counts++; |
181 | 169 | ||
182 | skb = skb_peek(&l_ptr->outqueue); | 170 | skb = skb_peek(&l_ptr->transmq); |
183 | if (skb) { | 171 | if (skb) { |
184 | struct tipc_msg *msg = buf_msg(skb); | 172 | struct tipc_msg *msg = buf_msg(skb); |
185 | u32 length = msg_size(msg); | 173 | u32 length = msg_size(msg); |
@@ -211,15 +199,17 @@ static void link_timeout(struct tipc_link *l_ptr) | |||
211 | /* do all other link processing performed on a periodic basis */ | 199 | /* do all other link processing performed on a periodic basis */ |
212 | link_state_event(l_ptr, TIMEOUT_EVT); | 200 | link_state_event(l_ptr, TIMEOUT_EVT); |
213 | 201 | ||
214 | if (l_ptr->next_out) | 202 | if (skb_queue_len(&l_ptr->backlogq)) |
215 | tipc_link_push_packets(l_ptr); | 203 | tipc_link_push_packets(l_ptr); |
216 | 204 | ||
217 | tipc_node_unlock(l_ptr->owner); | 205 | tipc_node_unlock(l_ptr->owner); |
206 | tipc_link_put(l_ptr); | ||
218 | } | 207 | } |
219 | 208 | ||
220 | static void link_set_timer(struct tipc_link *l_ptr, u32 time) | 209 | static void link_set_timer(struct tipc_link *link, unsigned long time) |
221 | { | 210 | { |
222 | k_start_timer(&l_ptr->timer, time); | 211 | if (!mod_timer(&link->timer, jiffies + time)) |
212 | tipc_link_get(link); | ||
223 | } | 213 | } |
224 | 214 | ||
225 | /** | 215 | /** |
@@ -234,6 +224,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, | |||
234 | struct tipc_bearer *b_ptr, | 224 | struct tipc_bearer *b_ptr, |
235 | const struct tipc_media_addr *media_addr) | 225 | const struct tipc_media_addr *media_addr) |
236 | { | 226 | { |
227 | struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id); | ||
237 | struct tipc_link *l_ptr; | 228 | struct tipc_link *l_ptr; |
238 | struct tipc_msg *msg; | 229 | struct tipc_msg *msg; |
239 | char *if_name; | 230 | char *if_name; |
@@ -259,12 +250,12 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, | |||
259 | pr_warn("Link creation failed, no memory\n"); | 250 | pr_warn("Link creation failed, no memory\n"); |
260 | return NULL; | 251 | return NULL; |
261 | } | 252 | } |
262 | 253 | kref_init(&l_ptr->ref); | |
263 | l_ptr->addr = peer; | 254 | l_ptr->addr = peer; |
264 | if_name = strchr(b_ptr->name, ':') + 1; | 255 | if_name = strchr(b_ptr->name, ':') + 1; |
265 | sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown", | 256 | sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown", |
266 | tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr), | 257 | tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr), |
267 | tipc_node(tipc_own_addr), | 258 | tipc_node(tn->own_addr), |
268 | if_name, | 259 | if_name, |
269 | tipc_zone(peer), tipc_cluster(peer), tipc_node(peer)); | 260 | tipc_zone(peer), tipc_cluster(peer), tipc_node(peer)); |
270 | /* note: peer i/f name is updated by reset/activate message */ | 261 | /* note: peer i/f name is updated by reset/activate message */ |
@@ -278,89 +269,104 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, | |||
278 | 269 | ||
279 | l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg; | 270 | l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg; |
280 | msg = l_ptr->pmsg; | 271 | msg = l_ptr->pmsg; |
281 | tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr); | 272 | tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, |
273 | l_ptr->addr); | ||
282 | msg_set_size(msg, sizeof(l_ptr->proto_msg)); | 274 | msg_set_size(msg, sizeof(l_ptr->proto_msg)); |
283 | msg_set_session(msg, (tipc_random & 0xffff)); | 275 | msg_set_session(msg, (tn->random & 0xffff)); |
284 | msg_set_bearer_id(msg, b_ptr->identity); | 276 | msg_set_bearer_id(msg, b_ptr->identity); |
285 | strcpy((char *)msg_data(msg), if_name); | 277 | strcpy((char *)msg_data(msg), if_name); |
286 | 278 | l_ptr->net_plane = b_ptr->net_plane; | |
279 | l_ptr->advertised_mtu = b_ptr->mtu; | ||
280 | l_ptr->mtu = l_ptr->advertised_mtu; | ||
287 | l_ptr->priority = b_ptr->priority; | 281 | l_ptr->priority = b_ptr->priority; |
288 | tipc_link_set_queue_limits(l_ptr, b_ptr->window); | 282 | tipc_link_set_queue_limits(l_ptr, b_ptr->window); |
289 | |||
290 | l_ptr->net_plane = b_ptr->net_plane; | ||
291 | link_init_max_pkt(l_ptr); | ||
292 | |||
293 | l_ptr->next_out_no = 1; | 283 | l_ptr->next_out_no = 1; |
294 | __skb_queue_head_init(&l_ptr->outqueue); | 284 | __skb_queue_head_init(&l_ptr->transmq); |
295 | __skb_queue_head_init(&l_ptr->deferred_queue); | 285 | __skb_queue_head_init(&l_ptr->backlogq); |
296 | skb_queue_head_init(&l_ptr->waiting_sks); | 286 | __skb_queue_head_init(&l_ptr->deferdq); |
297 | 287 | skb_queue_head_init(&l_ptr->wakeupq); | |
288 | skb_queue_head_init(&l_ptr->inputq); | ||
289 | skb_queue_head_init(&l_ptr->namedq); | ||
298 | link_reset_statistics(l_ptr); | 290 | link_reset_statistics(l_ptr); |
299 | |||
300 | tipc_node_attach_link(n_ptr, l_ptr); | 291 | tipc_node_attach_link(n_ptr, l_ptr); |
301 | 292 | setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr); | |
302 | k_init_timer(&l_ptr->timer, (Handler)link_timeout, | ||
303 | (unsigned long)l_ptr); | ||
304 | |||
305 | link_state_event(l_ptr, STARTING_EVT); | 293 | link_state_event(l_ptr, STARTING_EVT); |
306 | 294 | ||
307 | return l_ptr; | 295 | return l_ptr; |
308 | } | 296 | } |
309 | 297 | ||
310 | void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down) | 298 | /** |
299 | * tipc_link_delete - Delete a link | ||
300 | * @l: link to be deleted | ||
301 | */ | ||
302 | void tipc_link_delete(struct tipc_link *l) | ||
311 | { | 303 | { |
312 | struct tipc_link *l_ptr; | 304 | tipc_link_reset(l); |
313 | struct tipc_node *n_ptr; | 305 | if (del_timer(&l->timer)) |
306 | tipc_link_put(l); | ||
307 | l->flags |= LINK_STOPPED; | ||
308 | /* Delete link now, or when timer is finished: */ | ||
309 | tipc_link_reset_fragments(l); | ||
310 | tipc_node_detach_link(l->owner, l); | ||
311 | tipc_link_put(l); | ||
312 | } | ||
313 | |||
314 | void tipc_link_delete_list(struct net *net, unsigned int bearer_id, | ||
315 | bool shutting_down) | ||
316 | { | ||
317 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
318 | struct tipc_link *link; | ||
319 | struct tipc_node *node; | ||
314 | 320 | ||
315 | rcu_read_lock(); | 321 | rcu_read_lock(); |
316 | list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { | 322 | list_for_each_entry_rcu(node, &tn->node_list, list) { |
317 | tipc_node_lock(n_ptr); | 323 | tipc_node_lock(node); |
318 | l_ptr = n_ptr->links[bearer_id]; | 324 | link = node->links[bearer_id]; |
319 | if (l_ptr) { | 325 | if (link) |
320 | tipc_link_reset(l_ptr); | 326 | tipc_link_delete(link); |
321 | if (shutting_down || !tipc_node_is_up(n_ptr)) { | 327 | tipc_node_unlock(node); |
322 | tipc_node_detach_link(l_ptr->owner, l_ptr); | ||
323 | tipc_link_reset_fragments(l_ptr); | ||
324 | tipc_node_unlock(n_ptr); | ||
325 | |||
326 | /* Nobody else can access this link now: */ | ||
327 | del_timer_sync(&l_ptr->timer); | ||
328 | kfree(l_ptr); | ||
329 | } else { | ||
330 | /* Detach/delete when failover is finished: */ | ||
331 | l_ptr->flags |= LINK_STOPPED; | ||
332 | tipc_node_unlock(n_ptr); | ||
333 | del_timer_sync(&l_ptr->timer); | ||
334 | } | ||
335 | continue; | ||
336 | } | ||
337 | tipc_node_unlock(n_ptr); | ||
338 | } | 328 | } |
339 | rcu_read_unlock(); | 329 | rcu_read_unlock(); |
340 | } | 330 | } |
341 | 331 | ||
342 | /** | 332 | /** |
343 | * link_schedule_user - schedule user for wakeup after congestion | 333 | * link_schedule_user - schedule a message sender for wakeup after congestion |
344 | * @link: congested link | 334 | * @link: congested link |
345 | * @oport: sending port | 335 | * @list: message that was attempted sent |
346 | * @chain_sz: size of buffer chain that was attempted sent | ||
347 | * @imp: importance of message attempted sent | ||
348 | * Create pseudo msg to send back to user when congestion abates | 336 | * Create pseudo msg to send back to user when congestion abates |
337 | * Only consumes message if there is an error | ||
349 | */ | 338 | */ |
350 | static bool link_schedule_user(struct tipc_link *link, u32 oport, | 339 | static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list) |
351 | uint chain_sz, uint imp) | ||
352 | { | 340 | { |
353 | struct sk_buff *buf; | 341 | struct tipc_msg *msg = buf_msg(skb_peek(list)); |
342 | int imp = msg_importance(msg); | ||
343 | u32 oport = msg_origport(msg); | ||
344 | u32 addr = link_own_addr(link); | ||
345 | struct sk_buff *skb; | ||
354 | 346 | ||
355 | buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, tipc_own_addr, | 347 | /* This really cannot happen... */ |
356 | tipc_own_addr, oport, 0, 0); | 348 | if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) { |
357 | if (!buf) | 349 | pr_warn("%s<%s>, send queue full", link_rst_msg, link->name); |
358 | return false; | 350 | tipc_link_reset(link); |
359 | TIPC_SKB_CB(buf)->chain_sz = chain_sz; | 351 | goto err; |
360 | TIPC_SKB_CB(buf)->chain_imp = imp; | 352 | } |
361 | skb_queue_tail(&link->waiting_sks, buf); | 353 | /* Non-blocking sender: */ |
354 | if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending) | ||
355 | return -ELINKCONG; | ||
356 | |||
357 | /* Create and schedule wakeup pseudo message */ | ||
358 | skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, | ||
359 | addr, addr, oport, 0, 0); | ||
360 | if (!skb) | ||
361 | goto err; | ||
362 | TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list); | ||
363 | TIPC_SKB_CB(skb)->chain_imp = imp; | ||
364 | skb_queue_tail(&link->wakeupq, skb); | ||
362 | link->stats.link_congs++; | 365 | link->stats.link_congs++; |
363 | return true; | 366 | return -ELINKCONG; |
367 | err: | ||
368 | __skb_queue_purge(list); | ||
369 | return -ENOBUFS; | ||
364 | } | 370 | } |
365 | 371 | ||
366 | /** | 372 | /** |
@@ -369,17 +375,22 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport, | |||
369 | * Move a number of waiting users, as permitted by available space in | 375 | * Move a number of waiting users, as permitted by available space in |
370 | * the send queue, from link wait queue to node wait queue for wakeup | 376 | * the send queue, from link wait queue to node wait queue for wakeup |
371 | */ | 377 | */ |
372 | static void link_prepare_wakeup(struct tipc_link *link) | 378 | void link_prepare_wakeup(struct tipc_link *l) |
373 | { | 379 | { |
374 | uint pend_qsz = skb_queue_len(&link->outqueue); | 380 | int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,}; |
381 | int imp, lim; | ||
375 | struct sk_buff *skb, *tmp; | 382 | struct sk_buff *skb, *tmp; |
376 | 383 | ||
377 | skb_queue_walk_safe(&link->waiting_sks, skb, tmp) { | 384 | skb_queue_walk_safe(&l->wakeupq, skb, tmp) { |
378 | if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp]) | 385 | imp = TIPC_SKB_CB(skb)->chain_imp; |
386 | lim = l->window + l->backlog[imp].limit; | ||
387 | pnd[imp] += TIPC_SKB_CB(skb)->chain_sz; | ||
388 | if ((pnd[imp] + l->backlog[imp].len) >= lim) | ||
379 | break; | 389 | break; |
380 | pend_qsz += TIPC_SKB_CB(skb)->chain_sz; | 390 | skb_unlink(skb, &l->wakeupq); |
381 | skb_unlink(skb, &link->waiting_sks); | 391 | skb_queue_tail(&l->inputq, skb); |
382 | skb_queue_tail(&link->owner->waiting_sks, skb); | 392 | l->owner->inputq = &l->inputq; |
393 | l->owner->action_flags |= TIPC_MSG_EVT; | ||
383 | } | 394 | } |
384 | } | 395 | } |
385 | 396 | ||
@@ -393,31 +404,42 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr) | |||
393 | l_ptr->reasm_buf = NULL; | 404 | l_ptr->reasm_buf = NULL; |
394 | } | 405 | } |
395 | 406 | ||
407 | static void tipc_link_purge_backlog(struct tipc_link *l) | ||
408 | { | ||
409 | __skb_queue_purge(&l->backlogq); | ||
410 | l->backlog[TIPC_LOW_IMPORTANCE].len = 0; | ||
411 | l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0; | ||
412 | l->backlog[TIPC_HIGH_IMPORTANCE].len = 0; | ||
413 | l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0; | ||
414 | l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0; | ||
415 | } | ||
416 | |||
396 | /** | 417 | /** |
397 | * tipc_link_purge_queues - purge all pkt queues associated with link | 418 | * tipc_link_purge_queues - purge all pkt queues associated with link |
398 | * @l_ptr: pointer to link | 419 | * @l_ptr: pointer to link |
399 | */ | 420 | */ |
400 | void tipc_link_purge_queues(struct tipc_link *l_ptr) | 421 | void tipc_link_purge_queues(struct tipc_link *l_ptr) |
401 | { | 422 | { |
402 | __skb_queue_purge(&l_ptr->deferred_queue); | 423 | __skb_queue_purge(&l_ptr->deferdq); |
403 | __skb_queue_purge(&l_ptr->outqueue); | 424 | __skb_queue_purge(&l_ptr->transmq); |
425 | tipc_link_purge_backlog(l_ptr); | ||
404 | tipc_link_reset_fragments(l_ptr); | 426 | tipc_link_reset_fragments(l_ptr); |
405 | } | 427 | } |
406 | 428 | ||
407 | void tipc_link_reset(struct tipc_link *l_ptr) | 429 | void tipc_link_reset(struct tipc_link *l_ptr) |
408 | { | 430 | { |
409 | u32 prev_state = l_ptr->state; | 431 | u32 prev_state = l_ptr->state; |
410 | u32 checkpoint = l_ptr->next_in_no; | ||
411 | int was_active_link = tipc_link_is_active(l_ptr); | 432 | int was_active_link = tipc_link_is_active(l_ptr); |
412 | struct tipc_node *owner = l_ptr->owner; | 433 | struct tipc_node *owner = l_ptr->owner; |
434 | struct tipc_link *pl = tipc_parallel_link(l_ptr); | ||
413 | 435 | ||
414 | msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff)); | 436 | msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff)); |
415 | 437 | ||
416 | /* Link is down, accept any session */ | 438 | /* Link is down, accept any session */ |
417 | l_ptr->peer_session = INVALID_SESSION; | 439 | l_ptr->peer_session = INVALID_SESSION; |
418 | 440 | ||
419 | /* Prepare for max packet size negotiation */ | 441 | /* Prepare for renewed mtu size negotiation */ |
420 | link_init_max_pkt(l_ptr); | 442 | l_ptr->mtu = l_ptr->advertised_mtu; |
421 | 443 | ||
422 | l_ptr->state = RESET_UNKNOWN; | 444 | l_ptr->state = RESET_UNKNOWN; |
423 | 445 | ||
@@ -425,22 +447,28 @@ void tipc_link_reset(struct tipc_link *l_ptr) | |||
425 | return; | 447 | return; |
426 | 448 | ||
427 | tipc_node_link_down(l_ptr->owner, l_ptr); | 449 | tipc_node_link_down(l_ptr->owner, l_ptr); |
428 | tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr); | 450 | tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr); |
429 | 451 | ||
430 | if (was_active_link && tipc_node_active_links(l_ptr->owner)) { | 452 | if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) { |
431 | l_ptr->reset_checkpoint = checkpoint; | 453 | l_ptr->flags |= LINK_FAILINGOVER; |
432 | l_ptr->exp_msg_count = START_CHANGEOVER; | 454 | l_ptr->failover_checkpt = l_ptr->next_in_no; |
433 | } | 455 | pl->failover_pkts = FIRST_FAILOVER; |
434 | 456 | pl->failover_checkpt = l_ptr->next_in_no; | |
435 | /* Clean up all queues: */ | 457 | pl->failover_skb = l_ptr->reasm_buf; |
436 | __skb_queue_purge(&l_ptr->outqueue); | 458 | } else { |
437 | __skb_queue_purge(&l_ptr->deferred_queue); | 459 | kfree_skb(l_ptr->reasm_buf); |
438 | if (!skb_queue_empty(&l_ptr->waiting_sks)) { | ||
439 | skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks); | ||
440 | owner->action_flags |= TIPC_WAKEUP_USERS; | ||
441 | } | 460 | } |
442 | l_ptr->next_out = NULL; | 461 | /* Clean up all queues, except inputq: */ |
443 | l_ptr->unacked_window = 0; | 462 | __skb_queue_purge(&l_ptr->transmq); |
463 | __skb_queue_purge(&l_ptr->deferdq); | ||
464 | if (!owner->inputq) | ||
465 | owner->inputq = &l_ptr->inputq; | ||
466 | skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq); | ||
467 | if (!skb_queue_empty(owner->inputq)) | ||
468 | owner->action_flags |= TIPC_MSG_EVT; | ||
469 | tipc_link_purge_backlog(l_ptr); | ||
470 | l_ptr->reasm_buf = NULL; | ||
471 | l_ptr->rcv_unacked = 0; | ||
444 | l_ptr->checkpoint = 1; | 472 | l_ptr->checkpoint = 1; |
445 | l_ptr->next_out_no = 1; | 473 | l_ptr->next_out_no = 1; |
446 | l_ptr->fsm_msg_cnt = 0; | 474 | l_ptr->fsm_msg_cnt = 0; |
@@ -448,13 +476,14 @@ void tipc_link_reset(struct tipc_link *l_ptr) | |||
448 | link_reset_statistics(l_ptr); | 476 | link_reset_statistics(l_ptr); |
449 | } | 477 | } |
450 | 478 | ||
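tipc_link_reset() now also carries the failover bookkeeping that used to live in reset_checkpoint and exp_msg_count: if the failing link was active and a distinct parallel link exists, that parallel link records the checkpoint sequence number, expects a yet-unknown number of tunnelled packets, and inherits any half-reassembled buffer. A small sketch of that hand-off (simplified; the field names and the DEMO_FIRST_FAILOVER placeholder are illustrative):

struct demo_link {
	int failingover;               /* models the LINK_FAILINGOVER flag */
	unsigned int failover_checkpt; /* seqno where tunnelled traffic takes over */
	unsigned int failover_pkts;    /* how many tunnelled packets to expect */
	void *failover_skb;            /* inherited reassembly buffer, may be NULL */
};

#define DEMO_FIRST_FAILOVER 0xffffu    /* placeholder until the tunnel header gives the count */

static void demo_begin_failover(struct demo_link *failing,
				struct demo_link *parallel,
				unsigned int next_in_no, void *reasm_buf)
{
	failing->failingover = 1;
	failing->failover_checkpt = next_in_no;
	parallel->failover_pkts = DEMO_FIRST_FAILOVER;
	parallel->failover_checkpt = next_in_no;
	parallel->failover_skb = reasm_buf;   /* take over any half-built fragment chain */
}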
451 | void tipc_link_reset_list(unsigned int bearer_id) | 479 | void tipc_link_reset_list(struct net *net, unsigned int bearer_id) |
452 | { | 480 | { |
481 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
453 | struct tipc_link *l_ptr; | 482 | struct tipc_link *l_ptr; |
454 | struct tipc_node *n_ptr; | 483 | struct tipc_node *n_ptr; |
455 | 484 | ||
456 | rcu_read_lock(); | 485 | rcu_read_lock(); |
457 | list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { | 486 | list_for_each_entry_rcu(n_ptr, &tn->node_list, list) { |
458 | tipc_node_lock(n_ptr); | 487 | tipc_node_lock(n_ptr); |
459 | l_ptr = n_ptr->links[bearer_id]; | 488 | l_ptr = n_ptr->links[bearer_id]; |
460 | if (l_ptr) | 489 | if (l_ptr) |
@@ -464,11 +493,14 @@ void tipc_link_reset_list(unsigned int bearer_id) | |||
464 | rcu_read_unlock(); | 493 | rcu_read_unlock(); |
465 | } | 494 | } |
466 | 495 | ||
467 | static void link_activate(struct tipc_link *l_ptr) | 496 | static void link_activate(struct tipc_link *link) |
468 | { | 497 | { |
469 | l_ptr->next_in_no = l_ptr->stats.recv_info = 1; | 498 | struct tipc_node *node = link->owner; |
470 | tipc_node_link_up(l_ptr->owner, l_ptr); | 499 | |
471 | tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr); | 500 | link->next_in_no = 1; |
501 | link->stats.recv_info = 1; | ||
502 | tipc_node_link_up(node, link); | ||
503 | tipc_bearer_add_dest(node->net, link->bearer_id, link->addr); | ||
472 | } | 504 | } |
473 | 505 | ||
474 | /** | 506 | /** |
@@ -479,7 +511,7 @@ static void link_activate(struct tipc_link *l_ptr) | |||
479 | static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | 511 | static void link_state_event(struct tipc_link *l_ptr, unsigned int event) |
480 | { | 512 | { |
481 | struct tipc_link *other; | 513 | struct tipc_link *other; |
482 | u32 cont_intv = l_ptr->continuity_interval; | 514 | unsigned long cont_intv = l_ptr->cont_intv; |
483 | 515 | ||
484 | if (l_ptr->flags & LINK_STOPPED) | 516 | if (l_ptr->flags & LINK_STOPPED) |
485 | return; | 517 | return; |
@@ -487,8 +519,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
487 | if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT)) | 519 | if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT)) |
488 | return; /* Not yet. */ | 520 | return; /* Not yet. */ |
489 | 521 | ||
490 | /* Check whether changeover is going on */ | 522 | if (l_ptr->flags & LINK_FAILINGOVER) { |
491 | if (l_ptr->exp_msg_count) { | ||
492 | if (event == TIMEOUT_EVT) | 523 | if (event == TIMEOUT_EVT) |
493 | link_set_timer(l_ptr, cont_intv); | 524 | link_set_timer(l_ptr, cont_intv); |
494 | return; | 525 | return; |
@@ -505,11 +536,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
505 | l_ptr->checkpoint = l_ptr->next_in_no; | 536 | l_ptr->checkpoint = l_ptr->next_in_no; |
506 | if (tipc_bclink_acks_missing(l_ptr->owner)) { | 537 | if (tipc_bclink_acks_missing(l_ptr->owner)) { |
507 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | 538 | tipc_link_proto_xmit(l_ptr, STATE_MSG, |
508 | 0, 0, 0, 0, 0); | 539 | 0, 0, 0, 0); |
509 | l_ptr->fsm_msg_cnt++; | ||
510 | } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) { | ||
511 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | ||
512 | 1, 0, 0, 0, 0); | ||
513 | l_ptr->fsm_msg_cnt++; | 540 | l_ptr->fsm_msg_cnt++; |
514 | } | 541 | } |
515 | link_set_timer(l_ptr, cont_intv); | 542 | link_set_timer(l_ptr, cont_intv); |
@@ -517,23 +544,23 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
517 | } | 544 | } |
518 | l_ptr->state = WORKING_UNKNOWN; | 545 | l_ptr->state = WORKING_UNKNOWN; |
519 | l_ptr->fsm_msg_cnt = 0; | 546 | l_ptr->fsm_msg_cnt = 0; |
520 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); | 547 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0); |
521 | l_ptr->fsm_msg_cnt++; | 548 | l_ptr->fsm_msg_cnt++; |
522 | link_set_timer(l_ptr, cont_intv / 4); | 549 | link_set_timer(l_ptr, cont_intv / 4); |
523 | break; | 550 | break; |
524 | case RESET_MSG: | 551 | case RESET_MSG: |
525 | pr_info("%s<%s>, requested by peer\n", link_rst_msg, | 552 | pr_debug("%s<%s>, requested by peer\n", |
526 | l_ptr->name); | 553 | link_rst_msg, l_ptr->name); |
527 | tipc_link_reset(l_ptr); | 554 | tipc_link_reset(l_ptr); |
528 | l_ptr->state = RESET_RESET; | 555 | l_ptr->state = RESET_RESET; |
529 | l_ptr->fsm_msg_cnt = 0; | 556 | l_ptr->fsm_msg_cnt = 0; |
530 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 557 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, |
531 | 0, 0, 0, 0, 0); | 558 | 0, 0, 0, 0); |
532 | l_ptr->fsm_msg_cnt++; | 559 | l_ptr->fsm_msg_cnt++; |
533 | link_set_timer(l_ptr, cont_intv); | 560 | link_set_timer(l_ptr, cont_intv); |
534 | break; | 561 | break; |
535 | default: | 562 | default: |
536 | pr_err("%s%u in WW state\n", link_unk_evt, event); | 563 | pr_debug("%s%u in WW state\n", link_unk_evt, event); |
537 | } | 564 | } |
538 | break; | 565 | break; |
539 | case WORKING_UNKNOWN: | 566 | case WORKING_UNKNOWN: |
@@ -545,13 +572,13 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
545 | link_set_timer(l_ptr, cont_intv); | 572 | link_set_timer(l_ptr, cont_intv); |
546 | break; | 573 | break; |
547 | case RESET_MSG: | 574 | case RESET_MSG: |
548 | pr_info("%s<%s>, requested by peer while probing\n", | 575 | pr_debug("%s<%s>, requested by peer while probing\n", |
549 | link_rst_msg, l_ptr->name); | 576 | link_rst_msg, l_ptr->name); |
550 | tipc_link_reset(l_ptr); | 577 | tipc_link_reset(l_ptr); |
551 | l_ptr->state = RESET_RESET; | 578 | l_ptr->state = RESET_RESET; |
552 | l_ptr->fsm_msg_cnt = 0; | 579 | l_ptr->fsm_msg_cnt = 0; |
553 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 580 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, |
554 | 0, 0, 0, 0, 0); | 581 | 0, 0, 0, 0); |
555 | l_ptr->fsm_msg_cnt++; | 582 | l_ptr->fsm_msg_cnt++; |
556 | link_set_timer(l_ptr, cont_intv); | 583 | link_set_timer(l_ptr, cont_intv); |
557 | break; | 584 | break; |
@@ -562,23 +589,23 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
562 | l_ptr->checkpoint = l_ptr->next_in_no; | 589 | l_ptr->checkpoint = l_ptr->next_in_no; |
563 | if (tipc_bclink_acks_missing(l_ptr->owner)) { | 590 | if (tipc_bclink_acks_missing(l_ptr->owner)) { |
564 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | 591 | tipc_link_proto_xmit(l_ptr, STATE_MSG, |
565 | 0, 0, 0, 0, 0); | 592 | 0, 0, 0, 0); |
566 | l_ptr->fsm_msg_cnt++; | 593 | l_ptr->fsm_msg_cnt++; |
567 | } | 594 | } |
568 | link_set_timer(l_ptr, cont_intv); | 595 | link_set_timer(l_ptr, cont_intv); |
569 | } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) { | 596 | } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) { |
570 | tipc_link_proto_xmit(l_ptr, STATE_MSG, | 597 | tipc_link_proto_xmit(l_ptr, STATE_MSG, |
571 | 1, 0, 0, 0, 0); | 598 | 1, 0, 0, 0); |
572 | l_ptr->fsm_msg_cnt++; | 599 | l_ptr->fsm_msg_cnt++; |
573 | link_set_timer(l_ptr, cont_intv / 4); | 600 | link_set_timer(l_ptr, cont_intv / 4); |
574 | } else { /* Link has failed */ | 601 | } else { /* Link has failed */ |
575 | pr_warn("%s<%s>, peer not responding\n", | 602 | pr_debug("%s<%s>, peer not responding\n", |
576 | link_rst_msg, l_ptr->name); | 603 | link_rst_msg, l_ptr->name); |
577 | tipc_link_reset(l_ptr); | 604 | tipc_link_reset(l_ptr); |
578 | l_ptr->state = RESET_UNKNOWN; | 605 | l_ptr->state = RESET_UNKNOWN; |
579 | l_ptr->fsm_msg_cnt = 0; | 606 | l_ptr->fsm_msg_cnt = 0; |
580 | tipc_link_proto_xmit(l_ptr, RESET_MSG, | 607 | tipc_link_proto_xmit(l_ptr, RESET_MSG, |
581 | 0, 0, 0, 0, 0); | 608 | 0, 0, 0, 0); |
582 | l_ptr->fsm_msg_cnt++; | 609 | l_ptr->fsm_msg_cnt++; |
583 | link_set_timer(l_ptr, cont_intv); | 610 | link_set_timer(l_ptr, cont_intv); |
584 | } | 611 | } |
@@ -598,7 +625,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
598 | l_ptr->state = WORKING_WORKING; | 625 | l_ptr->state = WORKING_WORKING; |
599 | l_ptr->fsm_msg_cnt = 0; | 626 | l_ptr->fsm_msg_cnt = 0; |
600 | link_activate(l_ptr); | 627 | link_activate(l_ptr); |
601 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); | 628 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0); |
602 | l_ptr->fsm_msg_cnt++; | 629 | l_ptr->fsm_msg_cnt++; |
603 | if (l_ptr->owner->working_links == 1) | 630 | if (l_ptr->owner->working_links == 1) |
604 | tipc_link_sync_xmit(l_ptr); | 631 | tipc_link_sync_xmit(l_ptr); |
@@ -608,15 +635,17 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
608 | l_ptr->state = RESET_RESET; | 635 | l_ptr->state = RESET_RESET; |
609 | l_ptr->fsm_msg_cnt = 0; | 636 | l_ptr->fsm_msg_cnt = 0; |
610 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 637 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, |
611 | 1, 0, 0, 0, 0); | 638 | 1, 0, 0, 0); |
612 | l_ptr->fsm_msg_cnt++; | 639 | l_ptr->fsm_msg_cnt++; |
613 | link_set_timer(l_ptr, cont_intv); | 640 | link_set_timer(l_ptr, cont_intv); |
614 | break; | 641 | break; |
615 | case STARTING_EVT: | 642 | case STARTING_EVT: |
616 | l_ptr->flags |= LINK_STARTED; | 643 | l_ptr->flags |= LINK_STARTED; |
617 | /* fall through */ | 644 | l_ptr->fsm_msg_cnt++; |
645 | link_set_timer(l_ptr, cont_intv); | ||
646 | break; | ||
618 | case TIMEOUT_EVT: | 647 | case TIMEOUT_EVT: |
619 | tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); | 648 | tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0); |
620 | l_ptr->fsm_msg_cnt++; | 649 | l_ptr->fsm_msg_cnt++; |
621 | link_set_timer(l_ptr, cont_intv); | 650 | link_set_timer(l_ptr, cont_intv); |
622 | break; | 651 | break; |
@@ -634,7 +663,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
634 | l_ptr->state = WORKING_WORKING; | 663 | l_ptr->state = WORKING_WORKING; |
635 | l_ptr->fsm_msg_cnt = 0; | 664 | l_ptr->fsm_msg_cnt = 0; |
636 | link_activate(l_ptr); | 665 | link_activate(l_ptr); |
637 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); | 666 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0); |
638 | l_ptr->fsm_msg_cnt++; | 667 | l_ptr->fsm_msg_cnt++; |
639 | if (l_ptr->owner->working_links == 1) | 668 | if (l_ptr->owner->working_links == 1) |
640 | tipc_link_sync_xmit(l_ptr); | 669 | tipc_link_sync_xmit(l_ptr); |
@@ -644,7 +673,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
644 | break; | 673 | break; |
645 | case TIMEOUT_EVT: | 674 | case TIMEOUT_EVT: |
646 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, | 675 | tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, |
647 | 0, 0, 0, 0, 0); | 676 | 0, 0, 0, 0); |
648 | l_ptr->fsm_msg_cnt++; | 677 | l_ptr->fsm_msg_cnt++; |
649 | link_set_timer(l_ptr, cont_intv); | 678 | link_set_timer(l_ptr, cont_intv); |
650 | break; | 679 | break; |
@@ -657,99 +686,65 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
657 | } | 686 | } |
658 | } | 687 | } |
659 | 688 | ||
660 | /* tipc_link_cong: determine return value and how to treat the | ||
661 | * sent buffer during link congestion. | ||
662 | * - For plain, errorless user data messages we keep the buffer and | ||
663 | * return -ELINKCONG. | ||
664 | * - For all other messages we discard the buffer and return -EHOSTUNREACH | ||
665 | * - For TIPC internal messages we also reset the link | ||
666 | */ | ||
667 | static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list) | ||
668 | { | ||
669 | struct sk_buff *skb = skb_peek(list); | ||
670 | struct tipc_msg *msg = buf_msg(skb); | ||
671 | uint imp = tipc_msg_tot_importance(msg); | ||
672 | u32 oport = msg_tot_origport(msg); | ||
673 | |||
674 | if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) { | ||
675 | pr_warn("%s<%s>, send queue full", link_rst_msg, link->name); | ||
676 | tipc_link_reset(link); | ||
677 | goto drop; | ||
678 | } | ||
679 | if (unlikely(msg_errcode(msg))) | ||
680 | goto drop; | ||
681 | if (unlikely(msg_reroute_cnt(msg))) | ||
682 | goto drop; | ||
683 | if (TIPC_SKB_CB(skb)->wakeup_pending) | ||
684 | return -ELINKCONG; | ||
685 | if (link_schedule_user(link, oport, skb_queue_len(list), imp)) | ||
686 | return -ELINKCONG; | ||
687 | drop: | ||
688 | __skb_queue_purge(list); | ||
689 | return -EHOSTUNREACH; | ||
690 | } | ||
691 | |||
692 | /** | 689 | /** |
693 | * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked | 690 | * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked |
694 | * @link: link to use | 691 | * @link: link to use |
695 | * @list: chain of buffers containing message | 692 | * @list: chain of buffers containing message |
696 | * | 693 | * |
697 | * Consumes the buffer chain, except when returning -ELINKCONG | 694 | * Consumes the buffer chain, except when returning -ELINKCONG, |
698 | * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket | 695 | * since the caller then may want to make more send attempts. |
699 | * user data messages) or -EHOSTUNREACH (all other messages/senders) | 696 | * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS |
700 | * Only the socket functions tipc_send_stream() and tipc_send_packet() need | 697 | * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted |
701 | * to act on the return value, since they may need to do more send attempts. | ||
702 | */ | 698 | */ |
703 | int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list) | 699 | int __tipc_link_xmit(struct net *net, struct tipc_link *link, |
700 | struct sk_buff_head *list) | ||
704 | { | 701 | { |
705 | struct tipc_msg *msg = buf_msg(skb_peek(list)); | 702 | struct tipc_msg *msg = buf_msg(skb_peek(list)); |
706 | uint psz = msg_size(msg); | 703 | unsigned int maxwin = link->window; |
707 | uint sndlim = link->queue_limit[0]; | 704 | unsigned int imp = msg_importance(msg); |
708 | uint imp = tipc_msg_tot_importance(msg); | 705 | uint mtu = link->mtu; |
709 | uint mtu = link->max_pkt; | ||
710 | uint ack = mod(link->next_in_no - 1); | 706 | uint ack = mod(link->next_in_no - 1); |
711 | uint seqno = link->next_out_no; | 707 | uint seqno = link->next_out_no; |
712 | uint bc_last_in = link->owner->bclink.last_in; | 708 | uint bc_last_in = link->owner->bclink.last_in; |
713 | struct tipc_media_addr *addr = &link->media_addr; | 709 | struct tipc_media_addr *addr = &link->media_addr; |
714 | struct sk_buff_head *outqueue = &link->outqueue; | 710 | struct sk_buff_head *transmq = &link->transmq; |
711 | struct sk_buff_head *backlogq = &link->backlogq; | ||
715 | struct sk_buff *skb, *tmp; | 712 | struct sk_buff *skb, *tmp; |
716 | 713 | ||
717 | /* Match queue limits against msg importance: */ | 714 | /* Match backlog limit against msg importance: */ |
718 | if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp])) | 715 | if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit)) |
719 | return tipc_link_cong(link, list); | 716 | return link_schedule_user(link, list); |
720 | 717 | ||
721 | /* Has valid packet limit been used ? */ | 718 | if (unlikely(msg_size(msg) > mtu)) { |
722 | if (unlikely(psz > mtu)) { | ||
723 | __skb_queue_purge(list); | 719 | __skb_queue_purge(list); |
724 | return -EMSGSIZE; | 720 | return -EMSGSIZE; |
725 | } | 721 | } |
726 | 722 | /* Prepare each packet for sending, and add to relevant queue: */ | |
727 | /* Prepare each packet for sending, and add to outqueue: */ | ||
728 | skb_queue_walk_safe(list, skb, tmp) { | 723 | skb_queue_walk_safe(list, skb, tmp) { |
729 | __skb_unlink(skb, list); | 724 | __skb_unlink(skb, list); |
730 | msg = buf_msg(skb); | 725 | msg = buf_msg(skb); |
731 | msg_set_word(msg, 2, ((ack << 16) | mod(seqno))); | 726 | msg_set_seqno(msg, seqno); |
727 | msg_set_ack(msg, ack); | ||
732 | msg_set_bcast_ack(msg, bc_last_in); | 728 | msg_set_bcast_ack(msg, bc_last_in); |
733 | 729 | ||
734 | if (skb_queue_len(outqueue) < sndlim) { | 730 | if (likely(skb_queue_len(transmq) < maxwin)) { |
735 | __skb_queue_tail(outqueue, skb); | 731 | __skb_queue_tail(transmq, skb); |
736 | tipc_bearer_send(link->bearer_id, skb, addr); | 732 | tipc_bearer_send(net, link->bearer_id, skb, addr); |
737 | link->next_out = NULL; | 733 | link->rcv_unacked = 0; |
738 | link->unacked_window = 0; | 734 | seqno++; |
739 | } else if (tipc_msg_bundle(outqueue, skb, mtu)) { | 735 | continue; |
736 | } | ||
737 | if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) { | ||
740 | link->stats.sent_bundled++; | 738 | link->stats.sent_bundled++; |
741 | continue; | 739 | continue; |
742 | } else if (tipc_msg_make_bundle(outqueue, skb, mtu, | 740 | } |
743 | link->addr)) { | 741 | if (tipc_msg_make_bundle(&skb, mtu, link->addr)) { |
744 | link->stats.sent_bundled++; | 742 | link->stats.sent_bundled++; |
745 | link->stats.sent_bundles++; | 743 | link->stats.sent_bundles++; |
746 | if (!link->next_out) | 744 | imp = msg_importance(buf_msg(skb)); |
747 | link->next_out = skb_peek_tail(outqueue); | ||
748 | } else { | ||
749 | __skb_queue_tail(outqueue, skb); | ||
750 | if (!link->next_out) | ||
751 | link->next_out = skb; | ||
752 | } | 745 | } |
746 | __skb_queue_tail(backlogq, skb); | ||
747 | link->backlog[imp].len++; | ||
753 | seqno++; | 748 | seqno++; |
754 | } | 749 | } |
755 | link->next_out_no = seqno; | 750 | link->next_out_no = seqno; |
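The send path above replaces the old single outqueue plus next_out pointer with two queues: transmq holds packets already handed to the bearer and awaiting acknowledgment, bounded by the link window, while backlogq holds deferred packets counted per importance level. A condensed user-space model of the per-packet decision (demo_* types are illustrative; congestion is checked per packet here, whereas the kernel checks it once per chain, and bundling into the backlog tail is omitted):

#include <stddef.h>

struct demo_pkt { struct demo_pkt *next; int imp; size_t size; };
struct demo_queue { struct demo_pkt *head, *tail; unsigned int len; };

struct demo_link {
	unsigned int window, mtu;
	struct demo_queue transmq;   /* packets sent, awaiting ack */
	struct demo_queue backlogq;  /* packets deferred by the window */
	struct { unsigned int len, limit; } backlog[5];  /* one entry per importance */
};

static void demo_enqueue(struct demo_queue *q, struct demo_pkt *p)
{
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
	q->len++;
}

/* One packet through the new send path: 0 = accepted, -1 = congested
 * (caller may sleep and retry), -2 = larger than the link MTU.
 */
static int demo_xmit(struct demo_link *l, struct demo_pkt *p)
{
	if (l->backlog[p->imp].len >= l->backlog[p->imp].limit)
		return -1;
	if (p->size > l->mtu)
		return -2;
	if (l->transmq.len < l->window) {
		demo_enqueue(&l->transmq, p);   /* goes straight to the bearer */
		return 0;
	}
	demo_enqueue(&l->backlogq, p);          /* waits for acks to open the window */
	l->backlog[p->imp].len++;
	return 0;
}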
@@ -758,7 +753,7 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list) | |||
758 | 753 | ||
759 | static void skb2list(struct sk_buff *skb, struct sk_buff_head *list) | 754 | static void skb2list(struct sk_buff *skb, struct sk_buff_head *list) |
760 | { | 755 | { |
761 | __skb_queue_head_init(list); | 756 | skb_queue_head_init(list); |
762 | __skb_queue_tail(list, skb); | 757 | __skb_queue_tail(list, skb); |
763 | } | 758 | } |
764 | 759 | ||
@@ -767,19 +762,33 @@ static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb) | |||
767 | struct sk_buff_head head; | 762 | struct sk_buff_head head; |
768 | 763 | ||
769 | skb2list(skb, &head); | 764 | skb2list(skb, &head); |
770 | return __tipc_link_xmit(link, &head); | 765 | return __tipc_link_xmit(link->owner->net, link, &head); |
771 | } | 766 | } |
772 | 767 | ||
773 | int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector) | 768 | /* tipc_link_xmit_skb(): send single buffer to destination |
769 | * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE | ||
770 | * messages, which will not be rejected | ||
771 | * The only exception is datagram messages rerouted after secondary | ||
772 | * lookup, which are rare and safe to dispose of anyway. | ||
773 | * TODO: Return real return value, and let callers use | ||
774 | * tipc_wait_for_sendpkt() where applicable | ||
775 | */ | ||
776 | int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode, | ||
777 | u32 selector) | ||
774 | { | 778 | { |
775 | struct sk_buff_head head; | 779 | struct sk_buff_head head; |
780 | int rc; | ||
776 | 781 | ||
777 | skb2list(skb, &head); | 782 | skb2list(skb, &head); |
778 | return tipc_link_xmit(&head, dnode, selector); | 783 | rc = tipc_link_xmit(net, &head, dnode, selector); |
784 | if (rc == -ELINKCONG) | ||
785 | kfree_skb(skb); | ||
786 | return 0; | ||
779 | } | 787 | } |
780 | 788 | ||
781 | /** | 789 | /** |
782 | * tipc_link_xmit() is the general link level function for message sending | 790 | * tipc_link_xmit() is the general link level function for message sending |
791 | * @net: the applicable net namespace | ||
783 | * @list: chain of buffers containing message | 792 | * @list: chain of buffers containing message |
784 | * @dsz: amount of user data to be sent | 793 | * @dsz: amount of user data to be sent |
785 | * @dnode: address of destination node | 794 | * @dnode: address of destination node |
@@ -787,33 +796,31 @@ int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector) | |||
787 | * Consumes the buffer chain, except when returning -ELINKCONG | 796 | * Consumes the buffer chain, except when returning -ELINKCONG |
788 | * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE | 797 | * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE |
789 | */ | 798 | */ |
790 | int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector) | 799 | int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode, |
800 | u32 selector) | ||
791 | { | 801 | { |
792 | struct tipc_link *link = NULL; | 802 | struct tipc_link *link = NULL; |
793 | struct tipc_node *node; | 803 | struct tipc_node *node; |
794 | int rc = -EHOSTUNREACH; | 804 | int rc = -EHOSTUNREACH; |
795 | 805 | ||
796 | node = tipc_node_find(dnode); | 806 | node = tipc_node_find(net, dnode); |
797 | if (node) { | 807 | if (node) { |
798 | tipc_node_lock(node); | 808 | tipc_node_lock(node); |
799 | link = node->active_links[selector & 1]; | 809 | link = node->active_links[selector & 1]; |
800 | if (link) | 810 | if (link) |
801 | rc = __tipc_link_xmit(link, list); | 811 | rc = __tipc_link_xmit(net, link, list); |
802 | tipc_node_unlock(node); | 812 | tipc_node_unlock(node); |
813 | tipc_node_put(node); | ||
803 | } | 814 | } |
804 | |||
805 | if (link) | 815 | if (link) |
806 | return rc; | 816 | return rc; |
807 | 817 | ||
808 | if (likely(in_own_node(dnode))) { | 818 | if (likely(in_own_node(net, dnode))) { |
809 | /* As a node local message chain never contains more than one | 819 | tipc_sk_rcv(net, list); |
810 | * buffer, we just need to dequeue one SKB buffer from the | 820 | return 0; |
811 | * head list. | ||
812 | */ | ||
813 | return tipc_sk_rcv(__skb_dequeue(list)); | ||
814 | } | 821 | } |
815 | __skb_queue_purge(list); | ||
816 | 822 | ||
823 | __skb_queue_purge(list); | ||
817 | return rc; | 824 | return rc; |
818 | } | 825 | } |
819 | 826 | ||
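tipc_link_xmit() itself only resolves the destination node, picks one of the two active links by the low bit of the selector (so traffic from a given origin tends to stay on one link), and falls back to local socket delivery when the destination is the own node. The selection step, as a sketch (demo_* names are illustrative):

#include <stddef.h>

struct demo_link;

struct demo_node {
	struct demo_link *active_links[2];   /* at most two load-sharing links */
};

/* NULL means no usable link: the caller reports -EHOSTUNREACH, or delivers
 * locally if the destination turns out to be the own node.
 */
static struct demo_link *demo_select_link(struct demo_node *n, unsigned int selector)
{
	if (!n)
		return NULL;
	return n->active_links[selector & 1];
}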
@@ -835,7 +842,8 @@ static void tipc_link_sync_xmit(struct tipc_link *link) | |||
835 | return; | 842 | return; |
836 | 843 | ||
837 | msg = buf_msg(skb); | 844 | msg = buf_msg(skb); |
838 | tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr); | 845 | tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG, |
846 | INT_H_SIZE, link->addr); | ||
839 | msg_set_last_bcast(msg, link->owner->bclink.acked); | 847 | msg_set_last_bcast(msg, link->owner->bclink.acked); |
840 | __tipc_link_xmit_skb(link, skb); | 848 | __tipc_link_xmit_skb(link, skb); |
841 | } | 849 | } |
@@ -857,14 +865,6 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf) | |||
857 | kfree_skb(buf); | 865 | kfree_skb(buf); |
858 | } | 866 | } |
859 | 867 | ||
860 | struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list, | ||
861 | const struct sk_buff *skb) | ||
862 | { | ||
863 | if (skb_queue_is_last(list, skb)) | ||
864 | return NULL; | ||
865 | return skb->next; | ||
866 | } | ||
867 | |||
868 | /* | 868 | /* |
869 | * tipc_link_push_packets - push unsent packets to bearer | 869 | * tipc_link_push_packets - push unsent packets to bearer |
870 | * | 870 | * |
@@ -873,29 +873,24 @@ struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list, | |||
873 | * | 873 | * |
874 | * Called with node locked | 874 | * Called with node locked |
875 | */ | 875 | */ |
876 | void tipc_link_push_packets(struct tipc_link *l_ptr) | 876 | void tipc_link_push_packets(struct tipc_link *link) |
877 | { | 877 | { |
878 | struct sk_buff_head *outqueue = &l_ptr->outqueue; | 878 | struct sk_buff *skb; |
879 | struct sk_buff *skb = l_ptr->next_out; | ||
880 | struct tipc_msg *msg; | 879 | struct tipc_msg *msg; |
881 | u32 next, first; | 880 | unsigned int ack = mod(link->next_in_no - 1); |
882 | 881 | ||
883 | skb_queue_walk_from(outqueue, skb) { | 882 | while (skb_queue_len(&link->transmq) < link->window) { |
884 | msg = buf_msg(skb); | 883 | skb = __skb_dequeue(&link->backlogq); |
885 | next = msg_seqno(msg); | 884 | if (!skb) |
886 | first = buf_seqno(skb_peek(outqueue)); | ||
887 | |||
888 | if (mod(next - first) < l_ptr->queue_limit[0]) { | ||
889 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | ||
890 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | ||
891 | if (msg_user(msg) == MSG_BUNDLER) | ||
892 | TIPC_SKB_CB(skb)->bundling = false; | ||
893 | tipc_bearer_send(l_ptr->bearer_id, skb, | ||
894 | &l_ptr->media_addr); | ||
895 | l_ptr->next_out = tipc_skb_queue_next(outqueue, skb); | ||
896 | } else { | ||
897 | break; | 885 | break; |
898 | } | 886 | msg = buf_msg(skb); |
887 | link->backlog[msg_importance(msg)].len--; | ||
888 | msg_set_ack(msg, ack); | ||
889 | msg_set_bcast_ack(msg, link->owner->bclink.last_in); | ||
890 | link->rcv_unacked = 0; | ||
891 | __skb_queue_tail(&link->transmq, skb); | ||
892 | tipc_bearer_send(link->owner->net, link->bearer_id, | ||
893 | skb, &link->media_addr); | ||
899 | } | 894 | } |
900 | } | 895 | } |
901 | 896 | ||
@@ -923,6 +918,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr, | |||
923 | struct sk_buff *buf) | 918 | struct sk_buff *buf) |
924 | { | 919 | { |
925 | struct tipc_msg *msg = buf_msg(buf); | 920 | struct tipc_msg *msg = buf_msg(buf); |
921 | struct net *net = l_ptr->owner->net; | ||
926 | 922 | ||
927 | pr_warn("Retransmission failure on link <%s>\n", l_ptr->name); | 923 | pr_warn("Retransmission failure on link <%s>\n", l_ptr->name); |
928 | 924 | ||
@@ -940,8 +936,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr, | |||
940 | pr_cont("Outstanding acks: %lu\n", | 936 | pr_cont("Outstanding acks: %lu\n", |
941 | (unsigned long) TIPC_SKB_CB(buf)->handle); | 937 | (unsigned long) TIPC_SKB_CB(buf)->handle); |
942 | 938 | ||
943 | n_ptr = tipc_bclink_retransmit_to(); | 939 | n_ptr = tipc_bclink_retransmit_to(net); |
944 | tipc_node_lock(n_ptr); | ||
945 | 940 | ||
946 | tipc_addr_string_fill(addr_string, n_ptr->addr); | 941 | tipc_addr_string_fill(addr_string, n_ptr->addr); |
947 | pr_info("Broadcast link info for %s\n", addr_string); | 942 | pr_info("Broadcast link info for %s\n", addr_string); |
@@ -953,9 +948,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr, | |||
953 | n_ptr->bclink.oos_state, | 948 | n_ptr->bclink.oos_state, |
954 | n_ptr->bclink.last_sent); | 949 | n_ptr->bclink.last_sent); |
955 | 950 | ||
956 | tipc_node_unlock(n_ptr); | 951 | n_ptr->action_flags |= TIPC_BCAST_RESET; |
957 | |||
958 | tipc_bclink_set_flags(TIPC_BCLINK_RESET); | ||
959 | l_ptr->stale_count = 0; | 952 | l_ptr->stale_count = 0; |
960 | } | 953 | } |
961 | } | 954 | } |
@@ -981,96 +974,70 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb, | |||
981 | l_ptr->stale_count = 1; | 974 | l_ptr->stale_count = 1; |
982 | } | 975 | } |
983 | 976 | ||
984 | skb_queue_walk_from(&l_ptr->outqueue, skb) { | 977 | skb_queue_walk_from(&l_ptr->transmq, skb) { |
985 | if (!retransmits || skb == l_ptr->next_out) | 978 | if (!retransmits) |
986 | break; | 979 | break; |
987 | msg = buf_msg(skb); | 980 | msg = buf_msg(skb); |
988 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 981 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); |
989 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | 982 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); |
990 | tipc_bearer_send(l_ptr->bearer_id, skb, &l_ptr->media_addr); | 983 | tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb, |
984 | &l_ptr->media_addr); | ||
991 | retransmits--; | 985 | retransmits--; |
992 | l_ptr->stats.retransmitted++; | 986 | l_ptr->stats.retransmitted++; |
993 | } | 987 | } |
994 | } | 988 | } |
995 | 989 | ||
996 | static void link_retrieve_defq(struct tipc_link *link, | 990 | /* link_synch(): check if all packets arrived before the synch |
997 | struct sk_buff_head *list) | 991 | * point have been consumed |
998 | { | 992 | * Returns true if the parallel links are synched, otherwise false |
999 | u32 seq_no; | ||
1000 | |||
1001 | if (skb_queue_empty(&link->deferred_queue)) | ||
1002 | return; | ||
1003 | |||
1004 | seq_no = buf_seqno(skb_peek(&link->deferred_queue)); | ||
1005 | if (seq_no == mod(link->next_in_no)) | ||
1006 | skb_queue_splice_tail_init(&link->deferred_queue, list); | ||
1007 | } | ||
1008 | |||
1009 | /** | ||
1010 | * link_recv_buf_validate - validate basic format of received message | ||
1011 | * | ||
1012 | * This routine ensures a TIPC message has an acceptable header, and at least | ||
1013 | * as much data as the header indicates it should. The routine also ensures | ||
1014 | * that the entire message header is stored in the main fragment of the message | ||
1015 | * buffer, to simplify future access to message header fields. | ||
1016 | * | ||
1017 | * Note: Having extra info present in the message header or data areas is OK. | ||
1018 | * TIPC will ignore the excess, under the assumption that it is optional info | ||
1019 | * introduced by a later release of the protocol. | ||
1020 | */ | 993 | */ |
1021 | static int link_recv_buf_validate(struct sk_buff *buf) | 994 | static bool link_synch(struct tipc_link *l) |
1022 | { | 995 | { |
1023 | static u32 min_data_hdr_size[8] = { | 996 | unsigned int post_synch; |
1024 | SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE, | 997 | struct tipc_link *pl; |
1025 | MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE | ||
1026 | }; | ||
1027 | 998 | ||
1028 | struct tipc_msg *msg; | 999 | pl = tipc_parallel_link(l); |
1029 | u32 tipc_hdr[2]; | 1000 | if (pl == l) |
1030 | u32 size; | 1001 | goto synched; |
1031 | u32 hdr_size; | ||
1032 | u32 min_hdr_size; | ||
1033 | 1002 | ||
1034 | /* If this packet comes from the defer queue, the skb has already | 1003 | /* Was last pre-synch packet added to input queue ? */ |
1035 | * been validated | 1004 | if (less_eq(pl->next_in_no, l->synch_point)) |
1036 | */ | 1005 | return false; |
1037 | if (unlikely(TIPC_SKB_CB(buf)->deferred)) | ||
1038 | return 1; | ||
1039 | |||
1040 | if (unlikely(buf->len < MIN_H_SIZE)) | ||
1041 | return 0; | ||
1042 | |||
1043 | msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr); | ||
1044 | if (msg == NULL) | ||
1045 | return 0; | ||
1046 | 1006 | ||
1047 | if (unlikely(msg_version(msg) != TIPC_VERSION)) | 1007 | /* Is it still in the input queue ? */ |
1048 | return 0; | 1008 | post_synch = mod(pl->next_in_no - l->synch_point) - 1; |
1009 | if (skb_queue_len(&pl->inputq) > post_synch) | ||
1010 | return false; | ||
1011 | synched: | ||
1012 | l->flags &= ~LINK_SYNCHING; | ||
1013 | return true; | ||
1014 | } | ||
1049 | 1015 | ||
1050 | size = msg_size(msg); | 1016 | static void link_retrieve_defq(struct tipc_link *link, |
1051 | hdr_size = msg_hdr_sz(msg); | 1017 | struct sk_buff_head *list) |
1052 | min_hdr_size = msg_isdata(msg) ? | 1018 | { |
1053 | min_data_hdr_size[msg_type(msg)] : INT_H_SIZE; | 1019 | u32 seq_no; |
1054 | 1020 | ||
1055 | if (unlikely((hdr_size < min_hdr_size) || | 1021 | if (skb_queue_empty(&link->deferdq)) |
1056 | (size < hdr_size) || | 1022 | return; |
1057 | (buf->len < size) || | ||
1058 | (size - hdr_size > TIPC_MAX_USER_MSG_SIZE))) | ||
1059 | return 0; | ||
1060 | 1023 | ||
1061 | return pskb_may_pull(buf, hdr_size); | 1024 | seq_no = buf_seqno(skb_peek(&link->deferdq)); |
1025 | if (seq_no == mod(link->next_in_no)) | ||
1026 | skb_queue_splice_tail_init(&link->deferdq, list); | ||
1062 | } | 1027 | } |
1063 | 1028 | ||
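link_synch() gates a newly activated parallel link: packets arriving on it are deferred until every packet the peer sent on the old link before the synch point has been received there (pl->next_in_no is past the synch point) and has also been drained from that link's input queue. A small model of the test (illustrative field names; the kernel's mod() wrap-around arithmetic is reduced to plain integers):

#include <stdbool.h>

struct demo_parallel {
	unsigned int peer_next_in;   /* next expected seqno on the old link */
	unsigned int inputq_len;     /* packets still waiting for the socket layer */
};

static bool demo_link_synched(const struct demo_parallel *pl,
			      unsigned int synch_point)
{
	unsigned int post_synch;

	if (pl->peer_next_in <= synch_point)
		return false;                    /* pre-synch packets not all received */
	post_synch = pl->peer_next_in - synch_point - 1;
	return pl->inputq_len <= post_synch;     /* pre-synch packets all consumed */
}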
1064 | /** | 1029 | /** |
1065 | * tipc_rcv - process TIPC packets/messages arriving from off-node | 1030 | * tipc_rcv - process TIPC packets/messages arriving from off-node |
1031 | * @net: the applicable net namespace | ||
1066 | * @skb: TIPC packet | 1032 | * @skb: TIPC packet |
1067 | * @b_ptr: pointer to bearer message arrived on | 1033 | * @b_ptr: pointer to bearer message arrived on |
1068 | * | 1034 | * |
1069 | * Invoked with no locks held. Bearer pointer must point to a valid bearer | 1035 | * Invoked with no locks held. Bearer pointer must point to a valid bearer |
1070 | * structure (i.e. cannot be NULL), but bearer can be inactive. | 1036 | * structure (i.e. cannot be NULL), but bearer can be inactive. |
1071 | */ | 1037 | */ |
1072 | void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr) | 1038 | void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr) |
1073 | { | 1039 | { |
1040 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
1074 | struct sk_buff_head head; | 1041 | struct sk_buff_head head; |
1075 | struct tipc_node *n_ptr; | 1042 | struct tipc_node *n_ptr; |
1076 | struct tipc_link *l_ptr; | 1043 | struct tipc_link *l_ptr; |
@@ -1084,39 +1051,34 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr) | |||
1084 | 1051 | ||
1085 | while ((skb = __skb_dequeue(&head))) { | 1052 | while ((skb = __skb_dequeue(&head))) { |
1086 | /* Ensure message is well-formed */ | 1053 | /* Ensure message is well-formed */ |
1087 | if (unlikely(!link_recv_buf_validate(skb))) | 1054 | if (unlikely(!tipc_msg_validate(skb))) |
1088 | goto discard; | ||
1089 | |||
1090 | /* Ensure message data is a single contiguous unit */ | ||
1091 | if (unlikely(skb_linearize(skb))) | ||
1092 | goto discard; | 1055 | goto discard; |
1093 | 1056 | ||
1094 | /* Handle arrival of a non-unicast link message */ | 1057 | /* Handle arrival of a non-unicast link message */ |
1095 | msg = buf_msg(skb); | 1058 | msg = buf_msg(skb); |
1096 | |||
1097 | if (unlikely(msg_non_seq(msg))) { | 1059 | if (unlikely(msg_non_seq(msg))) { |
1098 | if (msg_user(msg) == LINK_CONFIG) | 1060 | if (msg_user(msg) == LINK_CONFIG) |
1099 | tipc_disc_rcv(skb, b_ptr); | 1061 | tipc_disc_rcv(net, skb, b_ptr); |
1100 | else | 1062 | else |
1101 | tipc_bclink_rcv(skb); | 1063 | tipc_bclink_rcv(net, skb); |
1102 | continue; | 1064 | continue; |
1103 | } | 1065 | } |
1104 | 1066 | ||
1105 | /* Discard unicast link messages destined for another node */ | 1067 | /* Discard unicast link messages destined for another node */ |
1106 | if (unlikely(!msg_short(msg) && | 1068 | if (unlikely(!msg_short(msg) && |
1107 | (msg_destnode(msg) != tipc_own_addr))) | 1069 | (msg_destnode(msg) != tn->own_addr))) |
1108 | goto discard; | 1070 | goto discard; |
1109 | 1071 | ||
1110 | /* Locate neighboring node that sent message */ | 1072 | /* Locate neighboring node that sent message */ |
1111 | n_ptr = tipc_node_find(msg_prevnode(msg)); | 1073 | n_ptr = tipc_node_find(net, msg_prevnode(msg)); |
1112 | if (unlikely(!n_ptr)) | 1074 | if (unlikely(!n_ptr)) |
1113 | goto discard; | 1075 | goto discard; |
1114 | tipc_node_lock(n_ptr); | ||
1115 | 1076 | ||
1077 | tipc_node_lock(n_ptr); | ||
1116 | /* Locate unicast link endpoint that should handle message */ | 1078 | /* Locate unicast link endpoint that should handle message */ |
1117 | l_ptr = n_ptr->links[b_ptr->identity]; | 1079 | l_ptr = n_ptr->links[b_ptr->identity]; |
1118 | if (unlikely(!l_ptr)) | 1080 | if (unlikely(!l_ptr)) |
1119 | goto unlock_discard; | 1081 | goto unlock; |
1120 | 1082 | ||
1121 | /* Verify that communication with node is currently allowed */ | 1083 | /* Verify that communication with node is currently allowed */ |
1122 | if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) && | 1084 | if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) && |
@@ -1127,42 +1089,39 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr) | |||
1127 | n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN; | 1089 | n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN; |
1128 | 1090 | ||
1129 | if (tipc_node_blocked(n_ptr)) | 1091 | if (tipc_node_blocked(n_ptr)) |
1130 | goto unlock_discard; | 1092 | goto unlock; |
1131 | 1093 | ||
1132 | /* Validate message sequence number info */ | 1094 | /* Validate message sequence number info */ |
1133 | seq_no = msg_seqno(msg); | 1095 | seq_no = msg_seqno(msg); |
1134 | ackd = msg_ack(msg); | 1096 | ackd = msg_ack(msg); |
1135 | 1097 | ||
1136 | /* Release acked messages */ | 1098 | /* Release acked messages */ |
1137 | if (n_ptr->bclink.recv_permitted) | 1099 | if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg))) |
1138 | tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); | 1100 | tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); |
1139 | 1101 | ||
1140 | released = 0; | 1102 | released = 0; |
1141 | skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) { | 1103 | skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) { |
1142 | if (skb1 == l_ptr->next_out || | 1104 | if (more(buf_seqno(skb1), ackd)) |
1143 | more(buf_seqno(skb1), ackd)) | ||
1144 | break; | 1105 | break; |
1145 | __skb_unlink(skb1, &l_ptr->outqueue); | 1106 | __skb_unlink(skb1, &l_ptr->transmq); |
1146 | kfree_skb(skb1); | 1107 | kfree_skb(skb1); |
1147 | released = 1; | 1108 | released = 1; |
1148 | } | 1109 | } |
1149 | 1110 | ||
1150 | /* Try sending any messages link endpoint has pending */ | 1111 | /* Try sending any messages link endpoint has pending */ |
1151 | if (unlikely(l_ptr->next_out)) | 1112 | if (unlikely(skb_queue_len(&l_ptr->backlogq))) |
1152 | tipc_link_push_packets(l_ptr); | 1113 | tipc_link_push_packets(l_ptr); |
1153 | 1114 | ||
1154 | if (released && !skb_queue_empty(&l_ptr->waiting_sks)) { | 1115 | if (released && !skb_queue_empty(&l_ptr->wakeupq)) |
1155 | link_prepare_wakeup(l_ptr); | 1116 | link_prepare_wakeup(l_ptr); |
1156 | l_ptr->owner->action_flags |= TIPC_WAKEUP_USERS; | ||
1157 | } | ||
1158 | 1117 | ||
1159 | /* Process the incoming packet */ | 1118 | /* Process the incoming packet */ |
1160 | if (unlikely(!link_working_working(l_ptr))) { | 1119 | if (unlikely(!link_working_working(l_ptr))) { |
1161 | if (msg_user(msg) == LINK_PROTOCOL) { | 1120 | if (msg_user(msg) == LINK_PROTOCOL) { |
1162 | tipc_link_proto_rcv(l_ptr, skb); | 1121 | tipc_link_proto_rcv(l_ptr, skb); |
1163 | link_retrieve_defq(l_ptr, &head); | 1122 | link_retrieve_defq(l_ptr, &head); |
1164 | tipc_node_unlock(n_ptr); | 1123 | skb = NULL; |
1165 | continue; | 1124 | goto unlock; |
1166 | } | 1125 | } |
1167 | 1126 | ||
1168 | /* Traffic message. Conditionally activate link */ | 1127 | /* Traffic message. Conditionally activate link */ |
@@ -1171,116 +1130,137 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr) | |||
1171 | if (link_working_working(l_ptr)) { | 1130 | if (link_working_working(l_ptr)) { |
1172 | /* Re-insert buffer in front of queue */ | 1131 | /* Re-insert buffer in front of queue */ |
1173 | __skb_queue_head(&head, skb); | 1132 | __skb_queue_head(&head, skb); |
1174 | tipc_node_unlock(n_ptr); | 1133 | skb = NULL; |
1175 | continue; | 1134 | goto unlock; |
1176 | } | 1135 | } |
1177 | goto unlock_discard; | 1136 | goto unlock; |
1178 | } | 1137 | } |
1179 | 1138 | ||
1180 | /* Link is now in state WORKING_WORKING */ | 1139 | /* Link is now in state WORKING_WORKING */ |
1181 | if (unlikely(seq_no != mod(l_ptr->next_in_no))) { | 1140 | if (unlikely(seq_no != mod(l_ptr->next_in_no))) { |
1182 | link_handle_out_of_seq_msg(l_ptr, skb); | 1141 | link_handle_out_of_seq_msg(l_ptr, skb); |
1183 | link_retrieve_defq(l_ptr, &head); | 1142 | link_retrieve_defq(l_ptr, &head); |
1184 | tipc_node_unlock(n_ptr); | 1143 | skb = NULL; |
1185 | continue; | 1144 | goto unlock; |
1145 | } | ||
1146 | /* Synchronize with parallel link if applicable */ | ||
1147 | if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) { | ||
1148 | link_handle_out_of_seq_msg(l_ptr, skb); | ||
1149 | if (link_synch(l_ptr)) | ||
1150 | link_retrieve_defq(l_ptr, &head); | ||
1151 | skb = NULL; | ||
1152 | goto unlock; | ||
1186 | } | 1153 | } |
1187 | l_ptr->next_in_no++; | 1154 | l_ptr->next_in_no++; |
1188 | if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue))) | 1155 | if (unlikely(!skb_queue_empty(&l_ptr->deferdq))) |
1189 | link_retrieve_defq(l_ptr, &head); | 1156 | link_retrieve_defq(l_ptr, &head); |
1190 | 1157 | if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) { | |
1191 | if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) { | ||
1192 | l_ptr->stats.sent_acks++; | 1158 | l_ptr->stats.sent_acks++; |
1193 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); | 1159 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0); |
1194 | } | ||
1195 | |||
1196 | if (tipc_link_prepare_input(l_ptr, &skb)) { | ||
1197 | tipc_node_unlock(n_ptr); | ||
1198 | continue; | ||
1199 | } | 1160 | } |
1161 | tipc_link_input(l_ptr, skb); | ||
1162 | skb = NULL; | ||
1163 | unlock: | ||
1200 | tipc_node_unlock(n_ptr); | 1164 | tipc_node_unlock(n_ptr); |
1201 | 1165 | tipc_node_put(n_ptr); | |
1202 | if (tipc_link_input(l_ptr, skb) != 0) | ||
1203 | goto discard; | ||
1204 | continue; | ||
1205 | unlock_discard: | ||
1206 | tipc_node_unlock(n_ptr); | ||
1207 | discard: | 1166 | discard: |
1208 | kfree_skb(skb); | 1167 | if (unlikely(skb)) |
1168 | kfree_skb(skb); | ||
1209 | } | 1169 | } |
1210 | } | 1170 | } |
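Within the receive loop, acknowledgment handling now walks transmq instead of the old outqueue: every packet the peer has acknowledged is unlinked and freed, after which the backlog can be pushed and blocked senders woken. A simplified model of that release step (illustrative types, no locking, 16-bit sequence wrap ignored):

struct demo_pkt { struct demo_pkt *next; unsigned int seqno; };
struct demo_queue { struct demo_pkt *head; unsigned int len; };

/* Drop every packet in the transmit queue whose seqno is covered by 'ackd',
 * returning how many were released.
 */
static unsigned int demo_release_acked(struct demo_queue *transmq,
				       unsigned int ackd)
{
	unsigned int released = 0;

	while (transmq->head && transmq->head->seqno <= ackd) {
		struct demo_pkt *p = transmq->head;

		transmq->head = p->next;
		transmq->len--;
		released++;
		/* the real code kfree_skb()s the packet here */
	}
	return released;
}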
1211 | 1171 | ||
1212 | /** | 1172 | /* tipc_data_input - deliver data and name distr msgs to upper layer |
1213 | * tipc_link_prepare_input - process TIPC link messages | ||
1214 | * | ||
1215 | * returns nonzero if the message was consumed | ||
1216 | * | 1173 | * |
1174 | * Consumes buffer if message is of right type | ||
1217 | * Node lock must be held | 1175 | * Node lock must be held |
1218 | */ | 1176 | */ |
1219 | static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf) | 1177 | static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb) |
1220 | { | 1178 | { |
1221 | struct tipc_node *n; | 1179 | struct tipc_node *node = link->owner; |
1222 | struct tipc_msg *msg; | 1180 | struct tipc_msg *msg = buf_msg(skb); |
1223 | int res = -EINVAL; | 1181 | u32 dport = msg_destport(msg); |
1224 | 1182 | ||
1225 | n = l->owner; | ||
1226 | msg = buf_msg(*buf); | ||
1227 | switch (msg_user(msg)) { | 1183 | switch (msg_user(msg)) { |
1228 | case CHANGEOVER_PROTOCOL: | 1184 | case TIPC_LOW_IMPORTANCE: |
1229 | if (tipc_link_tunnel_rcv(n, buf)) | 1185 | case TIPC_MEDIUM_IMPORTANCE: |
1230 | res = 0; | 1186 | case TIPC_HIGH_IMPORTANCE: |
1231 | break; | 1187 | case TIPC_CRITICAL_IMPORTANCE: |
1232 | case MSG_FRAGMENTER: | 1188 | case CONN_MANAGER: |
1233 | l->stats.recv_fragments++; | 1189 | if (tipc_skb_queue_tail(&link->inputq, skb, dport)) { |
1234 | if (tipc_buf_append(&l->reasm_buf, buf)) { | 1190 | node->inputq = &link->inputq; |
1235 | l->stats.recv_fragmented++; | 1191 | node->action_flags |= TIPC_MSG_EVT; |
1236 | res = 0; | ||
1237 | } else if (!l->reasm_buf) { | ||
1238 | tipc_link_reset(l); | ||
1239 | } | 1192 | } |
1240 | break; | 1193 | return true; |
1241 | case MSG_BUNDLER: | ||
1242 | l->stats.recv_bundles++; | ||
1243 | l->stats.recv_bundled += msg_msgcnt(msg); | ||
1244 | res = 0; | ||
1245 | break; | ||
1246 | case NAME_DISTRIBUTOR: | 1194 | case NAME_DISTRIBUTOR: |
1247 | n->bclink.recv_permitted = true; | 1195 | node->bclink.recv_permitted = true; |
1248 | res = 0; | 1196 | node->namedq = &link->namedq; |
1249 | break; | 1197 | skb_queue_tail(&link->namedq, skb); |
1198 | if (skb_queue_len(&link->namedq) == 1) | ||
1199 | node->action_flags |= TIPC_NAMED_MSG_EVT; | ||
1200 | return true; | ||
1201 | case MSG_BUNDLER: | ||
1202 | case TUNNEL_PROTOCOL: | ||
1203 | case MSG_FRAGMENTER: | ||
1250 | case BCAST_PROTOCOL: | 1204 | case BCAST_PROTOCOL: |
1251 | tipc_link_sync_rcv(n, *buf); | 1205 | return false; |
1252 | break; | ||
1253 | default: | 1206 | default: |
1254 | res = 0; | 1207 | pr_warn("Dropping received illegal msg type\n"); |
1255 | } | 1208 | kfree_skb(skb); |
1256 | return res; | 1209 | return false; |
1210 | }; | ||
1257 | } | 1211 | } |
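tipc_data_input() is the new fast-path classifier: data and connection-manager messages are consumed by appending them to the link's input queue (and flagging TIPC_MSG_EVT on the node), name-table updates go to namedq, and anything that still needs link-level work (bundles, tunnel packets, fragments, broadcast protocol) is handed back unconsumed. The dispatch, reduced to a sketch (the enum values are illustrative, not the on-wire user codes):

enum demo_user {
	DEMO_DATA,          /* the four importance levels plus CONN_MANAGER */
	DEMO_NAME_DISTR,    /* name table updates */
	DEMO_BUNDLER,
	DEMO_TUNNEL,
	DEMO_FRAGMENT,
	DEMO_BCAST_PROTO,
};

enum demo_dest { DEMO_TO_INPUTQ, DEMO_TO_NAMEDQ, DEMO_TO_LINK_LAYER, DEMO_DROP };

static enum demo_dest demo_classify(enum demo_user user)
{
	switch (user) {
	case DEMO_DATA:
		return DEMO_TO_INPUTQ;      /* consumed: socket layer will read it */
	case DEMO_NAME_DISTR:
		return DEMO_TO_NAMEDQ;      /* consumed: name distributor will read it */
	case DEMO_BUNDLER:
	case DEMO_TUNNEL:
	case DEMO_FRAGMENT:
	case DEMO_BCAST_PROTO:
		return DEMO_TO_LINK_LAYER;  /* not consumed: tipc_link_input() continues */
	default:
		return DEMO_DROP;           /* illegal message type */
	}
}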
1258 | /** | 1212 | |
1259 | * tipc_link_input - Deliver message to higher layers | 1213 | /* tipc_link_input - process packet that has passed link protocol check |
1214 | * | ||
1215 | * Consumes buffer | ||
1216 | * Node lock must be held | ||
1260 | */ | 1217 | */ |
1261 | static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf) | 1218 | static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb) |
1262 | { | 1219 | { |
1263 | struct tipc_msg *msg = buf_msg(buf); | 1220 | struct tipc_node *node = link->owner; |
1264 | int res = 0; | 1221 | struct tipc_msg *msg = buf_msg(skb); |
1222 | struct sk_buff *iskb; | ||
1223 | int pos = 0; | ||
1224 | |||
1225 | if (likely(tipc_data_input(link, skb))) | ||
1226 | return; | ||
1265 | 1227 | ||
1266 | switch (msg_user(msg)) { | 1228 | switch (msg_user(msg)) { |
1267 | case TIPC_LOW_IMPORTANCE: | 1229 | case TUNNEL_PROTOCOL: |
1268 | case TIPC_MEDIUM_IMPORTANCE: | 1230 | if (msg_dup(msg)) { |
1269 | case TIPC_HIGH_IMPORTANCE: | 1231 | link->flags |= LINK_SYNCHING; |
1270 | case TIPC_CRITICAL_IMPORTANCE: | 1232 | link->synch_point = msg_seqno(msg_get_wrapped(msg)); |
1271 | case CONN_MANAGER: | 1233 | kfree_skb(skb); |
1272 | tipc_sk_rcv(buf); | 1234 | break; |
1235 | } | ||
1236 | if (!tipc_link_failover_rcv(link, &skb)) | ||
1237 | break; | ||
1238 | if (msg_user(buf_msg(skb)) != MSG_BUNDLER) { | ||
1239 | tipc_data_input(link, skb); | ||
1240 | break; | ||
1241 | } | ||
1242 | case MSG_BUNDLER: | ||
1243 | link->stats.recv_bundles++; | ||
1244 | link->stats.recv_bundled += msg_msgcnt(msg); | ||
1245 | |||
1246 | while (tipc_msg_extract(skb, &iskb, &pos)) | ||
1247 | tipc_data_input(link, iskb); | ||
1273 | break; | 1248 | break; |
1274 | case NAME_DISTRIBUTOR: | 1249 | case MSG_FRAGMENTER: |
1275 | tipc_named_rcv(buf); | 1250 | link->stats.recv_fragments++; |
1251 | if (tipc_buf_append(&link->reasm_buf, &skb)) { | ||
1252 | link->stats.recv_fragmented++; | ||
1253 | tipc_data_input(link, skb); | ||
1254 | } else if (!link->reasm_buf) { | ||
1255 | tipc_link_reset(link); | ||
1256 | } | ||
1276 | break; | 1257 | break; |
1277 | case MSG_BUNDLER: | 1258 | case BCAST_PROTOCOL: |
1278 | tipc_link_bundle_rcv(buf); | 1259 | tipc_link_sync_rcv(node, skb); |
1279 | break; | 1260 | break; |
1280 | default: | 1261 | default: |
1281 | res = -EINVAL; | 1262 | break; |
1282 | } | 1263 | }; |
1283 | return res; | ||
1284 | } | 1264 | } |
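tipc_link_input() then unpacks whatever the fast path declined: a duplicate tunnel packet flips the link into SYNCHING and records the synch point, a failover tunnel packet is unwrapped, bundles are split into their inner messages, fragments are reassembled, and broadcast protocol messages update bclink state. A compact sketch of just the bundle and fragment branches (demo_extract(), demo_append() and demo_deliver() are assumed stand-ins, loosely mirroring tipc_msg_extract(), tipc_buf_append() and tipc_data_input()):

#include <stdbool.h>

struct demo_msg { int user; };

/* Assumed helpers; declared only, since their internals do not matter here. */
bool demo_extract(struct demo_msg *bundle, struct demo_msg **inner, int *pos);
bool demo_append(struct demo_msg **reasm, struct demo_msg **fragment);
void demo_deliver(struct demo_msg *m);   /* plays the role of tipc_data_input() */

enum { DEMO_BUNDLER = 1, DEMO_FRAGMENTER = 2 };

static void demo_link_input(struct demo_msg **reasm_buf, struct demo_msg *m)
{
	struct demo_msg *inner;
	int pos = 0;

	switch (m->user) {
	case DEMO_BUNDLER:
		while (demo_extract(m, &inner, &pos))   /* one inner message at a time */
			demo_deliver(inner);
		break;
	case DEMO_FRAGMENTER:
		if (demo_append(reasm_buf, &m))         /* last fragment arrived */
			demo_deliver(m);
		/* else: either still reassembling, or reassembly failed */
		break;
	default:
		break;
	}
}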
1285 | 1265 | ||
1286 | /** | 1266 | /** |
@@ -1348,11 +1328,10 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, | |||
1348 | return; | 1328 | return; |
1349 | } | 1329 | } |
1350 | 1330 | ||
1351 | if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) { | 1331 | if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) { |
1352 | l_ptr->stats.deferred_recv++; | 1332 | l_ptr->stats.deferred_recv++; |
1353 | TIPC_SKB_CB(buf)->deferred = true; | 1333 | if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1) |
1354 | if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1) | 1334 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0); |
1355 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); | ||
1356 | } else { | 1335 | } else { |
1357 | l_ptr->stats.duplicates++; | 1336 | l_ptr->stats.duplicates++; |
1358 | } | 1337 | } |
@@ -1362,15 +1341,15 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, | |||
1362 | * Send protocol message to the other endpoint. | 1341 | * Send protocol message to the other endpoint. |
1363 | */ | 1342 | */ |
1364 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | 1343 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, |
1365 | u32 gap, u32 tolerance, u32 priority, u32 ack_mtu) | 1344 | u32 gap, u32 tolerance, u32 priority) |
1366 | { | 1345 | { |
1367 | struct sk_buff *buf = NULL; | 1346 | struct sk_buff *buf = NULL; |
1368 | struct tipc_msg *msg = l_ptr->pmsg; | 1347 | struct tipc_msg *msg = l_ptr->pmsg; |
1369 | u32 msg_size = sizeof(l_ptr->proto_msg); | 1348 | u32 msg_size = sizeof(l_ptr->proto_msg); |
1370 | int r_flag; | 1349 | int r_flag; |
1371 | 1350 | ||
1372 | /* Don't send protocol message during link changeover */ | 1351 | /* Don't send protocol message during link failover */ |
1373 | if (l_ptr->exp_msg_count) | 1352 | if (l_ptr->flags & LINK_FAILINGOVER) |
1374 | return; | 1353 | return; |
1375 | 1354 | ||
1376 | /* Abort non-RESET send if communication with node is prohibited */ | 1355 | /* Abort non-RESET send if communication with node is prohibited */ |
@@ -1381,18 +1360,18 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | |||
1381 | msg_set_type(msg, msg_typ); | 1360 | msg_set_type(msg, msg_typ); |
1382 | msg_set_net_plane(msg, l_ptr->net_plane); | 1361 | msg_set_net_plane(msg, l_ptr->net_plane); |
1383 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | 1362 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); |
1384 | msg_set_last_bcast(msg, tipc_bclink_get_last_sent()); | 1363 | msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net)); |
1385 | 1364 | ||
1386 | if (msg_typ == STATE_MSG) { | 1365 | if (msg_typ == STATE_MSG) { |
1387 | u32 next_sent = mod(l_ptr->next_out_no); | 1366 | u32 next_sent = mod(l_ptr->next_out_no); |
1388 | 1367 | ||
1389 | if (!tipc_link_is_up(l_ptr)) | 1368 | if (!tipc_link_is_up(l_ptr)) |
1390 | return; | 1369 | return; |
1391 | if (l_ptr->next_out) | 1370 | if (skb_queue_len(&l_ptr->backlogq)) |
1392 | next_sent = buf_seqno(l_ptr->next_out); | 1371 | next_sent = buf_seqno(skb_peek(&l_ptr->backlogq)); |
1393 | msg_set_next_sent(msg, next_sent); | 1372 | msg_set_next_sent(msg, next_sent); |
1394 | if (!skb_queue_empty(&l_ptr->deferred_queue)) { | 1373 | if (!skb_queue_empty(&l_ptr->deferdq)) { |
1395 | u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue)); | 1374 | u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq)); |
1396 | gap = mod(rec - mod(l_ptr->next_in_no)); | 1375 | gap = mod(rec - mod(l_ptr->next_in_no)); |
1397 | } | 1376 | } |
1398 | msg_set_seq_gap(msg, gap); | 1377 | msg_set_seq_gap(msg, gap); |
@@ -1400,35 +1379,20 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | |||
1400 | l_ptr->stats.sent_nacks++; | 1379 | l_ptr->stats.sent_nacks++; |
1401 | msg_set_link_tolerance(msg, tolerance); | 1380 | msg_set_link_tolerance(msg, tolerance); |
1402 | msg_set_linkprio(msg, priority); | 1381 | msg_set_linkprio(msg, priority); |
1403 | msg_set_max_pkt(msg, ack_mtu); | 1382 | msg_set_max_pkt(msg, l_ptr->mtu); |
1404 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 1383 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); |
1405 | msg_set_probe(msg, probe_msg != 0); | 1384 | msg_set_probe(msg, probe_msg != 0); |
1406 | if (probe_msg) { | 1385 | if (probe_msg) |
1407 | u32 mtu = l_ptr->max_pkt; | ||
1408 | |||
1409 | if ((mtu < l_ptr->max_pkt_target) && | ||
1410 | link_working_working(l_ptr) && | ||
1411 | l_ptr->fsm_msg_cnt) { | ||
1412 | msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; | ||
1413 | if (l_ptr->max_pkt_probes == 10) { | ||
1414 | l_ptr->max_pkt_target = (msg_size - 4); | ||
1415 | l_ptr->max_pkt_probes = 0; | ||
1416 | msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; | ||
1417 | } | ||
1418 | l_ptr->max_pkt_probes++; | ||
1419 | } | ||
1420 | |||
1421 | l_ptr->stats.sent_probes++; | 1386 | l_ptr->stats.sent_probes++; |
1422 | } | ||
1423 | l_ptr->stats.sent_states++; | 1387 | l_ptr->stats.sent_states++; |
1424 | } else { /* RESET_MSG or ACTIVATE_MSG */ | 1388 | } else { /* RESET_MSG or ACTIVATE_MSG */ |
1425 | msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1)); | 1389 | msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1)); |
1426 | msg_set_seq_gap(msg, 0); | 1390 | msg_set_seq_gap(msg, 0); |
1427 | msg_set_next_sent(msg, 1); | 1391 | msg_set_next_sent(msg, 1); |
1428 | msg_set_probe(msg, 0); | 1392 | msg_set_probe(msg, 0); |
1429 | msg_set_link_tolerance(msg, l_ptr->tolerance); | 1393 | msg_set_link_tolerance(msg, l_ptr->tolerance); |
1430 | msg_set_linkprio(msg, l_ptr->priority); | 1394 | msg_set_linkprio(msg, l_ptr->priority); |
1431 | msg_set_max_pkt(msg, l_ptr->max_pkt_target); | 1395 | msg_set_max_pkt(msg, l_ptr->advertised_mtu); |
1432 | } | 1396 | } |
1433 | 1397 | ||
1434 | r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr)); | 1398 | r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr)); |
@@ -1444,9 +1408,9 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | |||
1444 | 1408 | ||
1445 | skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); | 1409 | skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); |
1446 | buf->priority = TC_PRIO_CONTROL; | 1410 | buf->priority = TC_PRIO_CONTROL; |
1447 | 1411 | tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf, | |
1448 | tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr); | 1412 | &l_ptr->media_addr); |
1449 | l_ptr->unacked_window = 0; | 1413 | l_ptr->rcv_unacked = 0; |
1450 | kfree_skb(buf); | 1414 | kfree_skb(buf); |
1451 | } | 1415 | } |
1452 | 1416 | ||
@@ -1455,20 +1419,18 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | |||
1455 | * Note that network plane id propagates through the network, and may | 1419 | * Note that network plane id propagates through the network, and may |
1456 | * change at any time. The node with lowest address rules | 1420 | * change at any time. The node with lowest address rules |
1457 | */ | 1421 | */ |
1458 | static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf) | 1422 | static void tipc_link_proto_rcv(struct tipc_link *l_ptr, |
1423 | struct sk_buff *buf) | ||
1459 | { | 1424 | { |
1460 | u32 rec_gap = 0; | 1425 | u32 rec_gap = 0; |
1461 | u32 max_pkt_info; | ||
1462 | u32 max_pkt_ack; | ||
1463 | u32 msg_tol; | 1426 | u32 msg_tol; |
1464 | struct tipc_msg *msg = buf_msg(buf); | 1427 | struct tipc_msg *msg = buf_msg(buf); |
1465 | 1428 | ||
1466 | /* Discard protocol message during link changeover */ | 1429 | if (l_ptr->flags & LINK_FAILINGOVER) |
1467 | if (l_ptr->exp_msg_count) | ||
1468 | goto exit; | 1430 | goto exit; |
1469 | 1431 | ||
1470 | if (l_ptr->net_plane != msg_net_plane(msg)) | 1432 | if (l_ptr->net_plane != msg_net_plane(msg)) |
1471 | if (tipc_own_addr > msg_prevnode(msg)) | 1433 | if (link_own_addr(l_ptr) > msg_prevnode(msg)) |
1472 | l_ptr->net_plane = msg_net_plane(msg); | 1434 | l_ptr->net_plane = msg_net_plane(msg); |
1473 | 1435 | ||
1474 | switch (msg_type(msg)) { | 1436 | switch (msg_type(msg)) { |
@@ -1503,15 +1465,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf) | |||
1503 | if (msg_linkprio(msg) > l_ptr->priority) | 1465 | if (msg_linkprio(msg) > l_ptr->priority) |
1504 | l_ptr->priority = msg_linkprio(msg); | 1466 | l_ptr->priority = msg_linkprio(msg); |
1505 | 1467 | ||
1506 | max_pkt_info = msg_max_pkt(msg); | 1468 | if (l_ptr->mtu > msg_max_pkt(msg)) |
1507 | if (max_pkt_info) { | 1469 | l_ptr->mtu = msg_max_pkt(msg); |
1508 | if (max_pkt_info < l_ptr->max_pkt_target) | ||
1509 | l_ptr->max_pkt_target = max_pkt_info; | ||
1510 | if (l_ptr->max_pkt > l_ptr->max_pkt_target) | ||
1511 | l_ptr->max_pkt = l_ptr->max_pkt_target; | ||
1512 | } else { | ||
1513 | l_ptr->max_pkt = l_ptr->max_pkt_target; | ||
1514 | } | ||
1515 | 1470 | ||
1516 | /* Synchronize broadcast link info, if not done previously */ | 1471 | /* Synchronize broadcast link info, if not done previously */ |
1517 | if (!tipc_node_is_up(l_ptr->owner)) { | 1472 | if (!tipc_node_is_up(l_ptr->owner)) { |
@@ -1535,9 +1490,9 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf) | |||
1535 | 1490 | ||
1536 | if (msg_linkprio(msg) && | 1491 | if (msg_linkprio(msg) && |
1537 | (msg_linkprio(msg) != l_ptr->priority)) { | 1492 | (msg_linkprio(msg) != l_ptr->priority)) { |
1538 | pr_warn("%s<%s>, priority change %u->%u\n", | 1493 | pr_debug("%s<%s>, priority change %u->%u\n", |
1539 | link_rst_msg, l_ptr->name, l_ptr->priority, | 1494 | link_rst_msg, l_ptr->name, |
1540 | msg_linkprio(msg)); | 1495 | l_ptr->priority, msg_linkprio(msg)); |
1541 | l_ptr->priority = msg_linkprio(msg); | 1496 | l_ptr->priority = msg_linkprio(msg); |
1542 | tipc_link_reset(l_ptr); /* Enforce change to take effect */ | 1497 | tipc_link_reset(l_ptr); /* Enforce change to take effect */ |
1543 | break; | 1498 | break; |
@@ -1556,18 +1511,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf) | |||
1556 | mod(l_ptr->next_in_no)); | 1511 | mod(l_ptr->next_in_no)); |
1557 | } | 1512 | } |
1558 | 1513 | ||
1559 | max_pkt_ack = msg_max_pkt(msg); | 1514 | if (msg_probe(msg)) |
1560 | if (max_pkt_ack > l_ptr->max_pkt) { | ||
1561 | l_ptr->max_pkt = max_pkt_ack; | ||
1562 | l_ptr->max_pkt_probes = 0; | ||
1563 | } | ||
1564 | |||
1565 | max_pkt_ack = 0; | ||
1566 | if (msg_probe(msg)) { | ||
1567 | l_ptr->stats.recv_probes++; | 1515 | l_ptr->stats.recv_probes++; |
1568 | if (msg_size(msg) > sizeof(l_ptr->proto_msg)) | ||
1569 | max_pkt_ack = msg_size(msg); | ||
1570 | } | ||
1571 | 1516 | ||
1572 | /* Protocol message before retransmits, reduce loss risk */ | 1517 | /* Protocol message before retransmits, reduce loss risk */ |
1573 | if (l_ptr->owner->bclink.recv_permitted) | 1518 | if (l_ptr->owner->bclink.recv_permitted) |
@@ -1575,12 +1520,12 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf) | |||
1575 | msg_last_bcast(msg)); | 1520 | msg_last_bcast(msg)); |
1576 | 1521 | ||
1577 | if (rec_gap || (msg_probe(msg))) { | 1522 | if (rec_gap || (msg_probe(msg))) { |
1578 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0, | 1523 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, |
1579 | 0, max_pkt_ack); | 1524 | rec_gap, 0, 0); |
1580 | } | 1525 | } |
1581 | if (msg_seq_gap(msg)) { | 1526 | if (msg_seq_gap(msg)) { |
1582 | l_ptr->stats.recv_nacks++; | 1527 | l_ptr->stats.recv_nacks++; |
1583 | tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue), | 1528 | tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq), |
1584 | msg_seq_gap(msg)); | 1529 | msg_seq_gap(msg)); |
1585 | } | 1530 | } |
1586 | break; | 1531 | break; |
@@ -1627,7 +1572,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, | |||
1627 | */ | 1572 | */ |
1628 | void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | 1573 | void tipc_link_failover_send_queue(struct tipc_link *l_ptr) |
1629 | { | 1574 | { |
1630 | u32 msgcount = skb_queue_len(&l_ptr->outqueue); | 1575 | int msgcount; |
1631 | struct tipc_link *tunnel = l_ptr->owner->active_links[0]; | 1576 | struct tipc_link *tunnel = l_ptr->owner->active_links[0]; |
1632 | struct tipc_msg tunnel_hdr; | 1577 | struct tipc_msg tunnel_hdr; |
1633 | struct sk_buff *skb; | 1578 | struct sk_buff *skb; |
@@ -1636,12 +1581,15 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | |||
1636 | if (!tunnel) | 1581 | if (!tunnel) |
1637 | return; | 1582 | return; |
1638 | 1583 | ||
1639 | tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, | 1584 | tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL, |
1640 | ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); | 1585 | FAILOVER_MSG, INT_H_SIZE, l_ptr->addr); |
1586 | skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq); | ||
1587 | tipc_link_purge_backlog(l_ptr); | ||
1588 | msgcount = skb_queue_len(&l_ptr->transmq); | ||
1641 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); | 1589 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); |
1642 | msg_set_msgcnt(&tunnel_hdr, msgcount); | 1590 | msg_set_msgcnt(&tunnel_hdr, msgcount); |
1643 | 1591 | ||
1644 | if (skb_queue_empty(&l_ptr->outqueue)) { | 1592 | if (skb_queue_empty(&l_ptr->transmq)) { |
1645 | skb = tipc_buf_acquire(INT_H_SIZE); | 1593 | skb = tipc_buf_acquire(INT_H_SIZE); |
1646 | if (skb) { | 1594 | if (skb) { |
1647 | skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE); | 1595 | skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE); |
@@ -1657,7 +1605,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | |||
1657 | split_bundles = (l_ptr->owner->active_links[0] != | 1605 | split_bundles = (l_ptr->owner->active_links[0] != |
1658 | l_ptr->owner->active_links[1]); | 1606 | l_ptr->owner->active_links[1]); |
1659 | 1607 | ||
1660 | skb_queue_walk(&l_ptr->outqueue, skb) { | 1608 | skb_queue_walk(&l_ptr->transmq, skb) { |
1661 | struct tipc_msg *msg = buf_msg(skb); | 1609 | struct tipc_msg *msg = buf_msg(skb); |
1662 | 1610 | ||
1663 | if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { | 1611 | if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { |
@@ -1688,245 +1636,151 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | |||
1688 | * and sequence order is preserved per sender/receiver socket pair. | 1636 | * and sequence order is preserved per sender/receiver socket pair. |
1689 | * Owner node is locked. | 1637 | * Owner node is locked. |
1690 | */ | 1638 | */ |
1691 | void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, | 1639 | void tipc_link_dup_queue_xmit(struct tipc_link *link, |
1692 | struct tipc_link *tunnel) | 1640 | struct tipc_link *tnl) |
1693 | { | 1641 | { |
1694 | struct sk_buff *skb; | 1642 | struct sk_buff *skb; |
1695 | struct tipc_msg tunnel_hdr; | 1643 | struct tipc_msg tnl_hdr; |
1696 | 1644 | struct sk_buff_head *queue = &link->transmq; | |
1697 | tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, | 1645 | int mcnt; |
1698 | DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); | 1646 | |
1699 | msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue)); | 1647 | tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL, |
1700 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); | 1648 | SYNCH_MSG, INT_H_SIZE, link->addr); |
1701 | skb_queue_walk(&l_ptr->outqueue, skb) { | 1649 | mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq); |
1650 | msg_set_msgcnt(&tnl_hdr, mcnt); | ||
1651 | msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id); | ||
1652 | |||
1653 | tunnel_queue: | ||
1654 | skb_queue_walk(queue, skb) { | ||
1702 | struct sk_buff *outskb; | 1655 | struct sk_buff *outskb; |
1703 | struct tipc_msg *msg = buf_msg(skb); | 1656 | struct tipc_msg *msg = buf_msg(skb); |
1704 | u32 length = msg_size(msg); | 1657 | u32 len = msg_size(msg); |
1705 | 1658 | ||
1706 | if (msg_user(msg) == MSG_BUNDLER) | 1659 | msg_set_ack(msg, mod(link->next_in_no - 1)); |
1707 | msg_set_type(msg, CLOSED_MSG); | 1660 | msg_set_bcast_ack(msg, link->owner->bclink.last_in); |
1708 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */ | 1661 | msg_set_size(&tnl_hdr, len + INT_H_SIZE); |
1709 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | 1662 | outskb = tipc_buf_acquire(len + INT_H_SIZE); |
1710 | msg_set_size(&tunnel_hdr, length + INT_H_SIZE); | ||
1711 | outskb = tipc_buf_acquire(length + INT_H_SIZE); | ||
1712 | if (outskb == NULL) { | 1663 | if (outskb == NULL) { |
1713 | pr_warn("%sunable to send duplicate msg\n", | 1664 | pr_warn("%sunable to send duplicate msg\n", |
1714 | link_co_err); | 1665 | link_co_err); |
1715 | return; | 1666 | return; |
1716 | } | 1667 | } |
1717 | skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE); | 1668 | skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE); |
1718 | skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data, | 1669 | skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, |
1719 | length); | 1670 | skb->data, len); |
1720 | __tipc_link_xmit_skb(tunnel, outskb); | 1671 | __tipc_link_xmit_skb(tnl, outskb); |
1721 | if (!tipc_link_is_up(l_ptr)) | 1672 | if (!tipc_link_is_up(link)) |
1722 | return; | 1673 | return; |
1723 | } | 1674 | } |
1724 | } | 1675 | if (queue == &link->backlogq) |
1725 | |||
1726 | /** | ||
1727 | * buf_extract - extracts embedded TIPC message from another message | ||
1728 | * @skb: encapsulating message buffer | ||
1729 | * @from_pos: offset to extract from | ||
1730 | * | ||
1731 | * Returns a new message buffer containing an embedded message. The | ||
1732 | * encapsulating message itself is left unchanged. | ||
1733 | */ | ||
1734 | static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) | ||
1735 | { | ||
1736 | struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos); | ||
1737 | u32 size = msg_size(msg); | ||
1738 | struct sk_buff *eb; | ||
1739 | |||
1740 | eb = tipc_buf_acquire(size); | ||
1741 | if (eb) | ||
1742 | skb_copy_to_linear_data(eb, msg, size); | ||
1743 | return eb; | ||
1744 | } | ||
1745 | |||
1746 | |||
1747 | |||
1748 | /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet. | ||
1749 | * Owner node is locked. | ||
1750 | */ | ||
1751 | static void tipc_link_dup_rcv(struct tipc_link *l_ptr, | ||
1752 | struct sk_buff *t_buf) | ||
1753 | { | ||
1754 | struct sk_buff *buf; | ||
1755 | |||
1756 | if (!tipc_link_is_up(l_ptr)) | ||
1757 | return; | ||
1758 | |||
1759 | buf = buf_extract(t_buf, INT_H_SIZE); | ||
1760 | if (buf == NULL) { | ||
1761 | pr_warn("%sfailed to extract inner dup pkt\n", link_co_err); | ||
1762 | return; | 1676 | return; |
1763 | } | 1677 | queue = &link->backlogq; |
1764 | 1678 | goto tunnel_queue; | |
1765 | /* Add buffer to deferred queue, if applicable: */ | ||
1766 | link_handle_out_of_seq_msg(l_ptr, buf); | ||
1767 | } | 1679 | } |
1768 | 1680 | ||
1769 | /* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet | 1681 | /* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet |
1770 | * Owner node is locked. | 1682 | * Owner node is locked. |
1771 | */ | 1683 | */ |
1772 | static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr, | 1684 | static bool tipc_link_failover_rcv(struct tipc_link *link, |
1773 | struct sk_buff *t_buf) | 1685 | struct sk_buff **skb) |
1774 | { | 1686 | { |
1775 | struct tipc_msg *t_msg = buf_msg(t_buf); | 1687 | struct tipc_msg *msg = buf_msg(*skb); |
1776 | struct sk_buff *buf = NULL; | 1688 | struct sk_buff *iskb = NULL; |
1777 | struct tipc_msg *msg; | 1689 | struct tipc_link *pl = NULL; |
1690 | int bearer_id = msg_bearer_id(msg); | ||
1691 | int pos = 0; | ||
1778 | 1692 | ||
1779 | if (tipc_link_is_up(l_ptr)) | 1693 | if (msg_type(msg) != FAILOVER_MSG) { |
1780 | tipc_link_reset(l_ptr); | 1694 | pr_warn("%sunknown tunnel pkt received\n", link_co_err); |
1781 | 1695 | goto exit; | |
1782 | /* First failover packet? */ | ||
1783 | if (l_ptr->exp_msg_count == START_CHANGEOVER) | ||
1784 | l_ptr->exp_msg_count = msg_msgcnt(t_msg); | ||
1785 | |||
1786 | /* Should there be an inner packet? */ | ||
1787 | if (l_ptr->exp_msg_count) { | ||
1788 | l_ptr->exp_msg_count--; | ||
1789 | buf = buf_extract(t_buf, INT_H_SIZE); | ||
1790 | if (buf == NULL) { | ||
1791 | pr_warn("%sno inner failover pkt\n", link_co_err); | ||
1792 | goto exit; | ||
1793 | } | ||
1794 | msg = buf_msg(buf); | ||
1795 | |||
1796 | if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) { | ||
1797 | kfree_skb(buf); | ||
1798 | buf = NULL; | ||
1799 | goto exit; | ||
1800 | } | ||
1801 | if (msg_user(msg) == MSG_FRAGMENTER) { | ||
1802 | l_ptr->stats.recv_fragments++; | ||
1803 | tipc_buf_append(&l_ptr->reasm_buf, &buf); | ||
1804 | } | ||
1805 | } | ||
1806 | exit: | ||
1807 | if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) { | ||
1808 | tipc_node_detach_link(l_ptr->owner, l_ptr); | ||
1809 | kfree(l_ptr); | ||
1810 | } | 1696 | } |
1811 | return buf; | 1697 | if (bearer_id >= MAX_BEARERS) |
1812 | } | 1698 | goto exit; |
1813 | 1699 | ||
1814 | /* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent | 1700 | if (bearer_id == link->bearer_id) |
1815 | * via other link as result of a failover (ORIGINAL_MSG) or | 1701 | goto exit; |
1816 | * a new active link (DUPLICATE_MSG). Failover packets are | ||
1817 | * returned to the active link for delivery upwards. | ||
1818 | * Owner node is locked. | ||
1819 | */ | ||
1820 | static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr, | ||
1821 | struct sk_buff **buf) | ||
1822 | { | ||
1823 | struct sk_buff *t_buf = *buf; | ||
1824 | struct tipc_link *l_ptr; | ||
1825 | struct tipc_msg *t_msg = buf_msg(t_buf); | ||
1826 | u32 bearer_id = msg_bearer_id(t_msg); | ||
1827 | 1702 | ||
1828 | *buf = NULL; | 1703 | pl = link->owner->links[bearer_id]; |
1704 | if (pl && tipc_link_is_up(pl)) | ||
1705 | tipc_link_reset(pl); | ||
1829 | 1706 | ||
1830 | if (bearer_id >= MAX_BEARERS) | 1707 | if (link->failover_pkts == FIRST_FAILOVER) |
1708 | link->failover_pkts = msg_msgcnt(msg); | ||
1709 | |||
1710 | /* Should we expect an inner packet? */ | ||
1711 | if (!link->failover_pkts) | ||
1831 | goto exit; | 1712 | goto exit; |
1832 | 1713 | ||
1833 | l_ptr = n_ptr->links[bearer_id]; | 1714 | if (!tipc_msg_extract(*skb, &iskb, &pos)) { |
1834 | if (!l_ptr) | 1715 | pr_warn("%sno inner failover pkt\n", link_co_err); |
1716 | *skb = NULL; | ||
1835 | goto exit; | 1717 | goto exit; |
1718 | } | ||
1719 | link->failover_pkts--; | ||
1720 | *skb = NULL; | ||
1836 | 1721 | ||
1837 | if (msg_type(t_msg) == DUPLICATE_MSG) | 1722 | /* Was this packet already delivered? */ |
1838 | tipc_link_dup_rcv(l_ptr, t_buf); | 1723 | if (less(buf_seqno(iskb), link->failover_checkpt)) { |
1839 | else if (msg_type(t_msg) == ORIGINAL_MSG) | 1724 | kfree_skb(iskb); |
1840 | *buf = tipc_link_failover_rcv(l_ptr, t_buf); | 1725 | iskb = NULL; |
1841 | else | 1726 | goto exit; |
1842 | pr_warn("%sunknown tunnel pkt received\n", link_co_err); | 1727 | } |
1728 | if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) { | ||
1729 | link->stats.recv_fragments++; | ||
1730 | tipc_buf_append(&link->failover_skb, &iskb); | ||
1731 | } | ||
1843 | exit: | 1732 | exit: |
1844 | kfree_skb(t_buf); | 1733 | if (!link->failover_pkts && pl) |
1845 | return *buf != NULL; | 1734 | pl->flags &= ~LINK_FAILINGOVER; |
1735 | kfree_skb(*skb); | ||
1736 | *skb = iskb; | ||
1737 | return *skb; | ||
1846 | } | 1738 | } |
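The rewritten tipc_link_failover_rcv() keeps only a packet counter and a checkpoint: the first tunnelled FAILOVER_MSG announces how many packets will follow, each extracted inner packet decrements the counter, and anything older than the checkpoint is dropped as already delivered. A toy userspace model of that bookkeeping (the FIRST_FAILOVER sentinel value is an assumption here, and real TIPC compares sequence numbers modulo 2^16 rather than with a plain >=):

    #include <stdbool.h>
    #include <stdio.h>

    #define FIRST_FAILOVER 0xffffu                  /* assumed sentinel value */

    struct failover_state {
            unsigned int pkts;                      /* tunnelled pkts still expected */
            unsigned int checkpt;                   /* first seqno not yet delivered */
    };

    /* Returns true if the inner packet should be delivered upwards. */
    static bool failover_accept(struct failover_state *f, unsigned int msgcnt,
                                unsigned int inner_seqno)
    {
            if (f->pkts == FIRST_FAILOVER)          /* first FAILOVER_MSG seen */
                    f->pkts = msgcnt;
            if (!f->pkts)                           /* nothing (more) expected */
                    return false;
            f->pkts--;
            return inner_seqno >= f->checkpt;       /* drop already-delivered pkts */
    }

    int main(void)
    {
            struct failover_state f = { FIRST_FAILOVER, 5 };

            printf("%d\n", failover_accept(&f, 3, 4)); /* 0: before checkpoint */
            printf("%d\n", failover_accept(&f, 3, 5)); /* 1: deliver           */
            printf("%d\n", failover_accept(&f, 3, 6)); /* 1: deliver, last one */
            return 0;
    }
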
1847 | 1739 | ||
1848 | /* | 1740 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) |
1849 | * Bundler functionality: | ||
1850 | */ | ||
1851 | void tipc_link_bundle_rcv(struct sk_buff *buf) | ||
1852 | { | 1741 | { |
1853 | u32 msgcount = msg_msgcnt(buf_msg(buf)); | 1742 | unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4; |
1854 | u32 pos = INT_H_SIZE; | ||
1855 | struct sk_buff *obuf; | ||
1856 | struct tipc_msg *omsg; | ||
1857 | |||
1858 | while (msgcount--) { | ||
1859 | obuf = buf_extract(buf, pos); | ||
1860 | if (obuf == NULL) { | ||
1861 | pr_warn("Link unable to unbundle message(s)\n"); | ||
1862 | break; | ||
1863 | } | ||
1864 | omsg = buf_msg(obuf); | ||
1865 | pos += align(msg_size(omsg)); | ||
1866 | if (msg_isdata(omsg)) { | ||
1867 | if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG)) | ||
1868 | tipc_sk_mcast_rcv(obuf); | ||
1869 | else | ||
1870 | tipc_sk_rcv(obuf); | ||
1871 | } else if (msg_user(omsg) == CONN_MANAGER) { | ||
1872 | tipc_sk_rcv(obuf); | ||
1873 | } else if (msg_user(omsg) == NAME_DISTRIBUTOR) { | ||
1874 | tipc_named_rcv(obuf); | ||
1875 | } else { | ||
1876 | pr_warn("Illegal bundled msg: %u\n", msg_user(omsg)); | ||
1877 | kfree_skb(obuf); | ||
1878 | } | ||
1879 | } | ||
1880 | kfree_skb(buf); | ||
1881 | } | ||
1882 | 1743 | ||
1883 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) | 1744 | if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL)) |
1884 | { | ||
1885 | if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) | ||
1886 | return; | 1745 | return; |
1887 | 1746 | ||
1888 | l_ptr->tolerance = tolerance; | 1747 | l_ptr->tolerance = tol; |
1889 | l_ptr->continuity_interval = | 1748 | l_ptr->cont_intv = msecs_to_jiffies(intv); |
1890 | ((tolerance / 4) > 500) ? 500 : tolerance / 4; | 1749 | l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4); |
1891 | l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4); | ||
1892 | } | 1750 | } |
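The new link_set_supervision_props() derives both timer parameters from the tolerance alone: the probe interval is a quarter of the tolerance capped at 500 ms, and the abort limit is how many quarter-intervals of that probe period fit into the tolerance. In plain milliseconds (ignoring the jiffies rounding the kernel does, and omitting the TIPC_MIN/MAX_LINK_TOL range check) the arithmetic is roughly:

    /* Userspace sketch, milliseconds only. */
    static void supervision_params(unsigned int tol_ms,
                                   unsigned int *intv_ms,
                                   unsigned int *abort_limit)
    {
            *intv_ms = (tol_ms / 4 > 500) ? 500 : tol_ms / 4;  /* probe interval */
            *abort_limit = tol_ms / (*intv_ms / 4);            /* missed probes  */
    }

For the common 1500 ms tolerance this gives a 375 ms probe interval and an abort limit of 16; for a 10 s tolerance the interval caps at 500 ms and the limit becomes 80.
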
1893 | 1751 | ||
1894 | void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) | 1752 | void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) |
1895 | { | 1753 | { |
1896 | /* Data messages from this node, inclusive FIRST_FRAGM */ | 1754 | int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE); |
1897 | l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window; | 1755 | |
1898 | l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4; | 1756 | l->window = win; |
1899 | l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5; | 1757 | l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2; |
1900 | l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6; | 1758 | l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win; |
1901 | /* Transiting data messages,inclusive FIRST_FRAGM */ | 1759 | l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3; |
1902 | l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300; | 1760 | l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2; |
1903 | l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600; | 1761 | l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk; |
1904 | l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900; | ||
1905 | l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200; | ||
1906 | l_ptr->queue_limit[CONN_MANAGER] = 1200; | ||
1907 | l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500; | ||
1908 | l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000; | ||
1909 | /* FRAGMENT and LAST_FRAGMENT packets */ | ||
1910 | l_ptr->queue_limit[MSG_FRAGMENTER] = 4000; | ||
1911 | } | 1762 | } |
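With the queue_limit[] array gone, tipc_link_set_queue_limits() now scales the per-importance backlog thresholds directly from the send window, and caps bulk name-table distribution by how many publication items fit in one MTU. A rough standalone illustration of that scaling (the example constants below are assumptions, not taken from this patch):

    static void backlog_limits(unsigned int win, unsigned int mtu,
                               unsigned int item_size, unsigned int max_pub,
                               unsigned int limit[5])
    {
            limit[0] = win / 2;                     /* TIPC_LOW_IMPORTANCE      */
            limit[1] = win;                         /* TIPC_MEDIUM_IMPORTANCE   */
            limit[2] = win / 2 * 3;                 /* TIPC_HIGH_IMPORTANCE     */
            limit[3] = win * 2;                     /* TIPC_CRITICAL_IMPORTANCE */
            limit[4] = max_pub / (mtu / item_size); /* TIPC_SYSTEM_IMPORTANCE   */
    }

With win = 50, mtu = 1500, item_size = 20 and max_pub = 65535 (assumed values) this yields limits of 25 / 50 / 75 / 100 and a bulk limit of 873.
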
1912 | 1763 | ||
1913 | /* tipc_link_find_owner - locate owner node of link by link's name | 1764 | /* tipc_link_find_owner - locate owner node of link by link's name |
1765 | * @net: the applicable net namespace | ||
1914 | * @name: pointer to link name string | 1766 | * @name: pointer to link name string |
1915 | * @bearer_id: pointer to index in 'node->links' array where the link was found. | 1767 | * @bearer_id: pointer to index in 'node->links' array where the link was found. |
1916 | * | 1768 | * |
1917 | * Returns pointer to node owning the link, or 0 if no matching link is found. | 1769 | * Returns pointer to node owning the link, or 0 if no matching link is found. |
1918 | */ | 1770 | */ |
1919 | static struct tipc_node *tipc_link_find_owner(const char *link_name, | 1771 | static struct tipc_node *tipc_link_find_owner(struct net *net, |
1772 | const char *link_name, | ||
1920 | unsigned int *bearer_id) | 1773 | unsigned int *bearer_id) |
1921 | { | 1774 | { |
1775 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
1922 | struct tipc_link *l_ptr; | 1776 | struct tipc_link *l_ptr; |
1923 | struct tipc_node *n_ptr; | 1777 | struct tipc_node *n_ptr; |
1924 | struct tipc_node *found_node = 0; | 1778 | struct tipc_node *found_node = NULL; |
1925 | int i; | 1779 | int i; |
1926 | 1780 | ||
1927 | *bearer_id = 0; | 1781 | *bearer_id = 0; |
1928 | rcu_read_lock(); | 1782 | rcu_read_lock(); |
1929 | list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { | 1783 | list_for_each_entry_rcu(n_ptr, &tn->node_list, list) { |
1930 | tipc_node_lock(n_ptr); | 1784 | tipc_node_lock(n_ptr); |
1931 | for (i = 0; i < MAX_BEARERS; i++) { | 1785 | for (i = 0; i < MAX_BEARERS; i++) { |
1932 | l_ptr = n_ptr->links[i]; | 1786 | l_ptr = n_ptr->links[i]; |
@@ -1946,148 +1800,6 @@ static struct tipc_node *tipc_link_find_owner(const char *link_name, | |||
1946 | } | 1800 | } |
1947 | 1801 | ||
1948 | /** | 1802 | /** |
1949 | * link_value_is_valid -- validate proposed link tolerance/priority/window | ||
1950 | * | ||
1951 | * @cmd: value type (TIPC_CMD_SET_LINK_*) | ||
1952 | * @new_value: the new value | ||
1953 | * | ||
1954 | * Returns 1 if value is within range, 0 if not. | ||
1955 | */ | ||
1956 | static int link_value_is_valid(u16 cmd, u32 new_value) | ||
1957 | { | ||
1958 | switch (cmd) { | ||
1959 | case TIPC_CMD_SET_LINK_TOL: | ||
1960 | return (new_value >= TIPC_MIN_LINK_TOL) && | ||
1961 | (new_value <= TIPC_MAX_LINK_TOL); | ||
1962 | case TIPC_CMD_SET_LINK_PRI: | ||
1963 | return (new_value <= TIPC_MAX_LINK_PRI); | ||
1964 | case TIPC_CMD_SET_LINK_WINDOW: | ||
1965 | return (new_value >= TIPC_MIN_LINK_WIN) && | ||
1966 | (new_value <= TIPC_MAX_LINK_WIN); | ||
1967 | } | ||
1968 | return 0; | ||
1969 | } | ||
1970 | |||
1971 | /** | ||
1972 | * link_cmd_set_value - change priority/tolerance/window for link/bearer/media | ||
1973 | * @name: ptr to link, bearer, or media name | ||
1974 | * @new_value: new value of link, bearer, or media setting | ||
1975 | * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*) | ||
1976 | * | ||
1977 | * Caller must hold RTNL lock to ensure link/bearer/media is not deleted. | ||
1978 | * | ||
1979 | * Returns 0 if value updated and negative value on error. | ||
1980 | */ | ||
1981 | static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd) | ||
1982 | { | ||
1983 | struct tipc_node *node; | ||
1984 | struct tipc_link *l_ptr; | ||
1985 | struct tipc_bearer *b_ptr; | ||
1986 | struct tipc_media *m_ptr; | ||
1987 | int bearer_id; | ||
1988 | int res = 0; | ||
1989 | |||
1990 | node = tipc_link_find_owner(name, &bearer_id); | ||
1991 | if (node) { | ||
1992 | tipc_node_lock(node); | ||
1993 | l_ptr = node->links[bearer_id]; | ||
1994 | |||
1995 | if (l_ptr) { | ||
1996 | switch (cmd) { | ||
1997 | case TIPC_CMD_SET_LINK_TOL: | ||
1998 | link_set_supervision_props(l_ptr, new_value); | ||
1999 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, | ||
2000 | new_value, 0, 0); | ||
2001 | break; | ||
2002 | case TIPC_CMD_SET_LINK_PRI: | ||
2003 | l_ptr->priority = new_value; | ||
2004 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, | ||
2005 | 0, new_value, 0); | ||
2006 | break; | ||
2007 | case TIPC_CMD_SET_LINK_WINDOW: | ||
2008 | tipc_link_set_queue_limits(l_ptr, new_value); | ||
2009 | break; | ||
2010 | default: | ||
2011 | res = -EINVAL; | ||
2012 | break; | ||
2013 | } | ||
2014 | } | ||
2015 | tipc_node_unlock(node); | ||
2016 | return res; | ||
2017 | } | ||
2018 | |||
2019 | b_ptr = tipc_bearer_find(name); | ||
2020 | if (b_ptr) { | ||
2021 | switch (cmd) { | ||
2022 | case TIPC_CMD_SET_LINK_TOL: | ||
2023 | b_ptr->tolerance = new_value; | ||
2024 | break; | ||
2025 | case TIPC_CMD_SET_LINK_PRI: | ||
2026 | b_ptr->priority = new_value; | ||
2027 | break; | ||
2028 | case TIPC_CMD_SET_LINK_WINDOW: | ||
2029 | b_ptr->window = new_value; | ||
2030 | break; | ||
2031 | default: | ||
2032 | res = -EINVAL; | ||
2033 | break; | ||
2034 | } | ||
2035 | return res; | ||
2036 | } | ||
2037 | |||
2038 | m_ptr = tipc_media_find(name); | ||
2039 | if (!m_ptr) | ||
2040 | return -ENODEV; | ||
2041 | switch (cmd) { | ||
2042 | case TIPC_CMD_SET_LINK_TOL: | ||
2043 | m_ptr->tolerance = new_value; | ||
2044 | break; | ||
2045 | case TIPC_CMD_SET_LINK_PRI: | ||
2046 | m_ptr->priority = new_value; | ||
2047 | break; | ||
2048 | case TIPC_CMD_SET_LINK_WINDOW: | ||
2049 | m_ptr->window = new_value; | ||
2050 | break; | ||
2051 | default: | ||
2052 | res = -EINVAL; | ||
2053 | break; | ||
2054 | } | ||
2055 | return res; | ||
2056 | } | ||
2057 | |||
2058 | struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, | ||
2059 | u16 cmd) | ||
2060 | { | ||
2061 | struct tipc_link_config *args; | ||
2062 | u32 new_value; | ||
2063 | int res; | ||
2064 | |||
2065 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) | ||
2066 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
2067 | |||
2068 | args = (struct tipc_link_config *)TLV_DATA(req_tlv_area); | ||
2069 | new_value = ntohl(args->value); | ||
2070 | |||
2071 | if (!link_value_is_valid(cmd, new_value)) | ||
2072 | return tipc_cfg_reply_error_string( | ||
2073 | "cannot change, value invalid"); | ||
2074 | |||
2075 | if (!strcmp(args->name, tipc_bclink_name)) { | ||
2076 | if ((cmd == TIPC_CMD_SET_LINK_WINDOW) && | ||
2077 | (tipc_bclink_set_queue_limits(new_value) == 0)) | ||
2078 | return tipc_cfg_reply_none(); | ||
2079 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | ||
2080 | " (cannot change setting on broadcast link)"); | ||
2081 | } | ||
2082 | |||
2083 | res = link_cmd_set_value(args->name, new_value, cmd); | ||
2084 | if (res) | ||
2085 | return tipc_cfg_reply_error_string("cannot change link setting"); | ||
2086 | |||
2087 | return tipc_cfg_reply_none(); | ||
2088 | } | ||
2089 | |||
2090 | /** | ||
2091 | * link_reset_statistics - reset link statistics | 1803 | * link_reset_statistics - reset link statistics |
2092 | * @l_ptr: pointer to link | 1804 | * @l_ptr: pointer to link |
2093 | */ | 1805 | */ |
@@ -2098,207 +1810,13 @@ static void link_reset_statistics(struct tipc_link *l_ptr) | |||
2098 | l_ptr->stats.recv_info = l_ptr->next_in_no; | 1810 | l_ptr->stats.recv_info = l_ptr->next_in_no; |
2099 | } | 1811 | } |
2100 | 1812 | ||
2101 | struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) | ||
2102 | { | ||
2103 | char *link_name; | ||
2104 | struct tipc_link *l_ptr; | ||
2105 | struct tipc_node *node; | ||
2106 | unsigned int bearer_id; | ||
2107 | |||
2108 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) | ||
2109 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
2110 | |||
2111 | link_name = (char *)TLV_DATA(req_tlv_area); | ||
2112 | if (!strcmp(link_name, tipc_bclink_name)) { | ||
2113 | if (tipc_bclink_reset_stats()) | ||
2114 | return tipc_cfg_reply_error_string("link not found"); | ||
2115 | return tipc_cfg_reply_none(); | ||
2116 | } | ||
2117 | node = tipc_link_find_owner(link_name, &bearer_id); | ||
2118 | if (!node) | ||
2119 | return tipc_cfg_reply_error_string("link not found"); | ||
2120 | |||
2121 | tipc_node_lock(node); | ||
2122 | l_ptr = node->links[bearer_id]; | ||
2123 | if (!l_ptr) { | ||
2124 | tipc_node_unlock(node); | ||
2125 | return tipc_cfg_reply_error_string("link not found"); | ||
2126 | } | ||
2127 | link_reset_statistics(l_ptr); | ||
2128 | tipc_node_unlock(node); | ||
2129 | return tipc_cfg_reply_none(); | ||
2130 | } | ||
2131 | |||
2132 | /** | ||
2133 | * percent - convert count to a percentage of total (rounding up or down) | ||
2134 | */ | ||
2135 | static u32 percent(u32 count, u32 total) | ||
2136 | { | ||
2137 | return (count * 100 + (total / 2)) / total; | ||
2138 | } | ||
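The percent() helper being removed here rounds to the nearest whole percent by adding half of the total before the integer division. A quick standalone check of the rounding behaviour:

    #include <stdio.h>

    static unsigned int percent(unsigned int count, unsigned int total)
    {
            return (count * 100 + total / 2) / total;
    }

    int main(void)
    {
            /* prints "33 67 13": 1/3 rounds down, 2/3 rounds up, 1/8 = 12.5% rounds up */
            printf("%u %u %u\n", percent(1, 3), percent(2, 3), percent(1, 8));
            return 0;
    }
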
2139 | |||
2140 | /** | ||
2141 | * tipc_link_stats - print link statistics | ||
2142 | * @name: link name | ||
2143 | * @buf: print buffer area | ||
2144 | * @buf_size: size of print buffer area | ||
2145 | * | ||
2146 | * Returns length of print buffer data string (or 0 if error) | ||
2147 | */ | ||
2148 | static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) | ||
2149 | { | ||
2150 | struct tipc_link *l; | ||
2151 | struct tipc_stats *s; | ||
2152 | struct tipc_node *node; | ||
2153 | char *status; | ||
2154 | u32 profile_total = 0; | ||
2155 | unsigned int bearer_id; | ||
2156 | int ret; | ||
2157 | |||
2158 | if (!strcmp(name, tipc_bclink_name)) | ||
2159 | return tipc_bclink_stats(buf, buf_size); | ||
2160 | |||
2161 | node = tipc_link_find_owner(name, &bearer_id); | ||
2162 | if (!node) | ||
2163 | return 0; | ||
2164 | |||
2165 | tipc_node_lock(node); | ||
2166 | |||
2167 | l = node->links[bearer_id]; | ||
2168 | if (!l) { | ||
2169 | tipc_node_unlock(node); | ||
2170 | return 0; | ||
2171 | } | ||
2172 | |||
2173 | s = &l->stats; | ||
2174 | |||
2175 | if (tipc_link_is_active(l)) | ||
2176 | status = "ACTIVE"; | ||
2177 | else if (tipc_link_is_up(l)) | ||
2178 | status = "STANDBY"; | ||
2179 | else | ||
2180 | status = "DEFUNCT"; | ||
2181 | |||
2182 | ret = tipc_snprintf(buf, buf_size, "Link <%s>\n" | ||
2183 | " %s MTU:%u Priority:%u Tolerance:%u ms" | ||
2184 | " Window:%u packets\n", | ||
2185 | l->name, status, l->max_pkt, l->priority, | ||
2186 | l->tolerance, l->queue_limit[0]); | ||
2187 | |||
2188 | ret += tipc_snprintf(buf + ret, buf_size - ret, | ||
2189 | " RX packets:%u fragments:%u/%u bundles:%u/%u\n", | ||
2190 | l->next_in_no - s->recv_info, s->recv_fragments, | ||
2191 | s->recv_fragmented, s->recv_bundles, | ||
2192 | s->recv_bundled); | ||
2193 | |||
2194 | ret += tipc_snprintf(buf + ret, buf_size - ret, | ||
2195 | " TX packets:%u fragments:%u/%u bundles:%u/%u\n", | ||
2196 | l->next_out_no - s->sent_info, s->sent_fragments, | ||
2197 | s->sent_fragmented, s->sent_bundles, | ||
2198 | s->sent_bundled); | ||
2199 | |||
2200 | profile_total = s->msg_length_counts; | ||
2201 | if (!profile_total) | ||
2202 | profile_total = 1; | ||
2203 | |||
2204 | ret += tipc_snprintf(buf + ret, buf_size - ret, | ||
2205 | " TX profile sample:%u packets average:%u octets\n" | ||
2206 | " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% " | ||
2207 | "-16384:%u%% -32768:%u%% -66000:%u%%\n", | ||
2208 | s->msg_length_counts, | ||
2209 | s->msg_lengths_total / profile_total, | ||
2210 | percent(s->msg_length_profile[0], profile_total), | ||
2211 | percent(s->msg_length_profile[1], profile_total), | ||
2212 | percent(s->msg_length_profile[2], profile_total), | ||
2213 | percent(s->msg_length_profile[3], profile_total), | ||
2214 | percent(s->msg_length_profile[4], profile_total), | ||
2215 | percent(s->msg_length_profile[5], profile_total), | ||
2216 | percent(s->msg_length_profile[6], profile_total)); | ||
2217 | |||
2218 | ret += tipc_snprintf(buf + ret, buf_size - ret, | ||
2219 | " RX states:%u probes:%u naks:%u defs:%u" | ||
2220 | " dups:%u\n", s->recv_states, s->recv_probes, | ||
2221 | s->recv_nacks, s->deferred_recv, s->duplicates); | ||
2222 | |||
2223 | ret += tipc_snprintf(buf + ret, buf_size - ret, | ||
2224 | " TX states:%u probes:%u naks:%u acks:%u" | ||
2225 | " dups:%u\n", s->sent_states, s->sent_probes, | ||
2226 | s->sent_nacks, s->sent_acks, s->retransmitted); | ||
2227 | |||
2228 | ret += tipc_snprintf(buf + ret, buf_size - ret, | ||
2229 | " Congestion link:%u Send queue" | ||
2230 | " max:%u avg:%u\n", s->link_congs, | ||
2231 | s->max_queue_sz, s->queue_sz_counts ? | ||
2232 | (s->accu_queue_sz / s->queue_sz_counts) : 0); | ||
2233 | |||
2234 | tipc_node_unlock(node); | ||
2235 | return ret; | ||
2236 | } | ||
2237 | |||
2238 | struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space) | ||
2239 | { | ||
2240 | struct sk_buff *buf; | ||
2241 | struct tlv_desc *rep_tlv; | ||
2242 | int str_len; | ||
2243 | int pb_len; | ||
2244 | char *pb; | ||
2245 | |||
2246 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) | ||
2247 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
2248 | |||
2249 | buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN)); | ||
2250 | if (!buf) | ||
2251 | return NULL; | ||
2252 | |||
2253 | rep_tlv = (struct tlv_desc *)buf->data; | ||
2254 | pb = TLV_DATA(rep_tlv); | ||
2255 | pb_len = ULTRA_STRING_MAX_LEN; | ||
2256 | str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area), | ||
2257 | pb, pb_len); | ||
2258 | if (!str_len) { | ||
2259 | kfree_skb(buf); | ||
2260 | return tipc_cfg_reply_error_string("link not found"); | ||
2261 | } | ||
2262 | str_len += 1; /* for "\0" */ | ||
2263 | skb_put(buf, TLV_SPACE(str_len)); | ||
2264 | TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); | ||
2265 | |||
2266 | return buf; | ||
2267 | } | ||
2268 | |||
2269 | /** | ||
2270 | * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination | ||
2271 | * @dest: network address of destination node | ||
2272 | * @selector: used to select from set of active links | ||
2273 | * | ||
2274 | * If no active link can be found, uses default maximum packet size. | ||
2275 | */ | ||
2276 | u32 tipc_link_get_max_pkt(u32 dest, u32 selector) | ||
2277 | { | ||
2278 | struct tipc_node *n_ptr; | ||
2279 | struct tipc_link *l_ptr; | ||
2280 | u32 res = MAX_PKT_DEFAULT; | ||
2281 | |||
2282 | if (dest == tipc_own_addr) | ||
2283 | return MAX_MSG_SIZE; | ||
2284 | |||
2285 | n_ptr = tipc_node_find(dest); | ||
2286 | if (n_ptr) { | ||
2287 | tipc_node_lock(n_ptr); | ||
2288 | l_ptr = n_ptr->active_links[selector & 1]; | ||
2289 | if (l_ptr) | ||
2290 | res = l_ptr->max_pkt; | ||
2291 | tipc_node_unlock(n_ptr); | ||
2292 | } | ||
2293 | return res; | ||
2294 | } | ||
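The deleted tipc_link_get_max_pkt() shows how a per-destination MTU used to be looked up: the low bit of the caller's selector picks one of the node's two active links, with MAX_PKT_DEFAULT as the fallback when no link is up. A hedged sketch of that selection (names illustrative only):

    /* Sketch only: mimics active_links[selector & 1] with a fallback MTU. */
    static unsigned int pick_link_mtu(const unsigned int active_mtu[2],
                                      unsigned int selector, unsigned int dflt)
    {
            unsigned int mtu = active_mtu[selector & 1];

            return mtu ? mtu : dflt;
    }
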
2295 | |||
2296 | static void link_print(struct tipc_link *l_ptr, const char *str) | 1813 | static void link_print(struct tipc_link *l_ptr, const char *str) |
2297 | { | 1814 | { |
1815 | struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id); | ||
2298 | struct tipc_bearer *b_ptr; | 1816 | struct tipc_bearer *b_ptr; |
2299 | 1817 | ||
2300 | rcu_read_lock(); | 1818 | rcu_read_lock(); |
2301 | b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]); | 1819 | b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]); |
2302 | if (b_ptr) | 1820 | if (b_ptr) |
2303 | pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name); | 1821 | pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name); |
2304 | rcu_read_unlock(); | 1822 | rcu_read_unlock(); |
@@ -2362,6 +1880,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) | |||
2362 | struct tipc_link *link; | 1880 | struct tipc_link *link; |
2363 | struct tipc_node *node; | 1881 | struct tipc_node *node; |
2364 | struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; | 1882 | struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; |
1883 | struct net *net = sock_net(skb->sk); | ||
2365 | 1884 | ||
2366 | if (!info->attrs[TIPC_NLA_LINK]) | 1885 | if (!info->attrs[TIPC_NLA_LINK]) |
2367 | return -EINVAL; | 1886 | return -EINVAL; |
@@ -2377,7 +1896,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) | |||
2377 | 1896 | ||
2378 | name = nla_data(attrs[TIPC_NLA_LINK_NAME]); | 1897 | name = nla_data(attrs[TIPC_NLA_LINK_NAME]); |
2379 | 1898 | ||
2380 | node = tipc_link_find_owner(name, &bearer_id); | 1899 | node = tipc_link_find_owner(net, name, &bearer_id); |
2381 | if (!node) | 1900 | if (!node) |
2382 | return -EINVAL; | 1901 | return -EINVAL; |
2383 | 1902 | ||
@@ -2404,14 +1923,14 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) | |||
2404 | 1923 | ||
2405 | tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); | 1924 | tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); |
2406 | link_set_supervision_props(link, tol); | 1925 | link_set_supervision_props(link, tol); |
2407 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0); | 1926 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0); |
2408 | } | 1927 | } |
2409 | if (props[TIPC_NLA_PROP_PRIO]) { | 1928 | if (props[TIPC_NLA_PROP_PRIO]) { |
2410 | u32 prio; | 1929 | u32 prio; |
2411 | 1930 | ||
2412 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); | 1931 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); |
2413 | link->priority = prio; | 1932 | link->priority = prio; |
2414 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0); | 1933 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio); |
2415 | } | 1934 | } |
2416 | if (props[TIPC_NLA_PROP_WIN]) { | 1935 | if (props[TIPC_NLA_PROP_WIN]) { |
2417 | u32 win; | 1936 | u32 win; |
@@ -2493,14 +2012,16 @@ msg_full: | |||
2493 | } | 2012 | } |
2494 | 2013 | ||
2495 | /* Caller should hold appropriate locks to protect the link */ | 2014 | /* Caller should hold appropriate locks to protect the link */ |
2496 | static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link) | 2015 | static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, |
2016 | struct tipc_link *link) | ||
2497 | { | 2017 | { |
2498 | int err; | 2018 | int err; |
2499 | void *hdr; | 2019 | void *hdr; |
2500 | struct nlattr *attrs; | 2020 | struct nlattr *attrs; |
2501 | struct nlattr *prop; | 2021 | struct nlattr *prop; |
2022 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
2502 | 2023 | ||
2503 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, | 2024 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, |
2504 | NLM_F_MULTI, TIPC_NL_LINK_GET); | 2025 | NLM_F_MULTI, TIPC_NL_LINK_GET); |
2505 | if (!hdr) | 2026 | if (!hdr) |
2506 | return -EMSGSIZE; | 2027 | return -EMSGSIZE; |
@@ -2512,9 +2033,9 @@ static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link) | |||
2512 | if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name)) | 2033 | if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name)) |
2513 | goto attr_msg_full; | 2034 | goto attr_msg_full; |
2514 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, | 2035 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, |
2515 | tipc_cluster_mask(tipc_own_addr))) | 2036 | tipc_cluster_mask(tn->own_addr))) |
2516 | goto attr_msg_full; | 2037 | goto attr_msg_full; |
2517 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt)) | 2038 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) |
2518 | goto attr_msg_full; | 2039 | goto attr_msg_full; |
2519 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) | 2040 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) |
2520 | goto attr_msg_full; | 2041 | goto attr_msg_full; |
@@ -2536,7 +2057,7 @@ static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link) | |||
2536 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) | 2057 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) |
2537 | goto prop_msg_full; | 2058 | goto prop_msg_full; |
2538 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, | 2059 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, |
2539 | link->queue_limit[TIPC_LOW_IMPORTANCE])) | 2060 | link->window)) |
2540 | goto prop_msg_full; | 2061 | goto prop_msg_full; |
2541 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) | 2062 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) |
2542 | goto prop_msg_full; | 2063 | goto prop_msg_full; |
@@ -2562,9 +2083,8 @@ msg_full: | |||
2562 | } | 2083 | } |
2563 | 2084 | ||
2564 | /* Caller should hold node lock */ | 2085 | /* Caller should hold node lock */ |
2565 | static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg, | 2086 | static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg, |
2566 | struct tipc_node *node, | 2087 | struct tipc_node *node, u32 *prev_link) |
2567 | u32 *prev_link) | ||
2568 | { | 2088 | { |
2569 | u32 i; | 2089 | u32 i; |
2570 | int err; | 2090 | int err; |
@@ -2575,7 +2095,7 @@ static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg, | |||
2575 | if (!node->links[i]) | 2095 | if (!node->links[i]) |
2576 | continue; | 2096 | continue; |
2577 | 2097 | ||
2578 | err = __tipc_nl_add_link(msg, node->links[i]); | 2098 | err = __tipc_nl_add_link(net, msg, node->links[i]); |
2579 | if (err) | 2099 | if (err) |
2580 | return err; | 2100 | return err; |
2581 | } | 2101 | } |
@@ -2586,6 +2106,8 @@ static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg, | |||
2586 | 2106 | ||
2587 | int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) | 2107 | int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) |
2588 | { | 2108 | { |
2109 | struct net *net = sock_net(skb->sk); | ||
2110 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
2589 | struct tipc_node *node; | 2111 | struct tipc_node *node; |
2590 | struct tipc_nl_msg msg; | 2112 | struct tipc_nl_msg msg; |
2591 | u32 prev_node = cb->args[0]; | 2113 | u32 prev_node = cb->args[0]; |
@@ -2601,9 +2123,8 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2601 | msg.seq = cb->nlh->nlmsg_seq; | 2123 | msg.seq = cb->nlh->nlmsg_seq; |
2602 | 2124 | ||
2603 | rcu_read_lock(); | 2125 | rcu_read_lock(); |
2604 | |||
2605 | if (prev_node) { | 2126 | if (prev_node) { |
2606 | node = tipc_node_find(prev_node); | 2127 | node = tipc_node_find(net, prev_node); |
2607 | if (!node) { | 2128 | if (!node) { |
2608 | /* We never set seq or call nl_dump_check_consistent() | 2129 | /* We never set seq or call nl_dump_check_consistent() |
2609 | * this means that setting prev_seq here will cause the | 2130 | * this means that setting prev_seq here will cause the |
@@ -2614,24 +2135,29 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2614 | cb->prev_seq = 1; | 2135 | cb->prev_seq = 1; |
2615 | goto out; | 2136 | goto out; |
2616 | } | 2137 | } |
2138 | tipc_node_put(node); | ||
2617 | 2139 | ||
2618 | list_for_each_entry_continue_rcu(node, &tipc_node_list, list) { | 2140 | list_for_each_entry_continue_rcu(node, &tn->node_list, |
2141 | list) { | ||
2619 | tipc_node_lock(node); | 2142 | tipc_node_lock(node); |
2620 | err = __tipc_nl_add_node_links(&msg, node, &prev_link); | 2143 | err = __tipc_nl_add_node_links(net, &msg, node, |
2144 | &prev_link); | ||
2621 | tipc_node_unlock(node); | 2145 | tipc_node_unlock(node); |
2146 | tipc_node_put(node); | ||
2622 | if (err) | 2147 | if (err) |
2623 | goto out; | 2148 | goto out; |
2624 | 2149 | ||
2625 | prev_node = node->addr; | 2150 | prev_node = node->addr; |
2626 | } | 2151 | } |
2627 | } else { | 2152 | } else { |
2628 | err = tipc_nl_add_bc_link(&msg); | 2153 | err = tipc_nl_add_bc_link(net, &msg); |
2629 | if (err) | 2154 | if (err) |
2630 | goto out; | 2155 | goto out; |
2631 | 2156 | ||
2632 | list_for_each_entry_rcu(node, &tipc_node_list, list) { | 2157 | list_for_each_entry_rcu(node, &tn->node_list, list) { |
2633 | tipc_node_lock(node); | 2158 | tipc_node_lock(node); |
2634 | err = __tipc_nl_add_node_links(&msg, node, &prev_link); | 2159 | err = __tipc_nl_add_node_links(net, &msg, node, |
2160 | &prev_link); | ||
2635 | tipc_node_unlock(node); | 2161 | tipc_node_unlock(node); |
2636 | if (err) | 2162 | if (err) |
2637 | goto out; | 2163 | goto out; |
@@ -2652,6 +2178,7 @@ out: | |||
2652 | 2178 | ||
2653 | int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) | 2179 | int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) |
2654 | { | 2180 | { |
2181 | struct net *net = genl_info_net(info); | ||
2655 | struct sk_buff *ans_skb; | 2182 | struct sk_buff *ans_skb; |
2656 | struct tipc_nl_msg msg; | 2183 | struct tipc_nl_msg msg; |
2657 | struct tipc_link *link; | 2184 | struct tipc_link *link; |
@@ -2664,7 +2191,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) | |||
2664 | return -EINVAL; | 2191 | return -EINVAL; |
2665 | 2192 | ||
2666 | name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]); | 2193 | name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]); |
2667 | node = tipc_link_find_owner(name, &bearer_id); | 2194 | node = tipc_link_find_owner(net, name, &bearer_id); |
2668 | if (!node) | 2195 | if (!node) |
2669 | return -EINVAL; | 2196 | return -EINVAL; |
2670 | 2197 | ||
@@ -2683,7 +2210,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) | |||
2683 | goto err_out; | 2210 | goto err_out; |
2684 | } | 2211 | } |
2685 | 2212 | ||
2686 | err = __tipc_nl_add_link(&msg, link); | 2213 | err = __tipc_nl_add_link(net, &msg, link); |
2687 | if (err) | 2214 | if (err) |
2688 | goto err_out; | 2215 | goto err_out; |
2689 | 2216 | ||
@@ -2706,6 +2233,7 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info) | |||
2706 | struct tipc_link *link; | 2233 | struct tipc_link *link; |
2707 | struct tipc_node *node; | 2234 | struct tipc_node *node; |
2708 | struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; | 2235 | struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; |
2236 | struct net *net = sock_net(skb->sk); | ||
2709 | 2237 | ||
2710 | if (!info->attrs[TIPC_NLA_LINK]) | 2238 | if (!info->attrs[TIPC_NLA_LINK]) |
2711 | return -EINVAL; | 2239 | return -EINVAL; |
@@ -2722,13 +2250,13 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info) | |||
2722 | link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]); | 2250 | link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]); |
2723 | 2251 | ||
2724 | if (strcmp(link_name, tipc_bclink_name) == 0) { | 2252 | if (strcmp(link_name, tipc_bclink_name) == 0) { |
2725 | err = tipc_bclink_reset_stats(); | 2253 | err = tipc_bclink_reset_stats(net); |
2726 | if (err) | 2254 | if (err) |
2727 | return err; | 2255 | return err; |
2728 | return 0; | 2256 | return 0; |
2729 | } | 2257 | } |
2730 | 2258 | ||
2731 | node = tipc_link_find_owner(link_name, &bearer_id); | 2259 | node = tipc_link_find_owner(net, link_name, &bearer_id); |
2732 | if (!node) | 2260 | if (!node) |
2733 | return -EINVAL; | 2261 | return -EINVAL; |
2734 | 2262 | ||
diff --git a/net/tipc/link.h b/net/tipc/link.h index 55812e87ca1e..b5b4e3554d4e 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h | |||
@@ -41,6 +41,10 @@ | |||
41 | #include "msg.h" | 41 | #include "msg.h" |
42 | #include "node.h" | 42 | #include "node.h" |
43 | 43 | ||
44 | /* TIPC-specific error codes | ||
45 | */ | ||
46 | #define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */ | ||
47 | |||
44 | /* Out-of-range value for link sequence numbers | 48 | /* Out-of-range value for link sequence numbers |
45 | */ | 49 | */ |
46 | #define INVALID_LINK_SEQ 0x10000 | 50 | #define INVALID_LINK_SEQ 0x10000 |
@@ -54,8 +58,10 @@ | |||
54 | 58 | ||
55 | /* Link endpoint execution states | 59 | /* Link endpoint execution states |
56 | */ | 60 | */ |
57 | #define LINK_STARTED 0x0001 | 61 | #define LINK_STARTED 0x0001 |
58 | #define LINK_STOPPED 0x0002 | 62 | #define LINK_STOPPED 0x0002 |
63 | #define LINK_SYNCHING 0x0004 | ||
64 | #define LINK_FAILINGOVER 0x0008 | ||
59 | 65 | ||
60 | /* Starting value for maximum packet size negotiation on unicast links | 66 | /* Starting value for maximum packet size negotiation on unicast links |
61 | * (unless bearer MTU is less) | 67 | * (unless bearer MTU is less) |
@@ -99,13 +105,14 @@ struct tipc_stats { | |||
99 | * @media_addr: media address to use when sending messages over link | 105 | * @media_addr: media address to use when sending messages over link |
100 | * @timer: link timer | 106 | * @timer: link timer |
101 | * @owner: pointer to peer node | 107 | * @owner: pointer to peer node |
108 | * @refcnt: reference counter for permanent references (owner node & timer) | ||
102 | * @flags: execution state flags for link endpoint instance | 109 | * @flags: execution state flags for link endpoint instance |
103 | * @checkpoint: reference point for triggering link continuity checking | 110 | * @checkpoint: reference point for triggering link continuity checking |
104 | * @peer_session: link session # being used by peer end of link | 111 | * @peer_session: link session # being used by peer end of link |
105 | * @peer_bearer_id: bearer id used by link's peer endpoint | 112 | * @peer_bearer_id: bearer id used by link's peer endpoint |
106 | * @bearer_id: local bearer id used by link | 113 | * @bearer_id: local bearer id used by link |
107 | * @tolerance: minimum link continuity loss needed to reset link [in ms] | 114 | * @tolerance: minimum link continuity loss needed to reset link [in ms] |
108 | * @continuity_interval: link continuity testing interval [in ms] | 115 | * @cont_intv: link continuity testing interval |
109 | * @abort_limit: # of unacknowledged continuity probes needed to reset link | 116 | * @abort_limit: # of unacknowledged continuity probes needed to reset link |
110 | * @state: current state of link FSM | 117 | * @state: current state of link FSM |
111 | * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state | 118 | * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state |
@@ -113,21 +120,23 @@ struct tipc_stats { | |||
113 | * @pmsg: convenience pointer to "proto_msg" field | 120 | * @pmsg: convenience pointer to "proto_msg" field |
114 | * @priority: current link priority | 121 | * @priority: current link priority |
115 | * @net_plane: current link network plane ('A' through 'H') | 122 | * @net_plane: current link network plane ('A' through 'H') |
116 | * @queue_limit: outbound message queue congestion thresholds (indexed by user) | 123 | * @backlog_limit: backlog queue congestion thresholds (indexed by importance) |
117 | * @exp_msg_count: # of tunnelled messages expected during link changeover | 124 | * @exp_msg_count: # of tunnelled messages expected during link changeover |
118 | * @reset_checkpoint: seq # of last acknowledged message at time of link reset | 125 | * @reset_checkpoint: seq # of last acknowledged message at time of link reset |
119 | * @max_pkt: current maximum packet size for this link | 126 | * @mtu: current maximum packet size for this link |
120 | * @max_pkt_target: desired maximum packet size for this link | 127 | * @advertised_mtu: advertised own mtu when link is being established |
121 | * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target) | 128 | * @transmitq: queue for sent, non-acked messages |
122 | * @outqueue: outbound message queue | 129 | * @backlogq: queue for messages waiting to be sent |
123 | * @next_out_no: next sequence number to use for outbound messages | 130 | * @next_out_no: next sequence number to use for outbound messages |
124 | * @last_retransmitted: sequence number of most recently retransmitted message | 131 | * @last_retransmitted: sequence number of most recently retransmitted message |
125 | * @stale_count: # of identical retransmit requests made by peer | 132 | * @stale_count: # of identical retransmit requests made by peer |
126 | * @next_in_no: next sequence number to expect for inbound messages | 133 | * @next_in_no: next sequence number to expect for inbound messages |
127 | * @deferred_queue: deferred queue saved OOS b'cast message received from node | 134 | * @deferred_queue: deferred queue saved OOS b'cast message received from node |
128 | * @unacked_window: # of inbound messages rx'd without ack'ing back to peer | 135 | * @unacked_window: # of inbound messages rx'd without ack'ing back to peer |
136 | * @inputq: buffer queue for messages to be delivered upwards | ||
137 | * @namedq: buffer queue for name table messages to be delivered upwards | ||
129 | * @next_out: ptr to first unsent outbound message in queue | 138 | * @next_out: ptr to first unsent outbound message in queue |
130 | * @waiting_sks: linked list of sockets waiting for link congestion to abate | 139 | * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate |
131 | * @long_msg_seq_no: next identifier to use for outbound fragmented messages | 140 | * @long_msg_seq_no: next identifier to use for outbound fragmented messages |
132 | * @reasm_buf: head of partially reassembled inbound message fragments | 141 | * @reasm_buf: head of partially reassembled inbound message fragments |
133 | * @stats: collects statistics regarding link activity | 142 | * @stats: collects statistics regarding link activity |
@@ -138,6 +147,7 @@ struct tipc_link { | |||
138 | struct tipc_media_addr media_addr; | 147 | struct tipc_media_addr media_addr; |
139 | struct timer_list timer; | 148 | struct timer_list timer; |
140 | struct tipc_node *owner; | 149 | struct tipc_node *owner; |
150 | struct kref ref; | ||
141 | 151 | ||
142 | /* Management and link supervision data */ | 152 | /* Management and link supervision data */ |
143 | unsigned int flags; | 153 | unsigned int flags; |
@@ -146,7 +156,7 @@ struct tipc_link { | |||
146 | u32 peer_bearer_id; | 156 | u32 peer_bearer_id; |
147 | u32 bearer_id; | 157 | u32 bearer_id; |
148 | u32 tolerance; | 158 | u32 tolerance; |
149 | u32 continuity_interval; | 159 | unsigned long cont_intv; |
150 | u32 abort_limit; | 160 | u32 abort_limit; |
151 | int state; | 161 | int state; |
152 | u32 fsm_msg_cnt; | 162 | u32 fsm_msg_cnt; |
@@ -157,34 +167,40 @@ struct tipc_link { | |||
157 | struct tipc_msg *pmsg; | 167 | struct tipc_msg *pmsg; |
158 | u32 priority; | 168 | u32 priority; |
159 | char net_plane; | 169 | char net_plane; |
160 | u32 queue_limit[15]; /* queue_limit[0]==window limit */ | 170 | u16 synch_point; |
161 | 171 | ||
162 | /* Changeover */ | 172 | /* Failover */ |
163 | u32 exp_msg_count; | 173 | u16 failover_pkts; |
164 | u32 reset_checkpoint; | 174 | u16 failover_checkpt; |
175 | struct sk_buff *failover_skb; | ||
165 | 176 | ||
166 | /* Max packet negotiation */ | 177 | /* Max packet negotiation */ |
167 | u32 max_pkt; | 178 | u16 mtu; |
168 | u32 max_pkt_target; | 179 | u16 advertised_mtu; |
169 | u32 max_pkt_probes; | ||
170 | 180 | ||
171 | /* Sending */ | 181 | /* Sending */ |
172 | struct sk_buff_head outqueue; | 182 | struct sk_buff_head transmq; |
183 | struct sk_buff_head backlogq; | ||
184 | struct { | ||
185 | u16 len; | ||
186 | u16 limit; | ||
187 | } backlog[5]; | ||
173 | u32 next_out_no; | 188 | u32 next_out_no; |
189 | u32 window; | ||
174 | u32 last_retransmitted; | 190 | u32 last_retransmitted; |
175 | u32 stale_count; | 191 | u32 stale_count; |
176 | 192 | ||
177 | /* Reception */ | 193 | /* Reception */ |
178 | u32 next_in_no; | 194 | u32 next_in_no; |
179 | struct sk_buff_head deferred_queue; | 195 | u32 rcv_unacked; |
180 | u32 unacked_window; | 196 | struct sk_buff_head deferdq; |
197 | struct sk_buff_head inputq; | ||
198 | struct sk_buff_head namedq; | ||
181 | 199 | ||
182 | /* Congestion handling */ | 200 | /* Congestion handling */ |
183 | struct sk_buff *next_out; | 201 | struct sk_buff_head wakeupq; |
184 | struct sk_buff_head waiting_sks; | ||
185 | 202 | ||
186 | /* Fragmentation/reassembly */ | 203 | /* Fragmentation/reassembly */ |
187 | u32 long_msg_seq_no; | ||
188 | struct sk_buff *reasm_buf; | 204 | struct sk_buff *reasm_buf; |
189 | 205 | ||
190 | /* Statistics */ | 206 | /* Statistics */ |
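The transmq/backlogq split above pairs with the ELINKCONG define added earlier in this header: once a backlog level has reached its per-importance limit, a send attempt is refused with link congestion (mapped onto EAGAIN) and the sender waits on the wakeupq until the backlog drains. A minimal sketch of such an admission check (not the kernel code, field names assumed):

    #include <errno.h>

    #define ELINKCONG EAGAIN        /* link congestion <=> resource unavailable */

    struct backlog_level {
            unsigned int len;       /* messages currently queued at this level */
            unsigned int limit;     /* congestion threshold for this level     */
    };

    static int backlog_admit(struct backlog_level *lvl)
    {
            if (lvl->len >= lvl->limit)
                    return -ELINKCONG;      /* caller queues a wakeup and waits */
            lvl->len++;
            return 0;
    }
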
@@ -196,30 +212,26 @@ struct tipc_port; | |||
196 | struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, | 212 | struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, |
197 | struct tipc_bearer *b_ptr, | 213 | struct tipc_bearer *b_ptr, |
198 | const struct tipc_media_addr *media_addr); | 214 | const struct tipc_media_addr *media_addr); |
199 | void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down); | 215 | void tipc_link_delete(struct tipc_link *link); |
216 | void tipc_link_delete_list(struct net *net, unsigned int bearer_id, | ||
217 | bool shutting_down); | ||
200 | void tipc_link_failover_send_queue(struct tipc_link *l_ptr); | 218 | void tipc_link_failover_send_queue(struct tipc_link *l_ptr); |
201 | void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest); | 219 | void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest); |
202 | void tipc_link_reset_fragments(struct tipc_link *l_ptr); | 220 | void tipc_link_reset_fragments(struct tipc_link *l_ptr); |
203 | int tipc_link_is_up(struct tipc_link *l_ptr); | 221 | int tipc_link_is_up(struct tipc_link *l_ptr); |
204 | int tipc_link_is_active(struct tipc_link *l_ptr); | 222 | int tipc_link_is_active(struct tipc_link *l_ptr); |
205 | void tipc_link_purge_queues(struct tipc_link *l_ptr); | 223 | void tipc_link_purge_queues(struct tipc_link *l_ptr); |
206 | struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, | ||
207 | int req_tlv_space, | ||
208 | u16 cmd); | ||
209 | struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, | ||
210 | int req_tlv_space); | ||
211 | struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, | ||
212 | int req_tlv_space); | ||
213 | void tipc_link_reset_all(struct tipc_node *node); | 224 | void tipc_link_reset_all(struct tipc_node *node); |
214 | void tipc_link_reset(struct tipc_link *l_ptr); | 225 | void tipc_link_reset(struct tipc_link *l_ptr); |
215 | void tipc_link_reset_list(unsigned int bearer_id); | 226 | void tipc_link_reset_list(struct net *net, unsigned int bearer_id); |
216 | int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector); | 227 | int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, |
217 | int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector); | 228 | u32 selector); |
218 | int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list); | 229 | int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest, |
219 | u32 tipc_link_get_max_pkt(u32 dest, u32 selector); | 230 | u32 selector); |
220 | void tipc_link_bundle_rcv(struct sk_buff *buf); | 231 | int __tipc_link_xmit(struct net *net, struct tipc_link *link, |
232 | struct sk_buff_head *list); | ||
221 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, | 233 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, |
222 | u32 gap, u32 tolerance, u32 priority, u32 acked_mtu); | 234 | u32 gap, u32 tolerance, u32 priority); |
223 | void tipc_link_push_packets(struct tipc_link *l_ptr); | 235 | void tipc_link_push_packets(struct tipc_link *l_ptr); |
224 | u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf); | 236 | u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf); |
225 | void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window); | 237 | void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window); |
@@ -233,6 +245,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info); | |||
233 | int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info); | 245 | int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info); |
234 | int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info); | 246 | int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info); |
235 | int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]); | 247 | int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]); |
248 | void link_prepare_wakeup(struct tipc_link *l); | ||
236 | 249 | ||
237 | /* | 250 | /* |
238 | * Link sequence number manipulation routines (uses modulo 2**16 arithmetic) | 251 | * Link sequence number manipulation routines (uses modulo 2**16 arithmetic) |
@@ -267,6 +280,10 @@ static inline u32 lesser(u32 left, u32 right) | |||
267 | return less_eq(left, right) ? left : right; | 280 | return less_eq(left, right) ? left : right; |
268 | } | 281 | } |
269 | 282 | ||
283 | static inline u32 link_own_addr(struct tipc_link *l) | ||
284 | { | ||
285 | return msg_prevnode(l->pmsg); | ||
286 | } | ||
270 | 287 | ||
271 | /* | 288 | /* |
272 | * Link status checking routines | 289 | * Link status checking routines |
@@ -291,9 +308,4 @@ static inline int link_reset_reset(struct tipc_link *l_ptr) | |||
291 | return l_ptr->state == RESET_RESET; | 308 | return l_ptr->state == RESET_RESET; |
292 | } | 309 | } |
293 | 310 | ||
294 | static inline int link_congested(struct tipc_link *l_ptr) | ||
295 | { | ||
296 | return skb_queue_len(&l_ptr->outqueue) >= l_ptr->queue_limit[0]; | ||
297 | } | ||
298 | |||
299 | #endif | 311 | #endif |
diff --git a/net/tipc/log.c b/net/tipc/log.c deleted file mode 100644 index abef644f27d8..000000000000 --- a/net/tipc/log.c +++ /dev/null | |||
@@ -1,55 +0,0 @@ | |||
1 | /* | ||
2 | * net/tipc/log.c: TIPC print buffer routines for debugging | ||
3 | * | ||
4 | * Copyright (c) 1996-2006, Ericsson AB | ||
5 | * Copyright (c) 2005-2007, Wind River Systems | ||
6 | * All rights reserved. | ||
7 | * | ||
8 | * Redistribution and use in source and binary forms, with or without | ||
9 | * modification, are permitted provided that the following conditions are met: | ||
10 | * | ||
11 | * 1. Redistributions of source code must retain the above copyright | ||
12 | * notice, this list of conditions and the following disclaimer. | ||
13 | * 2. Redistributions in binary form must reproduce the above copyright | ||
14 | * notice, this list of conditions and the following disclaimer in the | ||
15 | * documentation and/or other materials provided with the distribution. | ||
16 | * 3. Neither the names of the copyright holders nor the names of its | ||
17 | * contributors may be used to endorse or promote products derived from | ||
18 | * this software without specific prior written permission. | ||
19 | * | ||
20 | * Alternatively, this software may be distributed under the terms of the | ||
21 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
22 | * Software Foundation. | ||
23 | * | ||
24 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
25 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
28 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
29 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
32 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
33 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
34 | * POSSIBILITY OF SUCH DAMAGE. | ||
35 | */ | ||
36 | |||
37 | #include "core.h" | ||
38 | #include "config.h" | ||
39 | |||
40 | /** | ||
41 | * tipc_snprintf - append formatted output to print buffer | ||
42 | * @buf: pointer to print buffer | ||
43 | * @len: buffer length | ||
44 | * @fmt: formatted info to be printed | ||
45 | */ | ||
46 | int tipc_snprintf(char *buf, int len, const char *fmt, ...) | ||
47 | { | ||
48 | int i; | ||
49 | va_list args; | ||
50 | |||
51 | va_start(args, fmt); | ||
52 | i = vscnprintf(buf, len, fmt, args); | ||
53 | va_end(args); | ||
54 | return i; | ||
55 | } | ||
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index a687b30a699c..c3e96e815418 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/msg.c: TIPC message header routines | 2 | * net/tipc/msg.c: TIPC message header routines |
3 | * | 3 | * |
4 | * Copyright (c) 2000-2006, 2014, Ericsson AB | 4 | * Copyright (c) 2000-2006, 2014-2015, Ericsson AB |
5 | * Copyright (c) 2005, 2010-2011, Wind River Systems | 5 | * Copyright (c) 2005, 2010-2011, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
@@ -34,6 +34,7 @@ | |||
34 | * POSSIBILITY OF SUCH DAMAGE. | 34 | * POSSIBILITY OF SUCH DAMAGE. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include <net/sock.h> | ||
37 | #include "core.h" | 38 | #include "core.h" |
38 | #include "msg.h" | 39 | #include "msg.h" |
39 | #include "addr.h" | 40 | #include "addr.h" |
@@ -46,25 +47,48 @@ static unsigned int align(unsigned int i) | |||
46 | return (i + 3) & ~3u; | 47 | return (i + 3) & ~3u; |
47 | } | 48 | } |
48 | 49 | ||
49 | void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize, | 50 | /** |
50 | u32 destnode) | 51 | * tipc_buf_acquire - creates a TIPC message buffer |
52 | * @size: message size (including TIPC header) | ||
53 | * | ||
54 | * Returns a new buffer with data pointers set to the specified size. | ||
55 | * | ||
56 | * NOTE: Headroom is reserved to allow prepending of a data link header. | ||
57 | * There may also be unrequested tailroom present at the buffer's end. | ||
58 | */ | ||
59 | struct sk_buff *tipc_buf_acquire(u32 size) | ||
60 | { | ||
61 | struct sk_buff *skb; | ||
62 | unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; | ||
63 | |||
64 | skb = alloc_skb_fclone(buf_size, GFP_ATOMIC); | ||
65 | if (skb) { | ||
66 | skb_reserve(skb, BUF_HEADROOM); | ||
67 | skb_put(skb, size); | ||
68 | skb->next = NULL; | ||
69 | } | ||
70 | return skb; | ||
71 | } | ||
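
For illustration only (not part of this patch), a minimal sketch of how a caller might pair tipc_buf_acquire() with the reworked tipc_msg_init(); 'own_node' and 'dnode' are assumed to be valid TIPC addresses supplied by the caller:

	/* Sketch: allocate a header-only buffer and fill in an internal header.
	 * INT_H_SIZE, LINK_CONFIG and DSC_REQ_MSG are constants from msg.h.
	 */
	struct sk_buff *skb = tipc_buf_acquire(INT_H_SIZE);

	if (skb)
		tipc_msg_init(own_node, buf_msg(skb), LINK_CONFIG,
			      DSC_REQ_MSG, INT_H_SIZE, dnode);
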
72 | |||
73 | void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type, | ||
74 | u32 hsize, u32 dnode) | ||
51 | { | 75 | { |
52 | memset(m, 0, hsize); | 76 | memset(m, 0, hsize); |
53 | msg_set_version(m); | 77 | msg_set_version(m); |
54 | msg_set_user(m, user); | 78 | msg_set_user(m, user); |
55 | msg_set_hdr_sz(m, hsize); | 79 | msg_set_hdr_sz(m, hsize); |
56 | msg_set_size(m, hsize); | 80 | msg_set_size(m, hsize); |
57 | msg_set_prevnode(m, tipc_own_addr); | 81 | msg_set_prevnode(m, own_node); |
58 | msg_set_type(m, type); | 82 | msg_set_type(m, type); |
59 | if (hsize > SHORT_H_SIZE) { | 83 | if (hsize > SHORT_H_SIZE) { |
60 | msg_set_orignode(m, tipc_own_addr); | 84 | msg_set_orignode(m, own_node); |
61 | msg_set_destnode(m, destnode); | 85 | msg_set_destnode(m, dnode); |
62 | } | 86 | } |
63 | } | 87 | } |
64 | 88 | ||
65 | struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, | 89 | struct sk_buff *tipc_msg_create(uint user, uint type, |
66 | uint data_sz, u32 dnode, u32 onode, | 90 | uint hdr_sz, uint data_sz, u32 dnode, |
67 | u32 dport, u32 oport, int errcode) | 91 | u32 onode, u32 dport, u32 oport, int errcode) |
68 | { | 92 | { |
69 | struct tipc_msg *msg; | 93 | struct tipc_msg *msg; |
70 | struct sk_buff *buf; | 94 | struct sk_buff *buf; |
@@ -74,9 +98,8 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, | |||
74 | return NULL; | 98 | return NULL; |
75 | 99 | ||
76 | msg = buf_msg(buf); | 100 | msg = buf_msg(buf); |
77 | tipc_msg_init(msg, user, type, hdr_sz, dnode); | 101 | tipc_msg_init(onode, msg, user, type, hdr_sz, dnode); |
78 | msg_set_size(msg, hdr_sz + data_sz); | 102 | msg_set_size(msg, hdr_sz + data_sz); |
79 | msg_set_prevnode(msg, onode); | ||
80 | msg_set_origport(msg, oport); | 103 | msg_set_origport(msg, oport); |
81 | msg_set_destport(msg, dport); | 104 | msg_set_destport(msg, dport); |
82 | msg_set_errcode(msg, errcode); | 105 | msg_set_errcode(msg, errcode); |
@@ -142,6 +165,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) | |||
142 | } | 165 | } |
143 | 166 | ||
144 | if (fragid == LAST_FRAGMENT) { | 167 | if (fragid == LAST_FRAGMENT) { |
168 | TIPC_SKB_CB(head)->validated = false; | ||
169 | if (unlikely(!tipc_msg_validate(head))) | ||
170 | goto err; | ||
145 | *buf = head; | 171 | *buf = head; |
146 | TIPC_SKB_CB(head)->tail = NULL; | 172 | TIPC_SKB_CB(head)->tail = NULL; |
147 | *headbuf = NULL; | 173 | *headbuf = NULL; |
@@ -149,7 +175,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) | |||
149 | } | 175 | } |
150 | *buf = NULL; | 176 | *buf = NULL; |
151 | return 0; | 177 | return 0; |
152 | |||
153 | err: | 178 | err: |
154 | pr_warn_ratelimited("Unable to build fragment list\n"); | 179 | pr_warn_ratelimited("Unable to build fragment list\n"); |
155 | kfree_skb(*buf); | 180 | kfree_skb(*buf); |
@@ -158,20 +183,61 @@ err: | |||
158 | return 0; | 183 | return 0; |
159 | } | 184 | } |
160 | 185 | ||
186 | /* tipc_msg_validate - validate basic format of received message | ||
187 | * | ||
188 | * This routine ensures a TIPC message has an acceptable header, and at least | ||
189 | * as much data as the header indicates it should. The routine also ensures | ||
190 | * that the entire message header is stored in the main fragment of the message | ||
191 | * buffer, to simplify future access to message header fields. | ||
192 | * | ||
193 | * Note: Having extra info present in the message header or data areas is OK. | ||
194 | * TIPC will ignore the excess, under the assumption that it is optional info | ||
195 | * introduced by a later release of the protocol. | ||
196 | */ | ||
197 | bool tipc_msg_validate(struct sk_buff *skb) | ||
198 | { | ||
199 | struct tipc_msg *msg; | ||
200 | int msz, hsz; | ||
201 | |||
202 | if (unlikely(TIPC_SKB_CB(skb)->validated)) | ||
203 | return true; | ||
204 | if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE))) | ||
205 | return false; | ||
206 | |||
207 | hsz = msg_hdr_sz(buf_msg(skb)); | ||
208 | if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE)) | ||
209 | return false; | ||
210 | if (unlikely(!pskb_may_pull(skb, hsz))) | ||
211 | return false; | ||
212 | |||
213 | msg = buf_msg(skb); | ||
214 | if (unlikely(msg_version(msg) != TIPC_VERSION)) | ||
215 | return false; | ||
216 | |||
217 | msz = msg_size(msg); | ||
218 | if (unlikely(msz < hsz)) | ||
219 | return false; | ||
220 | if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE)) | ||
221 | return false; | ||
222 | if (unlikely(skb->len < msz)) | ||
223 | return false; | ||
224 | |||
225 | TIPC_SKB_CB(skb)->validated = true; | ||
226 | return true; | ||
227 | } | ||
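
As a hedged illustration of the contract described above (not taken from the patch), a receive path would typically drop anything that fails validation before reading header fields:

	/* Sketch: reject malformed buffers early on receive. */
	if (unlikely(!tipc_msg_validate(skb))) {
		kfree_skb(skb);
		return;
	}
	/* safe: header is linear, msg_size()/msg_user() can be trusted */
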
161 | 228 | ||
162 | /** | 229 | /** |
163 | * tipc_msg_build - create buffer chain containing specified header and data | 230 | * tipc_msg_build - create buffer chain containing specified header and data |
164 | * @mhdr: Message header, to be prepended to data | 231 | * @mhdr: Message header, to be prepended to data |
165 | * @m: User message | 232 | * @m: User message |
166 | * @offset: Position in iov to start copying from | ||
167 | * @dsz: Total length of user data | 233 | * @dsz: Total length of user data |
168 | * @pktmax: Max packet size that can be used | 234 | * @pktmax: Max packet size that can be used |
169 | * @list: Buffer or chain of buffers to be returned to caller | 235 | * @list: Buffer or chain of buffers to be returned to caller |
170 | * | 236 | * |
171 | * Returns message data size or errno: -ENOMEM, -EFAULT | 237 | * Returns message data size or errno: -ENOMEM, -EFAULT |
172 | */ | 238 | */ |
173 | int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, | 239 | int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, |
174 | int dsz, int pktmax, struct sk_buff_head *list) | 240 | int offset, int dsz, int pktmax, struct sk_buff_head *list) |
175 | { | 241 | { |
176 | int mhsz = msg_hdr_sz(mhdr); | 242 | int mhsz = msg_hdr_sz(mhdr); |
177 | int msz = mhsz + dsz; | 243 | int msz = mhsz + dsz; |
@@ -191,26 +257,28 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, | |||
191 | skb = tipc_buf_acquire(msz); | 257 | skb = tipc_buf_acquire(msz); |
192 | if (unlikely(!skb)) | 258 | if (unlikely(!skb)) |
193 | return -ENOMEM; | 259 | return -ENOMEM; |
260 | skb_orphan(skb); | ||
194 | __skb_queue_tail(list, skb); | 261 | __skb_queue_tail(list, skb); |
195 | skb_copy_to_linear_data(skb, mhdr, mhsz); | 262 | skb_copy_to_linear_data(skb, mhdr, mhsz); |
196 | pktpos = skb->data + mhsz; | 263 | pktpos = skb->data + mhsz; |
197 | if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, | 264 | if (copy_from_iter(pktpos, dsz, &m->msg_iter) == dsz) |
198 | dsz)) | ||
199 | return dsz; | 265 | return dsz; |
200 | rc = -EFAULT; | 266 | rc = -EFAULT; |
201 | goto error; | 267 | goto error; |
202 | } | 268 | } |
203 | 269 | ||
204 | /* Prepare reusable fragment header */ | 270 | /* Prepare reusable fragment header */ |
205 | tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT, | 271 | tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER, |
206 | INT_H_SIZE, msg_destnode(mhdr)); | 272 | FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr)); |
207 | msg_set_size(&pkthdr, pktmax); | 273 | msg_set_size(&pkthdr, pktmax); |
208 | msg_set_fragm_no(&pkthdr, pktno); | 274 | msg_set_fragm_no(&pkthdr, pktno); |
275 | msg_set_importance(&pkthdr, msg_importance(mhdr)); | ||
209 | 276 | ||
210 | /* Prepare first fragment */ | 277 | /* Prepare first fragment */ |
211 | skb = tipc_buf_acquire(pktmax); | 278 | skb = tipc_buf_acquire(pktmax); |
212 | if (!skb) | 279 | if (!skb) |
213 | return -ENOMEM; | 280 | return -ENOMEM; |
281 | skb_orphan(skb); | ||
214 | __skb_queue_tail(list, skb); | 282 | __skb_queue_tail(list, skb); |
215 | pktpos = skb->data; | 283 | pktpos = skb->data; |
216 | skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE); | 284 | skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE); |
@@ -224,12 +292,11 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, | |||
224 | if (drem < pktrem) | 292 | if (drem < pktrem) |
225 | pktrem = drem; | 293 | pktrem = drem; |
226 | 294 | ||
227 | if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) { | 295 | if (copy_from_iter(pktpos, pktrem, &m->msg_iter) != pktrem) { |
228 | rc = -EFAULT; | 296 | rc = -EFAULT; |
229 | goto error; | 297 | goto error; |
230 | } | 298 | } |
231 | drem -= pktrem; | 299 | drem -= pktrem; |
232 | offset += pktrem; | ||
233 | 300 | ||
234 | if (!drem) | 301 | if (!drem) |
235 | break; | 302 | break; |
@@ -244,6 +311,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, | |||
244 | rc = -ENOMEM; | 311 | rc = -ENOMEM; |
245 | goto error; | 312 | goto error; |
246 | } | 313 | } |
314 | skb_orphan(skb); | ||
247 | __skb_queue_tail(list, skb); | 315 | __skb_queue_tail(list, skb); |
248 | msg_set_type(&pkthdr, FRAGMENT); | 316 | msg_set_type(&pkthdr, FRAGMENT); |
249 | msg_set_size(&pkthdr, pktsz); | 317 | msg_set_size(&pkthdr, pktsz); |
@@ -263,33 +331,36 @@ error: | |||
263 | 331 | ||
264 | /** | 332 | /** |
265 | * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one | 333 | * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one |
266 | * @list: the buffer chain of the existing buffer ("bundle") | 334 | * @bskb: the buffer to append to ("bundle") |
267 | * @skb: buffer to be appended | 335 | * @skb: buffer to be appended |
268 | * @mtu: max allowable size for the bundle buffer | 336 | * @mtu: max allowable size for the bundle buffer |
269 | * Consumes buffer if successful | 337 | * Consumes buffer if successful |
270 | * Returns true if bundling could be performed, otherwise false | 338 | * Returns true if bundling could be performed, otherwise false |
271 | */ | 339 | */ |
272 | bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu) | 340 | bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu) |
273 | { | 341 | { |
274 | struct sk_buff *bskb = skb_peek_tail(list); | 342 | struct tipc_msg *bmsg; |
275 | struct tipc_msg *bmsg = buf_msg(bskb); | ||
276 | struct tipc_msg *msg = buf_msg(skb); | 343 | struct tipc_msg *msg = buf_msg(skb); |
277 | unsigned int bsz = msg_size(bmsg); | 344 | unsigned int bsz; |
278 | unsigned int msz = msg_size(msg); | 345 | unsigned int msz = msg_size(msg); |
279 | u32 start = align(bsz); | 346 | u32 start, pad; |
280 | u32 max = mtu - INT_H_SIZE; | 347 | u32 max = mtu - INT_H_SIZE; |
281 | u32 pad = start - bsz; | ||
282 | 348 | ||
283 | if (likely(msg_user(msg) == MSG_FRAGMENTER)) | 349 | if (likely(msg_user(msg) == MSG_FRAGMENTER)) |
284 | return false; | 350 | return false; |
285 | if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) | 351 | if (!bskb) |
352 | return false; | ||
353 | bmsg = buf_msg(bskb); | ||
354 | bsz = msg_size(bmsg); | ||
355 | start = align(bsz); | ||
356 | pad = start - bsz; | ||
357 | |||
358 | if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL)) | ||
286 | return false; | 359 | return false; |
287 | if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) | 360 | if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) |
288 | return false; | 361 | return false; |
289 | if (likely(msg_user(bmsg) != MSG_BUNDLER)) | 362 | if (likely(msg_user(bmsg) != MSG_BUNDLER)) |
290 | return false; | 363 | return false; |
291 | if (likely(!TIPC_SKB_CB(bskb)->bundling)) | ||
292 | return false; | ||
293 | if (unlikely(skb_tailroom(bskb) < (pad + msz))) | 364 | if (unlikely(skb_tailroom(bskb) < (pad + msz))) |
294 | return false; | 365 | return false; |
295 | if (unlikely(max < (start + msz))) | 366 | if (unlikely(max < (start + msz))) |
@@ -304,6 +375,46 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu) | |||
304 | } | 375 | } |
305 | 376 | ||
306 | /** | 377 | /** |
378 | * tipc_msg_extract(): extract bundled inner packet from buffer | ||
379 | * @skb: buffer to be extracted from. | ||
380 | * @iskb: extracted inner buffer, to be returned | ||
381 | * @pos: position in outer message of msg to be extracted. | ||
382 | * Returns position of next msg | ||
383 | * Consumes outer buffer when last packet extracted | ||
384 | * Returns true when there is an extracted buffer, otherwise false | ||
385 | */ | ||
386 | bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos) | ||
387 | { | ||
388 | struct tipc_msg *msg; | ||
389 | int imsz, offset; | ||
390 | |||
391 | *iskb = NULL; | ||
392 | if (unlikely(skb_linearize(skb))) | ||
393 | goto none; | ||
394 | |||
395 | msg = buf_msg(skb); | ||
396 | offset = msg_hdr_sz(msg) + *pos; | ||
397 | if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE))) | ||
398 | goto none; | ||
399 | |||
400 | *iskb = skb_clone(skb, GFP_ATOMIC); | ||
401 | if (unlikely(!*iskb)) | ||
402 | goto none; | ||
403 | skb_pull(*iskb, offset); | ||
404 | imsz = msg_size(buf_msg(*iskb)); | ||
405 | skb_trim(*iskb, imsz); | ||
406 | if (unlikely(!tipc_msg_validate(*iskb))) | ||
407 | goto none; | ||
408 | *pos += align(imsz); | ||
409 | return true; | ||
410 | none: | ||
411 | kfree_skb(skb); | ||
412 | kfree_skb(*iskb); | ||
413 | *iskb = NULL; | ||
414 | return false; | ||
415 | } | ||
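
A rough usage sketch of the extract loop (assuming 'skb' holds a MSG_BUNDLER buffer; deliver() is a hypothetical per-message handler, not a TIPC function):

	/* Sketch: unpack every inner message of a bundle. */
	struct sk_buff *iskb;
	int pos = 0;

	while (tipc_msg_extract(skb, &iskb, &pos))
		deliver(iskb);
	/* the outer buffer is freed by tipc_msg_extract() when extraction ends */
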
416 | |||
417 | /** | ||
307 | * tipc_msg_make_bundle(): Create bundle buf and append message to its tail | 418 | * tipc_msg_make_bundle(): Create bundle buf and append message to its tail |
308 | * @list: the buffer chain | 419 | * @list: the buffer chain |
309 | * @skb: buffer to be appended and replaced | 420 | * @skb: buffer to be appended and replaced |
@@ -312,18 +423,17 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu) | |||
312 | * Replaces buffer if successful | 423 | * Replaces buffer if successful |
313 | * Returns true if success, otherwise false | 424 | * Returns true if success, otherwise false |
314 | */ | 425 | */ |
315 | bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb, | 426 | bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode) |
316 | u32 mtu, u32 dnode) | ||
317 | { | 427 | { |
318 | struct sk_buff *bskb; | 428 | struct sk_buff *bskb; |
319 | struct tipc_msg *bmsg; | 429 | struct tipc_msg *bmsg; |
320 | struct tipc_msg *msg = buf_msg(skb); | 430 | struct tipc_msg *msg = buf_msg(*skb); |
321 | u32 msz = msg_size(msg); | 431 | u32 msz = msg_size(msg); |
322 | u32 max = mtu - INT_H_SIZE; | 432 | u32 max = mtu - INT_H_SIZE; |
323 | 433 | ||
324 | if (msg_user(msg) == MSG_FRAGMENTER) | 434 | if (msg_user(msg) == MSG_FRAGMENTER) |
325 | return false; | 435 | return false; |
326 | if (msg_user(msg) == CHANGEOVER_PROTOCOL) | 436 | if (msg_user(msg) == TUNNEL_PROTOCOL) |
327 | return false; | 437 | return false; |
328 | if (msg_user(msg) == BCAST_PROTOCOL) | 438 | if (msg_user(msg) == BCAST_PROTOCOL) |
329 | return false; | 439 | return false; |
@@ -336,13 +446,14 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb, | |||
336 | 446 | ||
337 | skb_trim(bskb, INT_H_SIZE); | 447 | skb_trim(bskb, INT_H_SIZE); |
338 | bmsg = buf_msg(bskb); | 448 | bmsg = buf_msg(bskb); |
339 | tipc_msg_init(bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode); | 449 | tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0, |
450 | INT_H_SIZE, dnode); | ||
340 | msg_set_seqno(bmsg, msg_seqno(msg)); | 451 | msg_set_seqno(bmsg, msg_seqno(msg)); |
341 | msg_set_ack(bmsg, msg_ack(msg)); | 452 | msg_set_ack(bmsg, msg_ack(msg)); |
342 | msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); | 453 | msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); |
343 | TIPC_SKB_CB(bskb)->bundling = true; | 454 | tipc_msg_bundle(bskb, *skb, mtu); |
344 | __skb_queue_tail(list, bskb); | 455 | *skb = bskb; |
345 | return tipc_msg_bundle(list, skb, mtu); | 456 | return true; |
346 | } | 457 | } |
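
For orientation, a hedged sketch of how the two bundling helpers are typically combined on a transmit queue; the surrounding queue handling is illustrative, not lifted from link.c:

	/* Sketch: append to the bundle at the queue tail if possible,
	 * otherwise try to turn the message into a fresh bundle.
	 */
	if (tipc_msg_bundle(skb_peek_tail(list), skb, mtu))
		return;				/* skb consumed by the bundle */
	if (tipc_msg_make_bundle(&skb, mtu, dnode))
		__skb_queue_tail(list, skb);	/* skb now points at the bundle */
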
347 | 458 | ||
348 | /** | 459 | /** |
@@ -353,28 +464,25 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb, | |||
353 | * Consumes buffer if failure | 464 | * Consumes buffer if failure |
354 | * Returns true if success, otherwise false | 465 | * Returns true if success, otherwise false |
355 | */ | 466 | */ |
356 | bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err) | 467 | bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode, |
468 | int err) | ||
357 | { | 469 | { |
358 | struct tipc_msg *msg = buf_msg(buf); | 470 | struct tipc_msg *msg = buf_msg(buf); |
359 | uint imp = msg_importance(msg); | ||
360 | struct tipc_msg ohdr; | 471 | struct tipc_msg ohdr; |
361 | uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE); | 472 | uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE); |
362 | 473 | ||
363 | if (skb_linearize(buf)) | 474 | if (skb_linearize(buf)) |
364 | goto exit; | 475 | goto exit; |
476 | msg = buf_msg(buf); | ||
365 | if (msg_dest_droppable(msg)) | 477 | if (msg_dest_droppable(msg)) |
366 | goto exit; | 478 | goto exit; |
367 | if (msg_errcode(msg)) | 479 | if (msg_errcode(msg)) |
368 | goto exit; | 480 | goto exit; |
369 | |||
370 | memcpy(&ohdr, msg, msg_hdr_sz(msg)); | 481 | memcpy(&ohdr, msg, msg_hdr_sz(msg)); |
371 | imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE); | ||
372 | if (msg_isdata(msg)) | ||
373 | msg_set_importance(msg, imp); | ||
374 | msg_set_errcode(msg, err); | 482 | msg_set_errcode(msg, err); |
375 | msg_set_origport(msg, msg_destport(&ohdr)); | 483 | msg_set_origport(msg, msg_destport(&ohdr)); |
376 | msg_set_destport(msg, msg_origport(&ohdr)); | 484 | msg_set_destport(msg, msg_origport(&ohdr)); |
377 | msg_set_prevnode(msg, tipc_own_addr); | 485 | msg_set_prevnode(msg, own_addr); |
378 | if (!msg_short(msg)) { | 486 | if (!msg_short(msg)) { |
379 | msg_set_orignode(msg, msg_destnode(&ohdr)); | 487 | msg_set_orignode(msg, msg_destnode(&ohdr)); |
380 | msg_set_destnode(msg, msg_orignode(&ohdr)); | 488 | msg_set_destnode(msg, msg_orignode(&ohdr)); |
@@ -386,43 +494,48 @@ bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err) | |||
386 | return true; | 494 | return true; |
387 | exit: | 495 | exit: |
388 | kfree_skb(buf); | 496 | kfree_skb(buf); |
497 | *dnode = 0; | ||
389 | return false; | 498 | return false; |
390 | } | 499 | } |
391 | 500 | ||
392 | /** | 501 | /** |
393 | * tipc_msg_eval: determine fate of message that found no destination | 502 | * tipc_msg_lookup_dest(): try to find new destination for named message |
394 | * @buf: the buffer containing the message. | 503 | * @skb: the buffer containing the message. |
395 | * @dnode: return value: next-hop node, if message to be forwarded | 504 | * @dnode: return value: next-hop node, if destination found |
396 | * @err: error code to use, if message to be rejected | 505 | * @err: return value: error code to use, if message to be rejected |
397 | * | ||
398 | * Does not consume buffer | 506 | * Does not consume buffer |
399 | * Returns 0 (TIPC_OK) if message ok and we can try again, -TIPC error | 507 | * Returns true if a destination is found, false otherwise |
400 | * code if message to be rejected | ||
401 | */ | 508 | */ |
402 | int tipc_msg_eval(struct sk_buff *buf, u32 *dnode) | 509 | bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, |
510 | u32 *dnode, int *err) | ||
403 | { | 511 | { |
404 | struct tipc_msg *msg = buf_msg(buf); | 512 | struct tipc_msg *msg = buf_msg(skb); |
405 | u32 dport; | 513 | u32 dport; |
514 | u32 own_addr = tipc_own_addr(net); | ||
406 | 515 | ||
407 | if (msg_type(msg) != TIPC_NAMED_MSG) | 516 | if (!msg_isdata(msg)) |
408 | return -TIPC_ERR_NO_PORT; | 517 | return false; |
409 | if (skb_linearize(buf)) | 518 | if (!msg_named(msg)) |
410 | return -TIPC_ERR_NO_NAME; | 519 | return false; |
411 | if (msg_data_sz(msg) > MAX_FORWARD_SIZE) | 520 | if (msg_errcode(msg)) |
412 | return -TIPC_ERR_NO_NAME; | 521 | return false; |
413 | if (msg_reroute_cnt(msg) > 0) | 522 | *err = -TIPC_ERR_NO_NAME; |
414 | return -TIPC_ERR_NO_NAME; | 523 | if (skb_linearize(skb)) |
415 | 524 | return false; | |
416 | *dnode = addr_domain(msg_lookup_scope(msg)); | 525 | if (msg_reroute_cnt(msg)) |
417 | dport = tipc_nametbl_translate(msg_nametype(msg), | 526 | return false; |
418 | msg_nameinst(msg), | 527 | *dnode = addr_domain(net, msg_lookup_scope(msg)); |
419 | dnode); | 528 | dport = tipc_nametbl_translate(net, msg_nametype(msg), |
529 | msg_nameinst(msg), dnode); | ||
420 | if (!dport) | 530 | if (!dport) |
421 | return -TIPC_ERR_NO_NAME; | 531 | return false; |
422 | msg_incr_reroute_cnt(msg); | 532 | msg_incr_reroute_cnt(msg); |
533 | if (*dnode != own_addr) | ||
534 | msg_set_prevnode(msg, own_addr); | ||
423 | msg_set_destnode(msg, *dnode); | 535 | msg_set_destnode(msg, *dnode); |
424 | msg_set_destport(msg, dport); | 536 | msg_set_destport(msg, dport); |
425 | return TIPC_OK; | 537 | *err = TIPC_OK; |
538 | return true; | ||
426 | } | 539 | } |
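
A hedged sketch of how the lookup and reversal routines fit together when a message finds no destination port; the real socket-layer control flow differs, so treat this purely as orientation:

	/* Sketch: retry the name lookup, otherwise bounce the message back. */
	u32 dnode = 0;
	int err = TIPC_OK;

	if (tipc_msg_lookup_dest(net, skb, &dnode, &err))
		tipc_link_xmit_skb(net, skb, dnode, msg_origport(buf_msg(skb)));
	else if (err && tipc_msg_reverse(tipc_own_addr(net), skb, &dnode, -err))
		tipc_link_xmit_skb(net, skb, dnode, 0);
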
427 | 540 | ||
428 | /* tipc_msg_reassemble() - clone a buffer chain of fragments and | 541 | /* tipc_msg_reassemble() - clone a buffer chain of fragments and |
diff --git a/net/tipc/msg.h b/net/tipc/msg.h index d5c83d7ecb47..e1d3595e2ee9 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/msg.h: Include file for TIPC message header routines | 2 | * net/tipc/msg.h: Include file for TIPC message header routines |
3 | * | 3 | * |
4 | * Copyright (c) 2000-2007, 2014, Ericsson AB | 4 | * Copyright (c) 2000-2007, 2014-2015 Ericsson AB |
5 | * Copyright (c) 2005-2008, 2010-2011, Wind River Systems | 5 | * Copyright (c) 2005-2008, 2010-2011, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
@@ -37,7 +37,7 @@ | |||
37 | #ifndef _TIPC_MSG_H | 37 | #ifndef _TIPC_MSG_H |
38 | #define _TIPC_MSG_H | 38 | #define _TIPC_MSG_H |
39 | 39 | ||
40 | #include "bearer.h" | 40 | #include <linux/tipc.h> |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Constants and routines used to read and write TIPC payload message headers | 43 | * Constants and routines used to read and write TIPC payload message headers |
@@ -45,6 +45,7 @@ | |||
45 | * Note: Some items are also used with TIPC internal message headers | 45 | * Note: Some items are also used with TIPC internal message headers |
46 | */ | 46 | */ |
47 | #define TIPC_VERSION 2 | 47 | #define TIPC_VERSION 2 |
48 | struct plist; | ||
48 | 49 | ||
49 | /* | 50 | /* |
50 | * Payload message users are defined in TIPC's public API: | 51 | * Payload message users are defined in TIPC's public API: |
@@ -53,6 +54,8 @@ | |||
53 | * - TIPC_HIGH_IMPORTANCE | 54 | * - TIPC_HIGH_IMPORTANCE |
54 | * - TIPC_CRITICAL_IMPORTANCE | 55 | * - TIPC_CRITICAL_IMPORTANCE |
55 | */ | 56 | */ |
57 | #define TIPC_SYSTEM_IMPORTANCE 4 | ||
58 | |||
56 | 59 | ||
57 | /* | 60 | /* |
58 | * Payload message types | 61 | * Payload message types |
@@ -63,6 +66,19 @@ | |||
63 | #define TIPC_DIRECT_MSG 3 | 66 | #define TIPC_DIRECT_MSG 3 |
64 | 67 | ||
65 | /* | 68 | /* |
69 | * Internal message users | ||
70 | */ | ||
71 | #define BCAST_PROTOCOL 5 | ||
72 | #define MSG_BUNDLER 6 | ||
73 | #define LINK_PROTOCOL 7 | ||
74 | #define CONN_MANAGER 8 | ||
75 | #define TUNNEL_PROTOCOL 10 | ||
76 | #define NAME_DISTRIBUTOR 11 | ||
77 | #define MSG_FRAGMENTER 12 | ||
78 | #define LINK_CONFIG 13 | ||
79 | #define SOCK_WAKEUP 14 /* pseudo user */ | ||
80 | |||
81 | /* | ||
66 | * Message header sizes | 82 | * Message header sizes |
67 | */ | 83 | */ |
68 | #define SHORT_H_SIZE 24 /* In-cluster basic payload message */ | 84 | #define SHORT_H_SIZE 24 /* In-cluster basic payload message */ |
@@ -75,13 +91,39 @@ | |||
75 | 91 | ||
76 | #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE) | 92 | #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE) |
77 | 93 | ||
78 | #define TIPC_MEDIA_ADDR_OFFSET 5 | 94 | #define TIPC_MEDIA_INFO_OFFSET 5 |
79 | 95 | ||
96 | /** | ||
97 | * TIPC message buffer code | ||
98 | * | ||
99 | * TIPC message buffer headroom reserves space for the worst-case | ||
100 | * link-level device header (in case the message is sent off-node). | ||
101 | * | ||
102 | * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields | ||
103 | * are word aligned for quicker access | ||
104 | */ | ||
105 | #define BUF_HEADROOM (LL_MAX_HEADER + 48) | ||
106 | |||
107 | struct tipc_skb_cb { | ||
108 | void *handle; | ||
109 | struct sk_buff *tail; | ||
110 | bool validated; | ||
111 | bool wakeup_pending; | ||
112 | bool bundling; | ||
113 | u16 chain_sz; | ||
114 | u16 chain_imp; | ||
115 | }; | ||
116 | |||
117 | #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) | ||
80 | 118 | ||
81 | struct tipc_msg { | 119 | struct tipc_msg { |
82 | __be32 hdr[15]; | 120 | __be32 hdr[15]; |
83 | }; | 121 | }; |
84 | 122 | ||
123 | static inline struct tipc_msg *buf_msg(struct sk_buff *skb) | ||
124 | { | ||
125 | return (struct tipc_msg *)skb->data; | ||
126 | } | ||
85 | 127 | ||
86 | static inline u32 msg_word(struct tipc_msg *m, u32 pos) | 128 | static inline u32 msg_word(struct tipc_msg *m, u32 pos) |
87 | { | 129 | { |
@@ -143,16 +185,6 @@ static inline void msg_set_user(struct tipc_msg *m, u32 n) | |||
143 | msg_set_bits(m, 0, 25, 0xf, n); | 185 | msg_set_bits(m, 0, 25, 0xf, n); |
144 | } | 186 | } |
145 | 187 | ||
146 | static inline u32 msg_importance(struct tipc_msg *m) | ||
147 | { | ||
148 | return msg_bits(m, 0, 25, 0xf); | ||
149 | } | ||
150 | |||
151 | static inline void msg_set_importance(struct tipc_msg *m, u32 i) | ||
152 | { | ||
153 | msg_set_user(m, i); | ||
154 | } | ||
155 | |||
156 | static inline u32 msg_hdr_sz(struct tipc_msg *m) | 188 | static inline u32 msg_hdr_sz(struct tipc_msg *m) |
157 | { | 189 | { |
158 | return msg_bits(m, 0, 21, 0xf) << 2; | 190 | return msg_bits(m, 0, 21, 0xf) << 2; |
@@ -208,6 +240,15 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz) | |||
208 | m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz); | 240 | m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz); |
209 | } | 241 | } |
210 | 242 | ||
243 | static inline unchar *msg_data(struct tipc_msg *m) | ||
244 | { | ||
245 | return ((unchar *)m) + msg_hdr_sz(m); | ||
246 | } | ||
247 | |||
248 | static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) | ||
249 | { | ||
250 | return (struct tipc_msg *)msg_data(m); | ||
251 | } | ||
211 | 252 | ||
212 | /* | 253 | /* |
213 | * Word 1 | 254 | * Word 1 |
@@ -309,6 +350,25 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n) | |||
309 | /* | 350 | /* |
310 | * Words 3-10 | 351 | * Words 3-10 |
311 | */ | 352 | */ |
353 | static inline u32 msg_importance(struct tipc_msg *m) | ||
354 | { | ||
355 | if (unlikely(msg_user(m) == MSG_FRAGMENTER)) | ||
356 | return msg_bits(m, 5, 13, 0x7); | ||
357 | if (likely(msg_isdata(m) && !msg_errcode(m))) | ||
358 | return msg_user(m); | ||
359 | return TIPC_SYSTEM_IMPORTANCE; | ||
360 | } | ||
361 | |||
362 | static inline void msg_set_importance(struct tipc_msg *m, u32 i) | ||
363 | { | ||
364 | if (unlikely(msg_user(m) == MSG_FRAGMENTER)) | ||
365 | msg_set_bits(m, 5, 13, 0x7, i); | ||
366 | else if (likely(i < TIPC_SYSTEM_IMPORTANCE)) | ||
367 | msg_set_user(m, i); | ||
368 | else | ||
369 | pr_warn("Trying to set illegal importance in message\n"); | ||
370 | } | ||
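
A small hedged illustration of the encoding above (assuming 'hdr' points at an ordinary payload header): for data messages the importance is simply the user field, so setting and reading it round-trips through msg_user():

	/* Sketch: importance of a data message lives in the user field. */
	msg_set_importance(hdr, TIPC_HIGH_IMPORTANCE);
	u32 imp = msg_importance(hdr);	/* TIPC_HIGH_IMPORTANCE, read via msg_user() */
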
371 | |||
312 | static inline u32 msg_prevnode(struct tipc_msg *m) | 372 | static inline u32 msg_prevnode(struct tipc_msg *m) |
313 | { | 373 | { |
314 | return msg_word(m, 3); | 374 | return msg_word(m, 3); |
@@ -321,6 +381,8 @@ static inline void msg_set_prevnode(struct tipc_msg *m, u32 a) | |||
321 | 381 | ||
322 | static inline u32 msg_origport(struct tipc_msg *m) | 382 | static inline u32 msg_origport(struct tipc_msg *m) |
323 | { | 383 | { |
384 | if (msg_user(m) == MSG_FRAGMENTER) | ||
385 | m = msg_get_wrapped(m); | ||
324 | return msg_word(m, 4); | 386 | return msg_word(m, 4); |
325 | } | 387 | } |
326 | 388 | ||
@@ -416,35 +478,11 @@ static inline void msg_set_nameupper(struct tipc_msg *m, u32 n) | |||
416 | msg_set_word(m, 10, n); | 478 | msg_set_word(m, 10, n); |
417 | } | 479 | } |
418 | 480 | ||
419 | static inline unchar *msg_data(struct tipc_msg *m) | ||
420 | { | ||
421 | return ((unchar *)m) + msg_hdr_sz(m); | ||
422 | } | ||
423 | |||
424 | static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) | ||
425 | { | ||
426 | return (struct tipc_msg *)msg_data(m); | ||
427 | } | ||
428 | |||
429 | /* | 481 | /* |
430 | * Constants and routines used to read and write TIPC internal message headers | 482 | * Constants and routines used to read and write TIPC internal message headers |
431 | */ | 483 | */ |
432 | 484 | ||
433 | /* | 485 | /* |
434 | * Internal message users | ||
435 | */ | ||
436 | #define BCAST_PROTOCOL 5 | ||
437 | #define MSG_BUNDLER 6 | ||
438 | #define LINK_PROTOCOL 7 | ||
439 | #define CONN_MANAGER 8 | ||
440 | #define ROUTE_DISTRIBUTOR 9 /* obsoleted */ | ||
441 | #define CHANGEOVER_PROTOCOL 10 | ||
442 | #define NAME_DISTRIBUTOR 11 | ||
443 | #define MSG_FRAGMENTER 12 | ||
444 | #define LINK_CONFIG 13 | ||
445 | #define SOCK_WAKEUP 14 /* pseudo user */ | ||
446 | |||
447 | /* | ||
448 | * Connection management protocol message types | 486 | * Connection management protocol message types |
449 | */ | 487 | */ |
450 | #define CONN_PROBE 0 | 488 | #define CONN_PROBE 0 |
@@ -474,8 +512,8 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) | |||
474 | /* | 512 | /* |
475 | * Changeover tunnel message types | 513 | * Changeover tunnel message types |
476 | */ | 514 | */ |
477 | #define DUPLICATE_MSG 0 | 515 | #define SYNCH_MSG 0 |
478 | #define ORIGINAL_MSG 1 | 516 | #define FAILOVER_MSG 1 |
479 | 517 | ||
480 | /* | 518 | /* |
481 | * Config protocol message types | 519 | * Config protocol message types |
@@ -483,7 +521,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) | |||
483 | #define DSC_REQ_MSG 0 | 521 | #define DSC_REQ_MSG 0 |
484 | #define DSC_RESP_MSG 1 | 522 | #define DSC_RESP_MSG 1 |
485 | 523 | ||
486 | |||
487 | /* | 524 | /* |
488 | * Word 1 | 525 | * Word 1 |
489 | */ | 526 | */ |
@@ -507,6 +544,24 @@ static inline void msg_set_node_sig(struct tipc_msg *m, u32 n) | |||
507 | msg_set_bits(m, 1, 0, 0xffff, n); | 544 | msg_set_bits(m, 1, 0, 0xffff, n); |
508 | } | 545 | } |
509 | 546 | ||
547 | static inline u32 msg_node_capabilities(struct tipc_msg *m) | ||
548 | { | ||
549 | return msg_bits(m, 1, 15, 0x1fff); | ||
550 | } | ||
551 | |||
552 | static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n) | ||
553 | { | ||
554 | msg_set_bits(m, 1, 15, 0x1fff, n); | ||
555 | } | ||
556 | |||
557 | static inline bool msg_dup(struct tipc_msg *m) | ||
558 | { | ||
559 | if (likely(msg_user(m) != TUNNEL_PROTOCOL)) | ||
560 | return false; | ||
561 | if (msg_type(m) != SYNCH_MSG) | ||
562 | return false; | ||
563 | return true; | ||
564 | } | ||
510 | 565 | ||
511 | /* | 566 | /* |
512 | * Word 2 | 567 | * Word 2 |
@@ -661,7 +716,7 @@ static inline void msg_set_redundant_link(struct tipc_msg *m, u32 r) | |||
661 | 716 | ||
662 | static inline char *msg_media_addr(struct tipc_msg *m) | 717 | static inline char *msg_media_addr(struct tipc_msg *m) |
663 | { | 718 | { |
664 | return (char *)&m->hdr[TIPC_MEDIA_ADDR_OFFSET]; | 719 | return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET]; |
665 | } | 720 | } |
666 | 721 | ||
667 | /* | 722 | /* |
@@ -707,41 +762,112 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n) | |||
707 | msg_set_bits(m, 9, 0, 0xffff, n); | 762 | msg_set_bits(m, 9, 0, 0xffff, n); |
708 | } | 763 | } |
709 | 764 | ||
710 | static inline u32 tipc_msg_tot_importance(struct tipc_msg *m) | 765 | struct sk_buff *tipc_buf_acquire(u32 size); |
711 | { | 766 | bool tipc_msg_validate(struct sk_buff *skb); |
712 | if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT)) | 767 | bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode, |
713 | return msg_importance(msg_get_wrapped(m)); | 768 | int err); |
714 | return msg_importance(m); | 769 | void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type, |
715 | } | 770 | u32 hsize, u32 destnode); |
716 | |||
717 | static inline u32 msg_tot_origport(struct tipc_msg *m) | ||
718 | { | ||
719 | if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT)) | ||
720 | return msg_origport(msg_get_wrapped(m)); | ||
721 | return msg_origport(m); | ||
722 | } | ||
723 | |||
724 | bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err); | ||
725 | |||
726 | int tipc_msg_eval(struct sk_buff *buf, u32 *dnode); | ||
727 | |||
728 | void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize, | ||
729 | u32 destnode); | ||
730 | |||
731 | struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, | 771 | struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, |
732 | uint data_sz, u32 dnode, u32 onode, | 772 | uint data_sz, u32 dnode, u32 onode, |
733 | u32 dport, u32 oport, int errcode); | 773 | u32 dport, u32 oport, int errcode); |
734 | |||
735 | int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf); | 774 | int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf); |
775 | bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu); | ||
776 | |||
777 | bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode); | ||
778 | bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos); | ||
779 | int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, | ||
780 | int offset, int dsz, int mtu, struct sk_buff_head *list); | ||
781 | bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode, | ||
782 | int *err); | ||
783 | struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list); | ||
736 | 784 | ||
737 | bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu); | 785 | /* tipc_skb_peek(): peek and reserve first buffer in list |
738 | 786 | * @list: list to be peeked in | |
739 | bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb, | 787 | * Returns pointer to first buffer in list, if any |
740 | u32 mtu, u32 dnode); | 788 | */ |
741 | 789 | static inline struct sk_buff *tipc_skb_peek(struct sk_buff_head *list, | |
742 | int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, | 790 | spinlock_t *lock) |
743 | int dsz, int mtu, struct sk_buff_head *list); | 791 | { |
792 | struct sk_buff *skb; | ||
793 | |||
794 | spin_lock_bh(lock); | ||
795 | skb = skb_peek(list); | ||
796 | if (skb) | ||
797 | skb_get(skb); | ||
798 | spin_unlock_bh(lock); | ||
799 | return skb; | ||
800 | } | ||
801 | |||
802 | /* tipc_skb_peek_port(): find a destination port, ignoring all destinations | ||
803 | * up to and including 'filter'. | ||
804 | * Note: ignoring previously tried destinations minimizes the risk of | ||
805 | * contention on the socket lock | ||
806 | * @list: list to be peeked in | ||
807 | * @filter: last destination to be ignored from search | ||
808 | * Returns a destination port number, if applicable. | ||
809 | */ | ||
810 | static inline u32 tipc_skb_peek_port(struct sk_buff_head *list, u32 filter) | ||
811 | { | ||
812 | struct sk_buff *skb; | ||
813 | u32 dport = 0; | ||
814 | bool ignore = true; | ||
815 | |||
816 | spin_lock_bh(&list->lock); | ||
817 | skb_queue_walk(list, skb) { | ||
818 | dport = msg_destport(buf_msg(skb)); | ||
819 | if (!filter || skb_queue_is_last(list, skb)) | ||
820 | break; | ||
821 | if (dport == filter) | ||
822 | ignore = false; | ||
823 | else if (!ignore) | ||
824 | break; | ||
825 | } | ||
826 | spin_unlock_bh(&list->lock); | ||
827 | return dport; | ||
828 | } | ||
829 | |||
830 | /* tipc_skb_dequeue(): unlink first buffer with dest 'dport' from list | ||
831 | * @list: list to be unlinked from | ||
832 | * @dport: selection criteria for buffer to unlink | ||
833 | */ | ||
834 | static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list, | ||
835 | u32 dport) | ||
836 | { | ||
837 | struct sk_buff *_skb, *tmp, *skb = NULL; | ||
838 | |||
839 | spin_lock_bh(&list->lock); | ||
840 | skb_queue_walk_safe(list, _skb, tmp) { | ||
841 | if (msg_destport(buf_msg(_skb)) == dport) { | ||
842 | __skb_unlink(_skb, list); | ||
843 | skb = _skb; | ||
844 | break; | ||
845 | } | ||
846 | } | ||
847 | spin_unlock_bh(&list->lock); | ||
848 | return skb; | ||
849 | } | ||
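
A hedged sketch of how the two receive-queue helpers above might cooperate; 'last_dport' stands in for whatever destination the caller served most recently:

	/* Sketch: pick the next destination port and pull its first buffer. */
	u32 dport = tipc_skb_peek_port(inputq, last_dport);

	if (dport) {
		struct sk_buff *skb = tipc_skb_dequeue(inputq, dport);
		/* ...look up and lock the socket bound to 'dport', deliver skb... */
	}
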
850 | |||
851 | /* tipc_skb_queue_tail(): add buffer to tail of list; | ||
852 | * @list: list to be appended to | ||
853 | * @skb: buffer to append. Always appended | ||
854 | * @dport: the destination port of the buffer | ||
855 | * returns true if dport differs from previous destination | ||
856 | */ | ||
857 | static inline bool tipc_skb_queue_tail(struct sk_buff_head *list, | ||
858 | struct sk_buff *skb, u32 dport) | ||
859 | { | ||
860 | struct sk_buff *_skb = NULL; | ||
861 | bool rv = false; | ||
744 | 862 | ||
745 | struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list); | 863 | spin_lock_bh(&list->lock); |
864 | _skb = skb_peek_tail(list); | ||
865 | if (!_skb || (msg_destport(buf_msg(_skb)) != dport) || | ||
866 | (skb_queue_len(list) > 32)) | ||
867 | rv = true; | ||
868 | __skb_queue_tail(list, skb); | ||
869 | spin_unlock_bh(&list->lock); | ||
870 | return rv; | ||
871 | } | ||
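
Finally, a hedged sketch of the producer side using the helper's return value; the work item is purely illustrative:

	/* Sketch: wake the receiver only when the tail destination changes
	 * (or the queue has grown long), as reported by the helper.
	 */
	if (tipc_skb_queue_tail(inputq, skb, dport))
		schedule_work(&rx_work);	/* hypothetical receive worker */
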
746 | 872 | ||
747 | #endif | 873 | #endif |
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index ba6083dca95b..41e7b7e4dda0 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c | |||
@@ -68,37 +68,41 @@ static void publ_to_item(struct distr_item *i, struct publication *p) | |||
68 | /** | 68 | /** |
69 | * named_prepare_buf - allocate & initialize a publication message | 69 | * named_prepare_buf - allocate & initialize a publication message |
70 | */ | 70 | */ |
71 | static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) | 71 | static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size, |
72 | u32 dest) | ||
72 | { | 73 | { |
74 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
73 | struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size); | 75 | struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size); |
74 | struct tipc_msg *msg; | 76 | struct tipc_msg *msg; |
75 | 77 | ||
76 | if (buf != NULL) { | 78 | if (buf != NULL) { |
77 | msg = buf_msg(buf); | 79 | msg = buf_msg(buf); |
78 | tipc_msg_init(msg, NAME_DISTRIBUTOR, type, INT_H_SIZE, dest); | 80 | tipc_msg_init(tn->own_addr, msg, NAME_DISTRIBUTOR, type, |
81 | INT_H_SIZE, dest); | ||
79 | msg_set_size(msg, INT_H_SIZE + size); | 82 | msg_set_size(msg, INT_H_SIZE + size); |
80 | } | 83 | } |
81 | return buf; | 84 | return buf; |
82 | } | 85 | } |
83 | 86 | ||
84 | void named_cluster_distribute(struct sk_buff *skb) | 87 | void named_cluster_distribute(struct net *net, struct sk_buff *skb) |
85 | { | 88 | { |
89 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
86 | struct sk_buff *oskb; | 90 | struct sk_buff *oskb; |
87 | struct tipc_node *node; | 91 | struct tipc_node *node; |
88 | u32 dnode; | 92 | u32 dnode; |
89 | 93 | ||
90 | rcu_read_lock(); | 94 | rcu_read_lock(); |
91 | list_for_each_entry_rcu(node, &tipc_node_list, list) { | 95 | list_for_each_entry_rcu(node, &tn->node_list, list) { |
92 | dnode = node->addr; | 96 | dnode = node->addr; |
93 | if (in_own_node(dnode)) | 97 | if (in_own_node(net, dnode)) |
94 | continue; | 98 | continue; |
95 | if (!tipc_node_active_links(node)) | 99 | if (!tipc_node_active_links(node)) |
96 | continue; | 100 | continue; |
97 | oskb = skb_copy(skb, GFP_ATOMIC); | 101 | oskb = pskb_copy(skb, GFP_ATOMIC); |
98 | if (!oskb) | 102 | if (!oskb) |
99 | break; | 103 | break; |
100 | msg_set_destnode(buf_msg(oskb), dnode); | 104 | msg_set_destnode(buf_msg(oskb), dnode); |
101 | tipc_link_xmit_skb(oskb, dnode, dnode); | 105 | tipc_link_xmit_skb(net, oskb, dnode, dnode); |
102 | } | 106 | } |
103 | rcu_read_unlock(); | 107 | rcu_read_unlock(); |
104 | 108 | ||
@@ -108,18 +112,19 @@ void named_cluster_distribute(struct sk_buff *skb) | |||
108 | /** | 112 | /** |
109 | * tipc_named_publish - tell other nodes about a new publication by this node | 113 | * tipc_named_publish - tell other nodes about a new publication by this node |
110 | */ | 114 | */ |
111 | struct sk_buff *tipc_named_publish(struct publication *publ) | 115 | struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) |
112 | { | 116 | { |
117 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
113 | struct sk_buff *buf; | 118 | struct sk_buff *buf; |
114 | struct distr_item *item; | 119 | struct distr_item *item; |
115 | 120 | ||
116 | list_add_tail_rcu(&publ->local_list, | 121 | list_add_tail_rcu(&publ->local_list, |
117 | &tipc_nametbl->publ_list[publ->scope]); | 122 | &tn->nametbl->publ_list[publ->scope]); |
118 | 123 | ||
119 | if (publ->scope == TIPC_NODE_SCOPE) | 124 | if (publ->scope == TIPC_NODE_SCOPE) |
120 | return NULL; | 125 | return NULL; |
121 | 126 | ||
122 | buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); | 127 | buf = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0); |
123 | if (!buf) { | 128 | if (!buf) { |
124 | pr_warn("Publication distribution failure\n"); | 129 | pr_warn("Publication distribution failure\n"); |
125 | return NULL; | 130 | return NULL; |
@@ -133,7 +138,7 @@ struct sk_buff *tipc_named_publish(struct publication *publ) | |||
133 | /** | 138 | /** |
134 | * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node | 139 | * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node |
135 | */ | 140 | */ |
136 | struct sk_buff *tipc_named_withdraw(struct publication *publ) | 141 | struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) |
137 | { | 142 | { |
138 | struct sk_buff *buf; | 143 | struct sk_buff *buf; |
139 | struct distr_item *item; | 144 | struct distr_item *item; |
@@ -143,7 +148,7 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ) | |||
143 | if (publ->scope == TIPC_NODE_SCOPE) | 148 | if (publ->scope == TIPC_NODE_SCOPE) |
144 | return NULL; | 149 | return NULL; |
145 | 150 | ||
146 | buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); | 151 | buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); |
147 | if (!buf) { | 152 | if (!buf) { |
148 | pr_warn("Withdrawal distribution failure\n"); | 153 | pr_warn("Withdrawal distribution failure\n"); |
149 | return NULL; | 154 | return NULL; |
@@ -160,19 +165,21 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ) | |||
160 | * @dnode: node to be updated | 165 | * @dnode: node to be updated |
161 | * @pls: linked list of publication items to be packed into buffer chain | 166 | * @pls: linked list of publication items to be packed into buffer chain |
162 | */ | 167 | */ |
163 | static void named_distribute(struct sk_buff_head *list, u32 dnode, | 168 | static void named_distribute(struct net *net, struct sk_buff_head *list, |
164 | struct list_head *pls) | 169 | u32 dnode, struct list_head *pls) |
165 | { | 170 | { |
166 | struct publication *publ; | 171 | struct publication *publ; |
167 | struct sk_buff *skb = NULL; | 172 | struct sk_buff *skb = NULL; |
168 | struct distr_item *item = NULL; | 173 | struct distr_item *item = NULL; |
169 | uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE; | 174 | uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) * |
175 | ITEM_SIZE; | ||
170 | uint msg_rem = msg_dsz; | 176 | uint msg_rem = msg_dsz; |
171 | 177 | ||
172 | list_for_each_entry(publ, pls, local_list) { | 178 | list_for_each_entry(publ, pls, local_list) { |
173 | /* Prepare next buffer: */ | 179 | /* Prepare next buffer: */ |
174 | if (!skb) { | 180 | if (!skb) { |
175 | skb = named_prepare_buf(PUBLICATION, msg_rem, dnode); | 181 | skb = named_prepare_buf(net, PUBLICATION, msg_rem, |
182 | dnode); | ||
176 | if (!skb) { | 183 | if (!skb) { |
177 | pr_warn("Bulk publication failure\n"); | 184 | pr_warn("Bulk publication failure\n"); |
178 | return; | 185 | return; |
@@ -202,30 +209,32 @@ static void named_distribute(struct sk_buff_head *list, u32 dnode, | |||
202 | /** | 209 | /** |
203 | * tipc_named_node_up - tell specified node about all publications by this node | 210 | * tipc_named_node_up - tell specified node about all publications by this node |
204 | */ | 211 | */ |
205 | void tipc_named_node_up(u32 dnode) | 212 | void tipc_named_node_up(struct net *net, u32 dnode) |
206 | { | 213 | { |
214 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
207 | struct sk_buff_head head; | 215 | struct sk_buff_head head; |
208 | 216 | ||
209 | __skb_queue_head_init(&head); | 217 | __skb_queue_head_init(&head); |
210 | 218 | ||
211 | rcu_read_lock(); | 219 | rcu_read_lock(); |
212 | named_distribute(&head, dnode, | 220 | named_distribute(net, &head, dnode, |
213 | &tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]); | 221 | &tn->nametbl->publ_list[TIPC_CLUSTER_SCOPE]); |
214 | named_distribute(&head, dnode, | 222 | named_distribute(net, &head, dnode, |
215 | &tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]); | 223 | &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]); |
216 | rcu_read_unlock(); | 224 | rcu_read_unlock(); |
217 | 225 | ||
218 | tipc_link_xmit(&head, dnode, dnode); | 226 | tipc_link_xmit(net, &head, dnode, dnode); |
219 | } | 227 | } |
220 | 228 | ||
221 | static void tipc_publ_subscribe(struct publication *publ, u32 addr) | 229 | static void tipc_publ_subscribe(struct net *net, struct publication *publ, |
230 | u32 addr) | ||
222 | { | 231 | { |
223 | struct tipc_node *node; | 232 | struct tipc_node *node; |
224 | 233 | ||
225 | if (in_own_node(addr)) | 234 | if (in_own_node(net, addr)) |
226 | return; | 235 | return; |
227 | 236 | ||
228 | node = tipc_node_find(addr); | 237 | node = tipc_node_find(net, addr); |
229 | if (!node) { | 238 | if (!node) { |
230 | pr_warn("Node subscription rejected, unknown node 0x%x\n", | 239 | pr_warn("Node subscription rejected, unknown node 0x%x\n", |
231 | addr); | 240 | addr); |
@@ -235,19 +244,22 @@ static void tipc_publ_subscribe(struct publication *publ, u32 addr) | |||
235 | tipc_node_lock(node); | 244 | tipc_node_lock(node); |
236 | list_add_tail(&publ->nodesub_list, &node->publ_list); | 245 | list_add_tail(&publ->nodesub_list, &node->publ_list); |
237 | tipc_node_unlock(node); | 246 | tipc_node_unlock(node); |
247 | tipc_node_put(node); | ||
238 | } | 248 | } |
239 | 249 | ||
240 | static void tipc_publ_unsubscribe(struct publication *publ, u32 addr) | 250 | static void tipc_publ_unsubscribe(struct net *net, struct publication *publ, |
251 | u32 addr) | ||
241 | { | 252 | { |
242 | struct tipc_node *node; | 253 | struct tipc_node *node; |
243 | 254 | ||
244 | node = tipc_node_find(addr); | 255 | node = tipc_node_find(net, addr); |
245 | if (!node) | 256 | if (!node) |
246 | return; | 257 | return; |
247 | 258 | ||
248 | tipc_node_lock(node); | 259 | tipc_node_lock(node); |
249 | list_del_init(&publ->nodesub_list); | 260 | list_del_init(&publ->nodesub_list); |
250 | tipc_node_unlock(node); | 261 | tipc_node_unlock(node); |
262 | tipc_node_put(node); | ||
251 | } | 263 | } |
252 | 264 | ||
253 | /** | 265 | /** |
@@ -256,16 +268,17 @@ static void tipc_publ_unsubscribe(struct publication *publ, u32 addr) | |||
256 | * Invoked for each publication issued by a newly failed node. | 268 | * Invoked for each publication issued by a newly failed node. |
257 | * Removes publication structure from name table & deletes it. | 269 | * Removes publication structure from name table & deletes it. |
258 | */ | 270 | */ |
259 | static void tipc_publ_purge(struct publication *publ, u32 addr) | 271 | static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr) |
260 | { | 272 | { |
273 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
261 | struct publication *p; | 274 | struct publication *p; |
262 | 275 | ||
263 | spin_lock_bh(&tipc_nametbl_lock); | 276 | spin_lock_bh(&tn->nametbl_lock); |
264 | p = tipc_nametbl_remove_publ(publ->type, publ->lower, | 277 | p = tipc_nametbl_remove_publ(net, publ->type, publ->lower, |
265 | publ->node, publ->ref, publ->key); | 278 | publ->node, publ->ref, publ->key); |
266 | if (p) | 279 | if (p) |
267 | tipc_publ_unsubscribe(p, addr); | 280 | tipc_publ_unsubscribe(net, p, addr); |
268 | spin_unlock_bh(&tipc_nametbl_lock); | 281 | spin_unlock_bh(&tn->nametbl_lock); |
269 | 282 | ||
270 | if (p != publ) { | 283 | if (p != publ) { |
271 | pr_err("Unable to remove publication from failed node\n" | 284 | pr_err("Unable to remove publication from failed node\n" |
@@ -277,12 +290,12 @@ static void tipc_publ_purge(struct publication *publ, u32 addr) | |||
277 | kfree_rcu(p, rcu); | 290 | kfree_rcu(p, rcu); |
278 | } | 291 | } |
279 | 292 | ||
280 | void tipc_publ_notify(struct list_head *nsub_list, u32 addr) | 293 | void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) |
281 | { | 294 | { |
282 | struct publication *publ, *tmp; | 295 | struct publication *publ, *tmp; |
283 | 296 | ||
284 | list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list) | 297 | list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list) |
285 | tipc_publ_purge(publ, addr); | 298 | tipc_publ_purge(net, publ, addr); |
286 | } | 299 | } |
287 | 300 | ||
288 | /** | 301 | /** |
@@ -292,25 +305,28 @@ void tipc_publ_notify(struct list_head *nsub_list, u32 addr) | |||
292 | * tipc_nametbl_lock must be held. | 305 | * tipc_nametbl_lock must be held. |
293 | * Returns the publication item if successful, otherwise NULL. | 306 | * Returns the publication item if successful, otherwise NULL. |
294 | */ | 307 | */ |
295 | static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype) | 308 | static bool tipc_update_nametbl(struct net *net, struct distr_item *i, |
309 | u32 node, u32 dtype) | ||
296 | { | 310 | { |
297 | struct publication *publ = NULL; | 311 | struct publication *publ = NULL; |
298 | 312 | ||
299 | if (dtype == PUBLICATION) { | 313 | if (dtype == PUBLICATION) { |
300 | publ = tipc_nametbl_insert_publ(ntohl(i->type), ntohl(i->lower), | 314 | publ = tipc_nametbl_insert_publ(net, ntohl(i->type), |
315 | ntohl(i->lower), | ||
301 | ntohl(i->upper), | 316 | ntohl(i->upper), |
302 | TIPC_CLUSTER_SCOPE, node, | 317 | TIPC_CLUSTER_SCOPE, node, |
303 | ntohl(i->ref), ntohl(i->key)); | 318 | ntohl(i->ref), ntohl(i->key)); |
304 | if (publ) { | 319 | if (publ) { |
305 | tipc_publ_subscribe(publ, node); | 320 | tipc_publ_subscribe(net, publ, node); |
306 | return true; | 321 | return true; |
307 | } | 322 | } |
308 | } else if (dtype == WITHDRAWAL) { | 323 | } else if (dtype == WITHDRAWAL) { |
309 | publ = tipc_nametbl_remove_publ(ntohl(i->type), ntohl(i->lower), | 324 | publ = tipc_nametbl_remove_publ(net, ntohl(i->type), |
325 | ntohl(i->lower), | ||
310 | node, ntohl(i->ref), | 326 | node, ntohl(i->ref), |
311 | ntohl(i->key)); | 327 | ntohl(i->key)); |
312 | if (publ) { | 328 | if (publ) { |
313 | tipc_publ_unsubscribe(publ, node); | 329 | tipc_publ_unsubscribe(net, publ, node); |
314 | kfree_rcu(publ, rcu); | 330 | kfree_rcu(publ, rcu); |
315 | return true; | 331 | return true; |
316 | } | 332 | } |
@@ -343,7 +359,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) | |||
343 | * tipc_named_process_backlog - try to process any pending name table updates | 359 | * tipc_named_process_backlog - try to process any pending name table updates |
344 | * from the network. | 360 | * from the network. |
345 | */ | 361 | */ |
346 | void tipc_named_process_backlog(void) | 362 | void tipc_named_process_backlog(struct net *net) |
347 | { | 363 | { |
348 | struct distr_queue_item *e, *tmp; | 364 | struct distr_queue_item *e, *tmp; |
349 | char addr[16]; | 365 | char addr[16]; |
@@ -351,7 +367,7 @@ void tipc_named_process_backlog(void) | |||
351 | 367 | ||
352 | list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) { | 368 | list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) { |
353 | if (time_after(e->expires, now)) { | 369 | if (time_after(e->expires, now)) { |
354 | if (!tipc_update_nametbl(&e->i, e->node, e->dtype)) | 370 | if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype)) |
355 | continue; | 371 | continue; |
356 | } else { | 372 | } else { |
357 | tipc_addr_string_fill(addr, e->node); | 373 | tipc_addr_string_fill(addr, e->node); |
@@ -367,24 +383,34 @@ void tipc_named_process_backlog(void) | |||
367 | } | 383 | } |
368 | 384 | ||
369 | /** | 385 | /** |
370 | * tipc_named_rcv - process name table update message sent by another node | 386 | * tipc_named_rcv - process name table update messages sent by another node |
371 | */ | 387 | */ |
372 | void tipc_named_rcv(struct sk_buff *buf) | 388 | void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) |
373 | { | 389 | { |
374 | struct tipc_msg *msg = buf_msg(buf); | 390 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
375 | struct distr_item *item = (struct distr_item *)msg_data(msg); | 391 | struct tipc_msg *msg; |
376 | u32 count = msg_data_sz(msg) / ITEM_SIZE; | 392 | struct distr_item *item; |
377 | u32 node = msg_orignode(msg); | 393 | uint count; |
378 | 394 | u32 node; | |
379 | spin_lock_bh(&tipc_nametbl_lock); | 395 | struct sk_buff *skb; |
380 | while (count--) { | 396 | int mtype; |
381 | if (!tipc_update_nametbl(item, node, msg_type(msg))) | 397 | |
382 | tipc_named_add_backlog(item, msg_type(msg), node); | 398 | spin_lock_bh(&tn->nametbl_lock); |
383 | item++; | 399 | for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { |
400 | msg = buf_msg(skb); | ||
401 | mtype = msg_type(msg); | ||
402 | item = (struct distr_item *)msg_data(msg); | ||
403 | count = msg_data_sz(msg) / ITEM_SIZE; | ||
404 | node = msg_orignode(msg); | ||
405 | while (count--) { | ||
406 | if (!tipc_update_nametbl(net, item, node, mtype)) | ||
407 | tipc_named_add_backlog(item, mtype, node); | ||
408 | item++; | ||
409 | } | ||
410 | kfree_skb(skb); | ||
411 | tipc_named_process_backlog(net); | ||
384 | } | 412 | } |
385 | tipc_named_process_backlog(); | 413 | spin_unlock_bh(&tn->nametbl_lock); |
386 | spin_unlock_bh(&tipc_nametbl_lock); | ||
387 | kfree_skb(buf); | ||
388 | } | 414 | } |
389 | 415 | ||
390 | /** | 416 | /** |
@@ -394,17 +420,18 @@ void tipc_named_rcv(struct sk_buff *buf) | |||
394 | * All name table entries published by this node are updated to reflect | 420 | * All name table entries published by this node are updated to reflect |
395 | * the node's new network address. | 421 | * the node's new network address. |
396 | */ | 422 | */ |
397 | void tipc_named_reinit(void) | 423 | void tipc_named_reinit(struct net *net) |
398 | { | 424 | { |
425 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
399 | struct publication *publ; | 426 | struct publication *publ; |
400 | int scope; | 427 | int scope; |
401 | 428 | ||
402 | spin_lock_bh(&tipc_nametbl_lock); | 429 | spin_lock_bh(&tn->nametbl_lock); |
403 | 430 | ||
404 | for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++) | 431 | for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++) |
405 | list_for_each_entry_rcu(publ, &tipc_nametbl->publ_list[scope], | 432 | list_for_each_entry_rcu(publ, &tn->nametbl->publ_list[scope], |
406 | local_list) | 433 | local_list) |
407 | publ->node = tipc_own_addr; | 434 | publ->node = tn->own_addr; |
408 | 435 | ||
409 | spin_unlock_bh(&tipc_nametbl_lock); | 436 | spin_unlock_bh(&tn->nametbl_lock); |
410 | } | 437 | } |
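The recurring change in the hunks above is that state which used to live in globals (tipc_own_addr, tipc_nametbl, tipc_nametbl_lock) now lives in a per-namespace struct tipc_net reached through net_generic(net, tipc_net_id), so each function gains a struct net * argument. A rough, hypothetical userspace model of that shape (not the kernel API; the state is embedded directly instead of being looked up through the pernet registry):

#include <stdio.h>

/* Per-namespace TIPC state; these fields used to be globals. */
struct tipc_net {
	unsigned int own_addr;
	unsigned int net_id;
};

/* Minimal stand-in for struct net. */
struct net {
	struct tipc_net tipc;
};

/* Stand-in for net_generic(net, tipc_net_id). */
static struct tipc_net *tipc_net(struct net *net)
{
	return &net->tipc;
}

/* Functions that used globals now take the namespace explicitly. */
static void named_reinit(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);

	/* re-stamp locally issued publications with this namespace's address */
	printf("reinit publications for node %#x (net id %u)\n",
	       tn->own_addr, tn->net_id);
}

int main(void)
{
	struct net a = { .tipc = { .own_addr = 0x1001001, .net_id = 4711 } };
	struct net b = { .tipc = { .own_addr = 0x1001002, .net_id = 4711 } };

	named_reinit(&a);               /* each namespace keeps its own state */
	named_reinit(&b);
	return 0;
}

Because nothing is shared between the two instances, two TIPC stacks in different network namespaces can run side by side with distinct addresses, name tables and locks.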
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h index cef55cedcfb2..dd2d9fd80da2 100644 --- a/net/tipc/name_distr.h +++ b/net/tipc/name_distr.h | |||
@@ -67,13 +67,13 @@ struct distr_item { | |||
67 | __be32 key; | 67 | __be32 key; |
68 | }; | 68 | }; |
69 | 69 | ||
70 | struct sk_buff *tipc_named_publish(struct publication *publ); | 70 | struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); |
71 | struct sk_buff *tipc_named_withdraw(struct publication *publ); | 71 | struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); |
72 | void named_cluster_distribute(struct sk_buff *buf); | 72 | void named_cluster_distribute(struct net *net, struct sk_buff *buf); |
73 | void tipc_named_node_up(u32 dnode); | 73 | void tipc_named_node_up(struct net *net, u32 dnode); |
74 | void tipc_named_rcv(struct sk_buff *buf); | 74 | void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); |
75 | void tipc_named_reinit(void); | 75 | void tipc_named_reinit(struct net *net); |
76 | void tipc_named_process_backlog(void); | 76 | void tipc_named_process_backlog(struct net *net); |
77 | void tipc_publ_notify(struct list_head *nsub_list, u32 addr); | 77 | void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); |
78 | 78 | ||
79 | #endif | 79 | #endif |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index c8df0223371a..ab0ac62a1287 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/name_table.c: TIPC name table code | 2 | * net/tipc/name_table.c: TIPC name table code |
3 | * | 3 | * |
4 | * Copyright (c) 2000-2006, 2014, Ericsson AB | 4 | * Copyright (c) 2000-2006, 2014-2015, Ericsson AB |
5 | * Copyright (c) 2004-2008, 2010-2014, Wind River Systems | 5 | * Copyright (c) 2004-2008, 2010-2014, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
@@ -34,11 +34,15 @@ | |||
34 | * POSSIBILITY OF SUCH DAMAGE. | 34 | * POSSIBILITY OF SUCH DAMAGE. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include <net/sock.h> | ||
37 | #include "core.h" | 38 | #include "core.h" |
38 | #include "config.h" | 39 | #include "netlink.h" |
39 | #include "name_table.h" | 40 | #include "name_table.h" |
40 | #include "name_distr.h" | 41 | #include "name_distr.h" |
41 | #include "subscr.h" | 42 | #include "subscr.h" |
43 | #include "bcast.h" | ||
44 | #include "addr.h" | ||
45 | #include <net/genetlink.h> | ||
42 | 46 | ||
43 | #define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */ | 47 | #define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */ |
44 | 48 | ||
@@ -105,9 +109,6 @@ struct name_seq { | |||
105 | struct rcu_head rcu; | 109 | struct rcu_head rcu; |
106 | }; | 110 | }; |
107 | 111 | ||
108 | struct name_table *tipc_nametbl; | ||
109 | DEFINE_SPINLOCK(tipc_nametbl_lock); | ||
110 | |||
111 | static int hash(int x) | 112 | static int hash(int x) |
112 | { | 113 | { |
113 | return x & (TIPC_NAMETBL_SIZE - 1); | 114 | return x & (TIPC_NAMETBL_SIZE - 1); |
@@ -228,9 +229,11 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance) | |||
228 | /** | 229 | /** |
229 | * tipc_nameseq_insert_publ | 230 | * tipc_nameseq_insert_publ |
230 | */ | 231 | */ |
231 | static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, | 232 | static struct publication *tipc_nameseq_insert_publ(struct net *net, |
232 | u32 type, u32 lower, u32 upper, | 233 | struct name_seq *nseq, |
233 | u32 scope, u32 node, u32 port, u32 key) | 234 | u32 type, u32 lower, |
235 | u32 upper, u32 scope, | ||
236 | u32 node, u32 port, u32 key) | ||
234 | { | 237 | { |
235 | struct tipc_subscription *s; | 238 | struct tipc_subscription *s; |
236 | struct tipc_subscription *st; | 239 | struct tipc_subscription *st; |
@@ -315,12 +318,12 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, | |||
315 | list_add(&publ->zone_list, &info->zone_list); | 318 | list_add(&publ->zone_list, &info->zone_list); |
316 | info->zone_list_size++; | 319 | info->zone_list_size++; |
317 | 320 | ||
318 | if (in_own_cluster(node)) { | 321 | if (in_own_cluster(net, node)) { |
319 | list_add(&publ->cluster_list, &info->cluster_list); | 322 | list_add(&publ->cluster_list, &info->cluster_list); |
320 | info->cluster_list_size++; | 323 | info->cluster_list_size++; |
321 | } | 324 | } |
322 | 325 | ||
323 | if (in_own_node(node)) { | 326 | if (in_own_node(net, node)) { |
324 | list_add(&publ->node_list, &info->node_list); | 327 | list_add(&publ->node_list, &info->node_list); |
325 | info->node_list_size++; | 328 | info->node_list_size++; |
326 | } | 329 | } |
@@ -349,8 +352,10 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, | |||
349 | * A failed withdraw request simply returns a failure indication and lets the | 352 | * A failed withdraw request simply returns a failure indication and lets the |
350 | * caller issue any error or warning messages associated with such a problem. | 353 | * caller issue any error or warning messages associated with such a problem. |
351 | */ | 354 | */ |
352 | static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst, | 355 | static struct publication *tipc_nameseq_remove_publ(struct net *net, |
353 | u32 node, u32 ref, u32 key) | 356 | struct name_seq *nseq, |
357 | u32 inst, u32 node, | ||
358 | u32 ref, u32 key) | ||
354 | { | 359 | { |
355 | struct publication *publ; | 360 | struct publication *publ; |
356 | struct sub_seq *sseq = nameseq_find_subseq(nseq, inst); | 361 | struct sub_seq *sseq = nameseq_find_subseq(nseq, inst); |
@@ -378,13 +383,13 @@ found: | |||
378 | info->zone_list_size--; | 383 | info->zone_list_size--; |
379 | 384 | ||
380 | /* Remove publication from cluster scope list, if present */ | 385 | /* Remove publication from cluster scope list, if present */ |
381 | if (in_own_cluster(node)) { | 386 | if (in_own_cluster(net, node)) { |
382 | list_del(&publ->cluster_list); | 387 | list_del(&publ->cluster_list); |
383 | info->cluster_list_size--; | 388 | info->cluster_list_size--; |
384 | } | 389 | } |
385 | 390 | ||
386 | /* Remove publication from node scope list, if present */ | 391 | /* Remove publication from node scope list, if present */ |
387 | if (in_own_node(node)) { | 392 | if (in_own_node(net, node)) { |
388 | list_del(&publ->node_list); | 393 | list_del(&publ->node_list); |
389 | info->node_list_size--; | 394 | info->node_list_size--; |
390 | } | 395 | } |
@@ -447,12 +452,13 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq, | |||
447 | } | 452 | } |
448 | } | 453 | } |
449 | 454 | ||
450 | static struct name_seq *nametbl_find_seq(u32 type) | 455 | static struct name_seq *nametbl_find_seq(struct net *net, u32 type) |
451 | { | 456 | { |
457 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
452 | struct hlist_head *seq_head; | 458 | struct hlist_head *seq_head; |
453 | struct name_seq *ns; | 459 | struct name_seq *ns; |
454 | 460 | ||
455 | seq_head = &tipc_nametbl->seq_hlist[hash(type)]; | 461 | seq_head = &tn->nametbl->seq_hlist[hash(type)]; |
456 | hlist_for_each_entry_rcu(ns, seq_head, ns_list) { | 462 | hlist_for_each_entry_rcu(ns, seq_head, ns_list) { |
457 | if (ns->type == type) | 463 | if (ns->type == type) |
458 | return ns; | 464 | return ns; |
@@ -461,11 +467,13 @@ static struct name_seq *nametbl_find_seq(u32 type) | |||
461 | return NULL; | 467 | return NULL; |
462 | }; | 468 | }; |
463 | 469 | ||
464 | struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, | 470 | struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type, |
465 | u32 scope, u32 node, u32 port, u32 key) | 471 | u32 lower, u32 upper, u32 scope, |
472 | u32 node, u32 port, u32 key) | ||
466 | { | 473 | { |
474 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
467 | struct publication *publ; | 475 | struct publication *publ; |
468 | struct name_seq *seq = nametbl_find_seq(type); | 476 | struct name_seq *seq = nametbl_find_seq(net, type); |
469 | int index = hash(type); | 477 | int index = hash(type); |
470 | 478 | ||
471 | if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) || | 479 | if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) || |
@@ -476,29 +484,29 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, | |||
476 | } | 484 | } |
477 | 485 | ||
478 | if (!seq) | 486 | if (!seq) |
479 | seq = tipc_nameseq_create(type, | 487 | seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]); |
480 | &tipc_nametbl->seq_hlist[index]); | ||
481 | if (!seq) | 488 | if (!seq) |
482 | return NULL; | 489 | return NULL; |
483 | 490 | ||
484 | spin_lock_bh(&seq->lock); | 491 | spin_lock_bh(&seq->lock); |
485 | publ = tipc_nameseq_insert_publ(seq, type, lower, upper, | 492 | publ = tipc_nameseq_insert_publ(net, seq, type, lower, upper, |
486 | scope, node, port, key); | 493 | scope, node, port, key); |
487 | spin_unlock_bh(&seq->lock); | 494 | spin_unlock_bh(&seq->lock); |
488 | return publ; | 495 | return publ; |
489 | } | 496 | } |
490 | 497 | ||
491 | struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, | 498 | struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, |
492 | u32 node, u32 ref, u32 key) | 499 | u32 lower, u32 node, u32 ref, |
500 | u32 key) | ||
493 | { | 501 | { |
494 | struct publication *publ; | 502 | struct publication *publ; |
495 | struct name_seq *seq = nametbl_find_seq(type); | 503 | struct name_seq *seq = nametbl_find_seq(net, type); |
496 | 504 | ||
497 | if (!seq) | 505 | if (!seq) |
498 | return NULL; | 506 | return NULL; |
499 | 507 | ||
500 | spin_lock_bh(&seq->lock); | 508 | spin_lock_bh(&seq->lock); |
501 | publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key); | 509 | publ = tipc_nameseq_remove_publ(net, seq, lower, node, ref, key); |
502 | if (!seq->first_free && list_empty(&seq->subscriptions)) { | 510 | if (!seq->first_free && list_empty(&seq->subscriptions)) { |
503 | hlist_del_init_rcu(&seq->ns_list); | 511 | hlist_del_init_rcu(&seq->ns_list); |
504 | kfree(seq->sseqs); | 512 | kfree(seq->sseqs); |
@@ -523,8 +531,10 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, | |||
523 | * - if name translation is attempted and fails, sets 'destnode' to 0 | 531 | * - if name translation is attempted and fails, sets 'destnode' to 0 |
524 | * and returns 0 | 532 | * and returns 0 |
525 | */ | 533 | */ |
526 | u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) | 534 | u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, |
535 | u32 *destnode) | ||
527 | { | 536 | { |
537 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
528 | struct sub_seq *sseq; | 538 | struct sub_seq *sseq; |
529 | struct name_info *info; | 539 | struct name_info *info; |
530 | struct publication *publ; | 540 | struct publication *publ; |
@@ -532,11 +542,11 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) | |||
532 | u32 ref = 0; | 542 | u32 ref = 0; |
533 | u32 node = 0; | 543 | u32 node = 0; |
534 | 544 | ||
535 | if (!tipc_in_scope(*destnode, tipc_own_addr)) | 545 | if (!tipc_in_scope(*destnode, tn->own_addr)) |
536 | return 0; | 546 | return 0; |
537 | 547 | ||
538 | rcu_read_lock(); | 548 | rcu_read_lock(); |
539 | seq = nametbl_find_seq(type); | 549 | seq = nametbl_find_seq(net, type); |
540 | if (unlikely(!seq)) | 550 | if (unlikely(!seq)) |
541 | goto not_found; | 551 | goto not_found; |
542 | spin_lock_bh(&seq->lock); | 552 | spin_lock_bh(&seq->lock); |
@@ -569,13 +579,13 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) | |||
569 | } | 579 | } |
570 | 580 | ||
571 | /* Round-Robin Algorithm */ | 581 | /* Round-Robin Algorithm */ |
572 | else if (*destnode == tipc_own_addr) { | 582 | else if (*destnode == tn->own_addr) { |
573 | if (list_empty(&info->node_list)) | 583 | if (list_empty(&info->node_list)) |
574 | goto no_match; | 584 | goto no_match; |
575 | publ = list_first_entry(&info->node_list, struct publication, | 585 | publ = list_first_entry(&info->node_list, struct publication, |
576 | node_list); | 586 | node_list); |
577 | list_move_tail(&publ->node_list, &info->node_list); | 587 | list_move_tail(&publ->node_list, &info->node_list); |
578 | } else if (in_own_cluster_exact(*destnode)) { | 588 | } else if (in_own_cluster_exact(net, *destnode)) { |
579 | if (list_empty(&info->cluster_list)) | 589 | if (list_empty(&info->cluster_list)) |
580 | goto no_match; | 590 | goto no_match; |
581 | publ = list_first_entry(&info->cluster_list, struct publication, | 591 | publ = list_first_entry(&info->cluster_list, struct publication, |
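In the translate path above, when the destination resolves within the own node, the code takes the first publication on the per-scope list and then moves it to the tail with list_move_tail(), so successive lookups are spread round-robin over all matching publications. A tiny userspace model of that effect, using a rotating index instead of a kernel list (names are illustrative only):

#include <stdio.h>

#define NPUBL 3

struct publ { unsigned int port; };

static struct publ publs[NPUBL] = { { 100 }, { 101 }, { 102 } };
static unsigned int rr_next;            /* index of the current "list head" */

/* Return the publication at the head, then rotate the head to the tail. */
static struct publ *translate_rr(void)
{
	struct publ *p = &publs[rr_next];

	rr_next = (rr_next + 1) % NPUBL; /* models list_move_tail() */
	return p;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("lookup %d -> port %u\n", i, translate_rr()->port);
	return 0;
}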
@@ -609,8 +619,8 @@ not_found: | |||
609 | * | 619 | * |
610 | * Returns non-zero if any off-node ports overlap | 620 | * Returns non-zero if any off-node ports overlap |
611 | */ | 621 | */ |
612 | int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, | 622 | int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper, |
613 | struct tipc_port_list *dports) | 623 | u32 limit, struct tipc_plist *dports) |
614 | { | 624 | { |
615 | struct name_seq *seq; | 625 | struct name_seq *seq; |
616 | struct sub_seq *sseq; | 626 | struct sub_seq *sseq; |
@@ -619,7 +629,7 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, | |||
619 | int res = 0; | 629 | int res = 0; |
620 | 630 | ||
621 | rcu_read_lock(); | 631 | rcu_read_lock(); |
622 | seq = nametbl_find_seq(type); | 632 | seq = nametbl_find_seq(net, type); |
623 | if (!seq) | 633 | if (!seq) |
624 | goto exit; | 634 | goto exit; |
625 | 635 | ||
@@ -635,7 +645,7 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, | |||
635 | info = sseq->info; | 645 | info = sseq->info; |
636 | list_for_each_entry(publ, &info->node_list, node_list) { | 646 | list_for_each_entry(publ, &info->node_list, node_list) { |
637 | if (publ->scope <= limit) | 647 | if (publ->scope <= limit) |
638 | tipc_port_list_add(dports, publ->ref); | 648 | tipc_plist_push(dports, publ->ref); |
639 | } | 649 | } |
640 | 650 | ||
641 | if (info->cluster_list_size != info->node_list_size) | 651 | if (info->cluster_list_size != info->node_list_size) |
@@ -650,50 +660,55 @@ exit: | |||
650 | /* | 660 | /* |
651 | * tipc_nametbl_publish - add name publication to network name tables | 661 | * tipc_nametbl_publish - add name publication to network name tables |
652 | */ | 662 | */ |
653 | struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, | 663 | struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, |
654 | u32 scope, u32 port_ref, u32 key) | 664 | u32 upper, u32 scope, u32 port_ref, |
665 | u32 key) | ||
655 | { | 666 | { |
656 | struct publication *publ; | 667 | struct publication *publ; |
657 | struct sk_buff *buf = NULL; | 668 | struct sk_buff *buf = NULL; |
669 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
658 | 670 | ||
659 | spin_lock_bh(&tipc_nametbl_lock); | 671 | spin_lock_bh(&tn->nametbl_lock); |
660 | if (tipc_nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) { | 672 | if (tn->nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) { |
661 | pr_warn("Publication failed, local publication limit reached (%u)\n", | 673 | pr_warn("Publication failed, local publication limit reached (%u)\n", |
662 | TIPC_MAX_PUBLICATIONS); | 674 | TIPC_MAX_PUBLICATIONS); |
663 | spin_unlock_bh(&tipc_nametbl_lock); | 675 | spin_unlock_bh(&tn->nametbl_lock); |
664 | return NULL; | 676 | return NULL; |
665 | } | 677 | } |
666 | 678 | ||
667 | publ = tipc_nametbl_insert_publ(type, lower, upper, scope, | 679 | publ = tipc_nametbl_insert_publ(net, type, lower, upper, scope, |
668 | tipc_own_addr, port_ref, key); | 680 | tn->own_addr, port_ref, key); |
669 | if (likely(publ)) { | 681 | if (likely(publ)) { |
670 | tipc_nametbl->local_publ_count++; | 682 | tn->nametbl->local_publ_count++; |
671 | buf = tipc_named_publish(publ); | 683 | buf = tipc_named_publish(net, publ); |
672 | /* Any pending external events? */ | 684 | /* Any pending external events? */ |
673 | tipc_named_process_backlog(); | 685 | tipc_named_process_backlog(net); |
674 | } | 686 | } |
675 | spin_unlock_bh(&tipc_nametbl_lock); | 687 | spin_unlock_bh(&tn->nametbl_lock); |
676 | 688 | ||
677 | if (buf) | 689 | if (buf) |
678 | named_cluster_distribute(buf); | 690 | named_cluster_distribute(net, buf); |
679 | return publ; | 691 | return publ; |
680 | } | 692 | } |
681 | 693 | ||
682 | /** | 694 | /** |
683 | * tipc_nametbl_withdraw - withdraw name publication from network name tables | 695 | * tipc_nametbl_withdraw - withdraw name publication from network name tables |
684 | */ | 696 | */ |
685 | int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) | 697 | int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref, |
698 | u32 key) | ||
686 | { | 699 | { |
687 | struct publication *publ; | 700 | struct publication *publ; |
688 | struct sk_buff *skb = NULL; | 701 | struct sk_buff *skb = NULL; |
702 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
689 | 703 | ||
690 | spin_lock_bh(&tipc_nametbl_lock); | 704 | spin_lock_bh(&tn->nametbl_lock); |
691 | publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); | 705 | publ = tipc_nametbl_remove_publ(net, type, lower, tn->own_addr, |
706 | ref, key); | ||
692 | if (likely(publ)) { | 707 | if (likely(publ)) { |
693 | tipc_nametbl->local_publ_count--; | 708 | tn->nametbl->local_publ_count--; |
694 | skb = tipc_named_withdraw(publ); | 709 | skb = tipc_named_withdraw(net, publ); |
695 | /* Any pending external events? */ | 710 | /* Any pending external events? */ |
696 | tipc_named_process_backlog(); | 711 | tipc_named_process_backlog(net); |
697 | list_del_init(&publ->pport_list); | 712 | list_del_init(&publ->pport_list); |
698 | kfree_rcu(publ, rcu); | 713 | kfree_rcu(publ, rcu); |
699 | } else { | 714 | } else { |
@@ -701,10 +716,10 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) | |||
701 | "(type=%u, lower=%u, ref=%u, key=%u)\n", | 716 | "(type=%u, lower=%u, ref=%u, key=%u)\n", |
702 | type, lower, ref, key); | 717 | type, lower, ref, key); |
703 | } | 718 | } |
704 | spin_unlock_bh(&tipc_nametbl_lock); | 719 | spin_unlock_bh(&tn->nametbl_lock); |
705 | 720 | ||
706 | if (skb) { | 721 | if (skb) { |
707 | named_cluster_distribute(skb); | 722 | named_cluster_distribute(net, skb); |
708 | return 1; | 723 | return 1; |
709 | } | 724 | } |
710 | return 0; | 725 | return 0; |
@@ -715,15 +730,15 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) | |||
715 | */ | 730 | */ |
716 | void tipc_nametbl_subscribe(struct tipc_subscription *s) | 731 | void tipc_nametbl_subscribe(struct tipc_subscription *s) |
717 | { | 732 | { |
733 | struct tipc_net *tn = net_generic(s->net, tipc_net_id); | ||
718 | u32 type = s->seq.type; | 734 | u32 type = s->seq.type; |
719 | int index = hash(type); | 735 | int index = hash(type); |
720 | struct name_seq *seq; | 736 | struct name_seq *seq; |
721 | 737 | ||
722 | spin_lock_bh(&tipc_nametbl_lock); | 738 | spin_lock_bh(&tn->nametbl_lock); |
723 | seq = nametbl_find_seq(type); | 739 | seq = nametbl_find_seq(s->net, type); |
724 | if (!seq) | 740 | if (!seq) |
725 | seq = tipc_nameseq_create(type, | 741 | seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]); |
726 | &tipc_nametbl->seq_hlist[index]); | ||
727 | if (seq) { | 742 | if (seq) { |
728 | spin_lock_bh(&seq->lock); | 743 | spin_lock_bh(&seq->lock); |
729 | tipc_nameseq_subscribe(seq, s); | 744 | tipc_nameseq_subscribe(seq, s); |
@@ -732,7 +747,7 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s) | |||
732 | pr_warn("Failed to create subscription for {%u,%u,%u}\n", | 747 | pr_warn("Failed to create subscription for {%u,%u,%u}\n", |
733 | s->seq.type, s->seq.lower, s->seq.upper); | 748 | s->seq.type, s->seq.lower, s->seq.upper); |
734 | } | 749 | } |
735 | spin_unlock_bh(&tipc_nametbl_lock); | 750 | spin_unlock_bh(&tn->nametbl_lock); |
736 | } | 751 | } |
737 | 752 | ||
738 | /** | 753 | /** |
@@ -740,10 +755,11 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s) | |||
740 | */ | 755 | */ |
741 | void tipc_nametbl_unsubscribe(struct tipc_subscription *s) | 756 | void tipc_nametbl_unsubscribe(struct tipc_subscription *s) |
742 | { | 757 | { |
758 | struct tipc_net *tn = net_generic(s->net, tipc_net_id); | ||
743 | struct name_seq *seq; | 759 | struct name_seq *seq; |
744 | 760 | ||
745 | spin_lock_bh(&tipc_nametbl_lock); | 761 | spin_lock_bh(&tn->nametbl_lock); |
746 | seq = nametbl_find_seq(s->seq.type); | 762 | seq = nametbl_find_seq(s->net, s->seq.type); |
747 | if (seq != NULL) { | 763 | if (seq != NULL) { |
748 | spin_lock_bh(&seq->lock); | 764 | spin_lock_bh(&seq->lock); |
749 | list_del_init(&s->nameseq_list); | 765 | list_del_init(&s->nameseq_list); |
@@ -756,193 +772,13 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s) | |||
756 | spin_unlock_bh(&seq->lock); | 772 | spin_unlock_bh(&seq->lock); |
757 | } | 773 | } |
758 | } | 774 | } |
759 | spin_unlock_bh(&tipc_nametbl_lock); | 775 | spin_unlock_bh(&tn->nametbl_lock); |
760 | } | ||
761 | |||
762 | /** | ||
763 | * subseq_list - print specified sub-sequence contents into the given buffer | ||
764 | */ | ||
765 | static int subseq_list(struct sub_seq *sseq, char *buf, int len, u32 depth, | ||
766 | u32 index) | ||
767 | { | ||
768 | char portIdStr[27]; | ||
769 | const char *scope_str[] = {"", " zone", " cluster", " node"}; | ||
770 | struct publication *publ; | ||
771 | struct name_info *info; | ||
772 | int ret; | ||
773 | |||
774 | ret = tipc_snprintf(buf, len, "%-10u %-10u ", sseq->lower, sseq->upper); | ||
775 | |||
776 | if (depth == 2) { | ||
777 | ret += tipc_snprintf(buf - ret, len + ret, "\n"); | ||
778 | return ret; | ||
779 | } | ||
780 | |||
781 | info = sseq->info; | ||
782 | |||
783 | list_for_each_entry(publ, &info->zone_list, zone_list) { | ||
784 | sprintf(portIdStr, "<%u.%u.%u:%u>", | ||
785 | tipc_zone(publ->node), tipc_cluster(publ->node), | ||
786 | tipc_node(publ->node), publ->ref); | ||
787 | ret += tipc_snprintf(buf + ret, len - ret, "%-26s ", portIdStr); | ||
788 | if (depth > 3) { | ||
789 | ret += tipc_snprintf(buf + ret, len - ret, "%-10u %s", | ||
790 | publ->key, scope_str[publ->scope]); | ||
791 | } | ||
792 | if (!list_is_last(&publ->zone_list, &info->zone_list)) | ||
793 | ret += tipc_snprintf(buf + ret, len - ret, | ||
794 | "\n%33s", " "); | ||
795 | } | ||
796 | |||
797 | ret += tipc_snprintf(buf + ret, len - ret, "\n"); | ||
798 | return ret; | ||
799 | } | ||
800 | |||
801 | /** | ||
802 | * nameseq_list - print specified name sequence contents into the given buffer | ||
803 | */ | ||
804 | static int nameseq_list(struct name_seq *seq, char *buf, int len, u32 depth, | ||
805 | u32 type, u32 lowbound, u32 upbound, u32 index) | ||
806 | { | ||
807 | struct sub_seq *sseq; | ||
808 | char typearea[11]; | ||
809 | int ret = 0; | ||
810 | |||
811 | if (seq->first_free == 0) | ||
812 | return 0; | ||
813 | |||
814 | sprintf(typearea, "%-10u", seq->type); | ||
815 | |||
816 | if (depth == 1) { | ||
817 | ret += tipc_snprintf(buf, len, "%s\n", typearea); | ||
818 | return ret; | ||
819 | } | ||
820 | |||
821 | for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) { | ||
822 | if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) { | ||
823 | ret += tipc_snprintf(buf + ret, len - ret, "%s ", | ||
824 | typearea); | ||
825 | spin_lock_bh(&seq->lock); | ||
826 | ret += subseq_list(sseq, buf + ret, len - ret, | ||
827 | depth, index); | ||
828 | spin_unlock_bh(&seq->lock); | ||
829 | sprintf(typearea, "%10s", " "); | ||
830 | } | ||
831 | } | ||
832 | return ret; | ||
833 | } | ||
834 | |||
835 | /** | ||
836 | * nametbl_header - print name table header into the given buffer | ||
837 | */ | ||
838 | static int nametbl_header(char *buf, int len, u32 depth) | ||
839 | { | ||
840 | const char *header[] = { | ||
841 | "Type ", | ||
842 | "Lower Upper ", | ||
843 | "Port Identity ", | ||
844 | "Publication Scope" | ||
845 | }; | ||
846 | |||
847 | int i; | ||
848 | int ret = 0; | ||
849 | |||
850 | if (depth > 4) | ||
851 | depth = 4; | ||
852 | for (i = 0; i < depth; i++) | ||
853 | ret += tipc_snprintf(buf + ret, len - ret, header[i]); | ||
854 | ret += tipc_snprintf(buf + ret, len - ret, "\n"); | ||
855 | return ret; | ||
856 | } | ||
857 | |||
858 | /** | ||
859 | * nametbl_list - print specified name table contents into the given buffer | ||
860 | */ | ||
861 | static int nametbl_list(char *buf, int len, u32 depth_info, | ||
862 | u32 type, u32 lowbound, u32 upbound) | ||
863 | { | ||
864 | struct hlist_head *seq_head; | ||
865 | struct name_seq *seq; | ||
866 | int all_types; | ||
867 | int ret = 0; | ||
868 | u32 depth; | ||
869 | u32 i; | ||
870 | |||
871 | all_types = (depth_info & TIPC_NTQ_ALLTYPES); | ||
872 | depth = (depth_info & ~TIPC_NTQ_ALLTYPES); | ||
873 | |||
874 | if (depth == 0) | ||
875 | return 0; | ||
876 | |||
877 | if (all_types) { | ||
878 | /* display all entries in name table to specified depth */ | ||
879 | ret += nametbl_header(buf, len, depth); | ||
880 | lowbound = 0; | ||
881 | upbound = ~0; | ||
882 | for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { | ||
883 | seq_head = &tipc_nametbl->seq_hlist[i]; | ||
884 | hlist_for_each_entry_rcu(seq, seq_head, ns_list) { | ||
885 | ret += nameseq_list(seq, buf + ret, len - ret, | ||
886 | depth, seq->type, | ||
887 | lowbound, upbound, i); | ||
888 | } | ||
889 | } | ||
890 | } else { | ||
891 | /* display only the sequence that matches the specified type */ | ||
892 | if (upbound < lowbound) { | ||
893 | ret += tipc_snprintf(buf + ret, len - ret, | ||
894 | "invalid name sequence specified\n"); | ||
895 | return ret; | ||
896 | } | ||
897 | ret += nametbl_header(buf + ret, len - ret, depth); | ||
898 | i = hash(type); | ||
899 | seq_head = &tipc_nametbl->seq_hlist[i]; | ||
900 | hlist_for_each_entry_rcu(seq, seq_head, ns_list) { | ||
901 | if (seq->type == type) { | ||
902 | ret += nameseq_list(seq, buf + ret, len - ret, | ||
903 | depth, type, | ||
904 | lowbound, upbound, i); | ||
905 | break; | ||
906 | } | ||
907 | } | ||
908 | } | ||
909 | return ret; | ||
910 | } | 776 | } |
911 | 777 | ||
912 | struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space) | 778 | int tipc_nametbl_init(struct net *net) |
913 | { | ||
914 | struct sk_buff *buf; | ||
915 | struct tipc_name_table_query *argv; | ||
916 | struct tlv_desc *rep_tlv; | ||
917 | char *pb; | ||
918 | int pb_len; | ||
919 | int str_len; | ||
920 | |||
921 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY)) | ||
922 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
923 | |||
924 | buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN)); | ||
925 | if (!buf) | ||
926 | return NULL; | ||
927 | |||
928 | rep_tlv = (struct tlv_desc *)buf->data; | ||
929 | pb = TLV_DATA(rep_tlv); | ||
930 | pb_len = ULTRA_STRING_MAX_LEN; | ||
931 | argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area); | ||
932 | rcu_read_lock(); | ||
933 | str_len = nametbl_list(pb, pb_len, ntohl(argv->depth), | ||
934 | ntohl(argv->type), | ||
935 | ntohl(argv->lowbound), ntohl(argv->upbound)); | ||
936 | rcu_read_unlock(); | ||
937 | str_len += 1; /* for "\0" */ | ||
938 | skb_put(buf, TLV_SPACE(str_len)); | ||
939 | TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); | ||
940 | |||
941 | return buf; | ||
942 | } | ||
943 | |||
944 | int tipc_nametbl_init(void) | ||
945 | { | 779 | { |
780 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
781 | struct name_table *tipc_nametbl; | ||
946 | int i; | 782 | int i; |
947 | 783 | ||
948 | tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC); | 784 | tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC); |
@@ -955,6 +791,8 @@ int tipc_nametbl_init(void) | |||
955 | INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]); | 791 | INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]); |
956 | INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]); | 792 | INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]); |
957 | INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]); | 793 | INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]); |
794 | tn->nametbl = tipc_nametbl; | ||
795 | spin_lock_init(&tn->nametbl_lock); | ||
958 | return 0; | 796 | return 0; |
959 | } | 797 | } |
960 | 798 | ||
@@ -963,7 +801,7 @@ int tipc_nametbl_init(void) | |||
963 | * | 801 | * |
964 | * tipc_nametbl_lock must be held when calling this function | 802 | * tipc_nametbl_lock must be held when calling this function |
965 | */ | 803 | */ |
966 | static void tipc_purge_publications(struct name_seq *seq) | 804 | static void tipc_purge_publications(struct net *net, struct name_seq *seq) |
967 | { | 805 | { |
968 | struct publication *publ, *safe; | 806 | struct publication *publ, *safe; |
969 | struct sub_seq *sseq; | 807 | struct sub_seq *sseq; |
@@ -973,7 +811,7 @@ static void tipc_purge_publications(struct name_seq *seq) | |||
973 | sseq = seq->sseqs; | 811 | sseq = seq->sseqs; |
974 | info = sseq->info; | 812 | info = sseq->info; |
975 | list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { | 813 | list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { |
976 | tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node, | 814 | tipc_nameseq_remove_publ(net, seq, publ->lower, publ->node, |
977 | publ->ref, publ->key); | 815 | publ->ref, publ->key); |
978 | kfree_rcu(publ, rcu); | 816 | kfree_rcu(publ, rcu); |
979 | } | 817 | } |
@@ -984,25 +822,27 @@ static void tipc_purge_publications(struct name_seq *seq) | |||
984 | kfree_rcu(seq, rcu); | 822 | kfree_rcu(seq, rcu); |
985 | } | 823 | } |
986 | 824 | ||
987 | void tipc_nametbl_stop(void) | 825 | void tipc_nametbl_stop(struct net *net) |
988 | { | 826 | { |
989 | u32 i; | 827 | u32 i; |
990 | struct name_seq *seq; | 828 | struct name_seq *seq; |
991 | struct hlist_head *seq_head; | 829 | struct hlist_head *seq_head; |
830 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
831 | struct name_table *tipc_nametbl = tn->nametbl; | ||
992 | 832 | ||
993 | /* Verify name table is empty and purge any lingering | 833 | /* Verify name table is empty and purge any lingering |
994 | * publications, then release the name table | 834 | * publications, then release the name table |
995 | */ | 835 | */ |
996 | spin_lock_bh(&tipc_nametbl_lock); | 836 | spin_lock_bh(&tn->nametbl_lock); |
997 | for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { | 837 | for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { |
998 | if (hlist_empty(&tipc_nametbl->seq_hlist[i])) | 838 | if (hlist_empty(&tipc_nametbl->seq_hlist[i])) |
999 | continue; | 839 | continue; |
1000 | seq_head = &tipc_nametbl->seq_hlist[i]; | 840 | seq_head = &tipc_nametbl->seq_hlist[i]; |
1001 | hlist_for_each_entry_rcu(seq, seq_head, ns_list) { | 841 | hlist_for_each_entry_rcu(seq, seq_head, ns_list) { |
1002 | tipc_purge_publications(seq); | 842 | tipc_purge_publications(net, seq); |
1003 | } | 843 | } |
1004 | } | 844 | } |
1005 | spin_unlock_bh(&tipc_nametbl_lock); | 845 | spin_unlock_bh(&tn->nametbl_lock); |
1006 | 846 | ||
1007 | synchronize_net(); | 847 | synchronize_net(); |
1008 | kfree(tipc_nametbl); | 848 | kfree(tipc_nametbl); |
@@ -1033,7 +873,7 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg, | |||
1033 | *last_publ = p->key; | 873 | *last_publ = p->key; |
1034 | 874 | ||
1035 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, | 875 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, |
1036 | &tipc_genl_v2_family, NLM_F_MULTI, | 876 | &tipc_genl_family, NLM_F_MULTI, |
1037 | TIPC_NL_NAME_TABLE_GET); | 877 | TIPC_NL_NAME_TABLE_GET); |
1038 | if (!hdr) | 878 | if (!hdr) |
1039 | return -EMSGSIZE; | 879 | return -EMSGSIZE; |
@@ -1106,9 +946,10 @@ static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq, | |||
1106 | return 0; | 946 | return 0; |
1107 | } | 947 | } |
1108 | 948 | ||
1109 | static int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type, | 949 | static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg, |
1110 | u32 *last_lower, u32 *last_publ) | 950 | u32 *last_type, u32 *last_lower, u32 *last_publ) |
1111 | { | 951 | { |
952 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
1112 | struct hlist_head *seq_head; | 953 | struct hlist_head *seq_head; |
1113 | struct name_seq *seq = NULL; | 954 | struct name_seq *seq = NULL; |
1114 | int err; | 955 | int err; |
@@ -1120,10 +961,10 @@ static int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type, | |||
1120 | i = 0; | 961 | i = 0; |
1121 | 962 | ||
1122 | for (; i < TIPC_NAMETBL_SIZE; i++) { | 963 | for (; i < TIPC_NAMETBL_SIZE; i++) { |
1123 | seq_head = &tipc_nametbl->seq_hlist[i]; | 964 | seq_head = &tn->nametbl->seq_hlist[i]; |
1124 | 965 | ||
1125 | if (*last_type) { | 966 | if (*last_type) { |
1126 | seq = nametbl_find_seq(*last_type); | 967 | seq = nametbl_find_seq(net, *last_type); |
1127 | if (!seq) | 968 | if (!seq) |
1128 | return -EPIPE; | 969 | return -EPIPE; |
1129 | } else { | 970 | } else { |
@@ -1157,6 +998,7 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
1157 | u32 last_type = cb->args[0]; | 998 | u32 last_type = cb->args[0]; |
1158 | u32 last_lower = cb->args[1]; | 999 | u32 last_lower = cb->args[1]; |
1159 | u32 last_publ = cb->args[2]; | 1000 | u32 last_publ = cb->args[2]; |
1001 | struct net *net = sock_net(skb->sk); | ||
1160 | struct tipc_nl_msg msg; | 1002 | struct tipc_nl_msg msg; |
1161 | 1003 | ||
1162 | if (done) | 1004 | if (done) |
@@ -1167,7 +1009,7 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
1167 | msg.seq = cb->nlh->nlmsg_seq; | 1009 | msg.seq = cb->nlh->nlmsg_seq; |
1168 | 1010 | ||
1169 | rcu_read_lock(); | 1011 | rcu_read_lock(); |
1170 | err = __tipc_nl_seq_list(&msg, &last_type, &last_lower, &last_publ); | 1012 | err = tipc_nl_seq_list(net, &msg, &last_type, &last_lower, &last_publ); |
1171 | if (!err) { | 1013 | if (!err) { |
1172 | done = 1; | 1014 | done = 1; |
1173 | } else if (err != -EMSGSIZE) { | 1015 | } else if (err != -EMSGSIZE) { |
@@ -1188,3 +1030,41 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
1188 | 1030 | ||
1189 | return skb->len; | 1031 | return skb->len; |
1190 | } | 1032 | } |
1033 | |||
1034 | void tipc_plist_push(struct tipc_plist *pl, u32 port) | ||
1035 | { | ||
1036 | struct tipc_plist *nl; | ||
1037 | |||
1038 | if (likely(!pl->port)) { | ||
1039 | pl->port = port; | ||
1040 | return; | ||
1041 | } | ||
1042 | if (pl->port == port) | ||
1043 | return; | ||
1044 | list_for_each_entry(nl, &pl->list, list) { | ||
1045 | if (nl->port == port) | ||
1046 | return; | ||
1047 | } | ||
1048 | nl = kmalloc(sizeof(*nl), GFP_ATOMIC); | ||
1049 | if (nl) { | ||
1050 | nl->port = port; | ||
1051 | list_add(&nl->list, &pl->list); | ||
1052 | } | ||
1053 | } | ||
1054 | |||
1055 | u32 tipc_plist_pop(struct tipc_plist *pl) | ||
1056 | { | ||
1057 | struct tipc_plist *nl; | ||
1058 | u32 port = 0; | ||
1059 | |||
1060 | if (likely(list_empty(&pl->list))) { | ||
1061 | port = pl->port; | ||
1062 | pl->port = 0; | ||
1063 | return port; | ||
1064 | } | ||
1065 | nl = list_first_entry(&pl->list, typeof(*nl), list); | ||
1066 | port = nl->port; | ||
1067 | list_del(&nl->list); | ||
1068 | kfree(nl); | ||
1069 | return port; | ||
1070 | } | ||
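The tipc_plist helpers added at the end of name_table.c replace the old tipc_port_list: the head structure carries one port inline, so the common single-destination case needs no allocation, tipc_plist_push() ignores duplicates, and tipc_plist_pop() drains any allocated entries before handing back the inline port. A simplified userspace model of the same behaviour (singly linked list and plain malloc instead of struct list_head and GFP_ATOMIC; all names here are stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct plist_node { unsigned int port; struct plist_node *next; };
struct plist { unsigned int port; struct plist_node *head; };

static void plist_init(struct plist *pl) { pl->port = 0; pl->head = NULL; }

static void plist_push(struct plist *pl, unsigned int port)
{
	if (!pl->port) { pl->port = port; return; }    /* inline slot free */
	if (pl->port == port) return;                  /* duplicate */
	for (struct plist_node *n = pl->head; n; n = n->next)
		if (n->port == port) return;           /* duplicate */
	struct plist_node *n = malloc(sizeof(*n));
	if (!n) return;                                /* best effort, like GFP_ATOMIC */
	n->port = port;
	n->next = pl->head;
	pl->head = n;
}

static unsigned int plist_pop(struct plist *pl)
{
	if (!pl->head) {                               /* only the inline port left */
		unsigned int port = pl->port;
		pl->port = 0;
		return port;
	}
	struct plist_node *n = pl->head;
	unsigned int port = n->port;
	pl->head = n->next;
	free(n);
	return port;
}

int main(void)
{
	struct plist pl;
	unsigned int port;

	plist_init(&pl);
	plist_push(&pl, 7);
	plist_push(&pl, 9);
	plist_push(&pl, 7);                            /* ignored duplicate */
	while ((port = plist_pop(&pl)))
		printf("deliver to port %u\n", port);  /* 9, then 7 */
	return 0;
}

Given the new prototypes, a multicast sender would presumably init the list, let tipc_nametbl_mc_translate() fill in the destination ports, and then pop until 0 is returned.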
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 5f0dee92010d..1524a73830f7 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/name_table.h: Include file for TIPC name table code | 2 | * net/tipc/name_table.h: Include file for TIPC name table code |
3 | * | 3 | * |
4 | * Copyright (c) 2000-2006, 2014, Ericsson AB | 4 | * Copyright (c) 2000-2006, 2014-2015, Ericsson AB |
5 | * Copyright (c) 2004-2005, 2010-2011, Wind River Systems | 5 | * Copyright (c) 2004-2005, 2010-2011, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
@@ -38,7 +38,7 @@ | |||
38 | #define _TIPC_NAME_TABLE_H | 38 | #define _TIPC_NAME_TABLE_H |
39 | 39 | ||
40 | struct tipc_subscription; | 40 | struct tipc_subscription; |
41 | struct tipc_port_list; | 41 | struct tipc_plist; |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * TIPC name types reserved for internal TIPC use (both current and planned) | 44 | * TIPC name types reserved for internal TIPC use (both current and planned) |
@@ -95,26 +95,39 @@ struct name_table { | |||
95 | u32 local_publ_count; | 95 | u32 local_publ_count; |
96 | }; | 96 | }; |
97 | 97 | ||
98 | extern spinlock_t tipc_nametbl_lock; | ||
99 | extern struct name_table *tipc_nametbl; | ||
100 | |||
101 | int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); | 98 | int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); |
102 | 99 | ||
103 | struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space); | 100 | u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node); |
104 | u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node); | 101 | int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper, |
105 | int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, | 102 | u32 limit, struct tipc_plist *dports); |
106 | struct tipc_port_list *dports); | 103 | struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, |
107 | struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, | 104 | u32 upper, u32 scope, u32 port_ref, |
108 | u32 scope, u32 port_ref, u32 key); | 105 | u32 key); |
109 | int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key); | 106 | int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref, |
110 | struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, | 107 | u32 key); |
111 | u32 scope, u32 node, u32 ref, | 108 | struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type, |
109 | u32 lower, u32 upper, u32 scope, | ||
110 | u32 node, u32 ref, u32 key); | ||
111 | struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, | ||
112 | u32 lower, u32 node, u32 ref, | ||
112 | u32 key); | 113 | u32 key); |
113 | struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, u32 node, | ||
114 | u32 ref, u32 key); | ||
115 | void tipc_nametbl_subscribe(struct tipc_subscription *s); | 114 | void tipc_nametbl_subscribe(struct tipc_subscription *s); |
116 | void tipc_nametbl_unsubscribe(struct tipc_subscription *s); | 115 | void tipc_nametbl_unsubscribe(struct tipc_subscription *s); |
117 | int tipc_nametbl_init(void); | 116 | int tipc_nametbl_init(struct net *net); |
118 | void tipc_nametbl_stop(void); | 117 | void tipc_nametbl_stop(struct net *net); |
118 | |||
119 | struct tipc_plist { | ||
120 | struct list_head list; | ||
121 | u32 port; | ||
122 | }; | ||
123 | |||
124 | static inline void tipc_plist_init(struct tipc_plist *pl) | ||
125 | { | ||
126 | INIT_LIST_HEAD(&pl->list); | ||
127 | pl->port = 0; | ||
128 | } | ||
129 | |||
130 | void tipc_plist_push(struct tipc_plist *pl, u32 port); | ||
131 | u32 tipc_plist_pop(struct tipc_plist *pl); | ||
119 | 132 | ||
120 | #endif | 133 | #endif |
diff --git a/net/tipc/net.c b/net/tipc/net.c index cf13df3cde8f..a54f3cbe2246 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -40,7 +40,6 @@ | |||
40 | #include "subscr.h" | 40 | #include "subscr.h" |
41 | #include "socket.h" | 41 | #include "socket.h" |
42 | #include "node.h" | 42 | #include "node.h" |
43 | #include "config.h" | ||
44 | 43 | ||
45 | static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { | 44 | static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { |
46 | [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, | 45 | [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, |
@@ -108,48 +107,54 @@ static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { | |||
108 | * - A local spin_lock protecting the queue of subscriber events. | 107 | * - A local spin_lock protecting the queue of subscriber events. |
109 | */ | 108 | */ |
110 | 109 | ||
111 | int tipc_net_start(u32 addr) | 110 | int tipc_net_start(struct net *net, u32 addr) |
112 | { | 111 | { |
112 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
113 | char addr_string[16]; | 113 | char addr_string[16]; |
114 | int res; | 114 | int res; |
115 | 115 | ||
116 | tipc_own_addr = addr; | 116 | tn->own_addr = addr; |
117 | tipc_named_reinit(); | 117 | tipc_named_reinit(net); |
118 | tipc_sk_reinit(); | 118 | tipc_sk_reinit(net); |
119 | res = tipc_bclink_init(); | 119 | res = tipc_bclink_init(net); |
120 | if (res) | 120 | if (res) |
121 | return res; | 121 | return res; |
122 | 122 | ||
123 | tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr, | 123 | tipc_nametbl_publish(net, TIPC_CFG_SRV, tn->own_addr, tn->own_addr, |
124 | TIPC_ZONE_SCOPE, 0, tipc_own_addr); | 124 | TIPC_ZONE_SCOPE, 0, tn->own_addr); |
125 | 125 | ||
126 | pr_info("Started in network mode\n"); | 126 | pr_info("Started in network mode\n"); |
127 | pr_info("Own node address %s, network identity %u\n", | 127 | pr_info("Own node address %s, network identity %u\n", |
128 | tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); | 128 | tipc_addr_string_fill(addr_string, tn->own_addr), |
129 | tn->net_id); | ||
129 | return 0; | 130 | return 0; |
130 | } | 131 | } |
131 | 132 | ||
132 | void tipc_net_stop(void) | 133 | void tipc_net_stop(struct net *net) |
133 | { | 134 | { |
134 | if (!tipc_own_addr) | 135 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
136 | |||
137 | if (!tn->own_addr) | ||
135 | return; | 138 | return; |
136 | 139 | ||
137 | tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr); | 140 | tipc_nametbl_withdraw(net, TIPC_CFG_SRV, tn->own_addr, 0, |
141 | tn->own_addr); | ||
138 | rtnl_lock(); | 142 | rtnl_lock(); |
139 | tipc_bearer_stop(); | 143 | tipc_bearer_stop(net); |
140 | tipc_bclink_stop(); | 144 | tipc_bclink_stop(net); |
141 | tipc_node_stop(); | 145 | tipc_node_stop(net); |
142 | rtnl_unlock(); | 146 | rtnl_unlock(); |
143 | 147 | ||
144 | pr_info("Left network mode\n"); | 148 | pr_info("Left network mode\n"); |
145 | } | 149 | } |
146 | 150 | ||
147 | static int __tipc_nl_add_net(struct tipc_nl_msg *msg) | 151 | static int __tipc_nl_add_net(struct net *net, struct tipc_nl_msg *msg) |
148 | { | 152 | { |
153 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
149 | void *hdr; | 154 | void *hdr; |
150 | struct nlattr *attrs; | 155 | struct nlattr *attrs; |
151 | 156 | ||
152 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, | 157 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, |
153 | NLM_F_MULTI, TIPC_NL_NET_GET); | 158 | NLM_F_MULTI, TIPC_NL_NET_GET); |
154 | if (!hdr) | 159 | if (!hdr) |
155 | return -EMSGSIZE; | 160 | return -EMSGSIZE; |
@@ -158,7 +163,7 @@ static int __tipc_nl_add_net(struct tipc_nl_msg *msg) | |||
158 | if (!attrs) | 163 | if (!attrs) |
159 | goto msg_full; | 164 | goto msg_full; |
160 | 165 | ||
161 | if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tipc_net_id)) | 166 | if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id)) |
162 | goto attr_msg_full; | 167 | goto attr_msg_full; |
163 | 168 | ||
164 | nla_nest_end(msg->skb, attrs); | 169 | nla_nest_end(msg->skb, attrs); |
@@ -176,6 +181,7 @@ msg_full: | |||
176 | 181 | ||
177 | int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb) | 182 | int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb) |
178 | { | 183 | { |
184 | struct net *net = sock_net(skb->sk); | ||
179 | int err; | 185 | int err; |
180 | int done = cb->args[0]; | 186 | int done = cb->args[0]; |
181 | struct tipc_nl_msg msg; | 187 | struct tipc_nl_msg msg; |
@@ -187,7 +193,7 @@ int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
187 | msg.portid = NETLINK_CB(cb->skb).portid; | 193 | msg.portid = NETLINK_CB(cb->skb).portid; |
188 | msg.seq = cb->nlh->nlmsg_seq; | 194 | msg.seq = cb->nlh->nlmsg_seq; |
189 | 195 | ||
190 | err = __tipc_nl_add_net(&msg); | 196 | err = __tipc_nl_add_net(net, &msg); |
191 | if (err) | 197 | if (err) |
192 | goto out; | 198 | goto out; |
193 | 199 | ||
@@ -200,8 +206,10 @@ out: | |||
200 | 206 | ||
201 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) | 207 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) |
202 | { | 208 | { |
203 | int err; | 209 | struct net *net = sock_net(skb->sk); |
210 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
204 | struct nlattr *attrs[TIPC_NLA_NET_MAX + 1]; | 211 | struct nlattr *attrs[TIPC_NLA_NET_MAX + 1]; |
212 | int err; | ||
205 | 213 | ||
206 | if (!info->attrs[TIPC_NLA_NET]) | 214 | if (!info->attrs[TIPC_NLA_NET]) |
207 | return -EINVAL; | 215 | return -EINVAL; |
@@ -216,21 +224,21 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) | |||
216 | u32 val; | 224 | u32 val; |
217 | 225 | ||
218 | /* Can't change net id once TIPC has joined a network */ | 226 | /* Can't change net id once TIPC has joined a network */ |
219 | if (tipc_own_addr) | 227 | if (tn->own_addr) |
220 | return -EPERM; | 228 | return -EPERM; |
221 | 229 | ||
222 | val = nla_get_u32(attrs[TIPC_NLA_NET_ID]); | 230 | val = nla_get_u32(attrs[TIPC_NLA_NET_ID]); |
223 | if (val < 1 || val > 9999) | 231 | if (val < 1 || val > 9999) |
224 | return -EINVAL; | 232 | return -EINVAL; |
225 | 233 | ||
226 | tipc_net_id = val; | 234 | tn->net_id = val; |
227 | } | 235 | } |
228 | 236 | ||
229 | if (attrs[TIPC_NLA_NET_ADDR]) { | 237 | if (attrs[TIPC_NLA_NET_ADDR]) { |
230 | u32 addr; | 238 | u32 addr; |
231 | 239 | ||
232 | /* Can't change net addr once TIPC has joined a network */ | 240 | /* Can't change net addr once TIPC has joined a network */ |
233 | if (tipc_own_addr) | 241 | if (tn->own_addr) |
234 | return -EPERM; | 242 | return -EPERM; |
235 | 243 | ||
236 | addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]); | 244 | addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]); |
@@ -238,7 +246,7 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) | |||
238 | return -EINVAL; | 246 | return -EINVAL; |
239 | 247 | ||
240 | rtnl_lock(); | 248 | rtnl_lock(); |
241 | tipc_net_start(addr); | 249 | tipc_net_start(net, addr); |
242 | rtnl_unlock(); | 250 | rtnl_unlock(); |
243 | } | 251 | } |
244 | 252 | ||
diff --git a/net/tipc/net.h b/net/tipc/net.h index a81c1b9eb150..77a7a118911d 100644 --- a/net/tipc/net.h +++ b/net/tipc/net.h | |||
@@ -39,9 +39,9 @@ | |||
39 | 39 | ||
40 | #include <net/genetlink.h> | 40 | #include <net/genetlink.h> |
41 | 41 | ||
42 | int tipc_net_start(u32 addr); | 42 | int tipc_net_start(struct net *net, u32 addr); |
43 | 43 | ||
44 | void tipc_net_stop(void); | 44 | void tipc_net_stop(struct net *net); |
45 | 45 | ||
46 | int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); | 46 | int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); |
47 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); | 47 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); |
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index b891e3905bc4..7f6475efc984 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c | |||
@@ -35,7 +35,6 @@ | |||
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include "core.h" | 37 | #include "core.h" |
38 | #include "config.h" | ||
39 | #include "socket.h" | 38 | #include "socket.h" |
40 | #include "name_table.h" | 39 | #include "name_table.h" |
41 | #include "bearer.h" | 40 | #include "bearer.h" |
@@ -44,36 +43,6 @@ | |||
44 | #include "net.h" | 43 | #include "net.h" |
45 | #include <net/genetlink.h> | 44 | #include <net/genetlink.h> |
46 | 45 | ||
47 | static int handle_cmd(struct sk_buff *skb, struct genl_info *info) | ||
48 | { | ||
49 | struct sk_buff *rep_buf; | ||
50 | struct nlmsghdr *rep_nlh; | ||
51 | struct nlmsghdr *req_nlh = info->nlhdr; | ||
52 | struct tipc_genlmsghdr *req_userhdr = info->userhdr; | ||
53 | int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN); | ||
54 | u16 cmd; | ||
55 | |||
56 | if ((req_userhdr->cmd & 0xC000) && (!netlink_capable(skb, CAP_NET_ADMIN))) | ||
57 | cmd = TIPC_CMD_NOT_NET_ADMIN; | ||
58 | else | ||
59 | cmd = req_userhdr->cmd; | ||
60 | |||
61 | rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd, | ||
62 | nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN, | ||
63 | nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN), | ||
64 | hdr_space); | ||
65 | |||
66 | if (rep_buf) { | ||
67 | skb_push(rep_buf, hdr_space); | ||
68 | rep_nlh = nlmsg_hdr(rep_buf); | ||
69 | memcpy(rep_nlh, req_nlh, hdr_space); | ||
70 | rep_nlh->nlmsg_len = rep_buf->len; | ||
71 | genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).portid); | ||
72 | } | ||
73 | |||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = { | 46 | static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = { |
78 | [TIPC_NLA_UNSPEC] = { .type = NLA_UNSPEC, }, | 47 | [TIPC_NLA_UNSPEC] = { .type = NLA_UNSPEC, }, |
79 | [TIPC_NLA_BEARER] = { .type = NLA_NESTED, }, | 48 | [TIPC_NLA_BEARER] = { .type = NLA_NESTED, }, |
@@ -86,32 +55,16 @@ static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = { | |||
86 | [TIPC_NLA_NAME_TABLE] = { .type = NLA_NESTED, } | 55 | [TIPC_NLA_NAME_TABLE] = { .type = NLA_NESTED, } |
87 | }; | 56 | }; |
88 | 57 | ||
89 | /* Legacy ASCII API */ | ||
90 | static struct genl_family tipc_genl_family = { | ||
91 | .id = GENL_ID_GENERATE, | ||
92 | .name = TIPC_GENL_NAME, | ||
93 | .version = TIPC_GENL_VERSION, | ||
94 | .hdrsize = TIPC_GENL_HDRLEN, | ||
95 | .maxattr = 0, | ||
96 | }; | ||
97 | |||
98 | /* Legacy ASCII API */ | ||
99 | static struct genl_ops tipc_genl_ops[] = { | ||
100 | { | ||
101 | .cmd = TIPC_GENL_CMD, | ||
102 | .doit = handle_cmd, | ||
103 | }, | ||
104 | }; | ||
105 | |||
106 | /* Users of the legacy API (tipc-config) can't handle that we add operations, | 58 | /* Users of the legacy API (tipc-config) can't handle that we add operations, |
107 | * so we have a separate genl handling for the new API. | 59 | * so we have a separate genl handling for the new API. |
108 | */ | 60 | */ |
109 | struct genl_family tipc_genl_v2_family = { | 61 | struct genl_family tipc_genl_family = { |
110 | .id = GENL_ID_GENERATE, | 62 | .id = GENL_ID_GENERATE, |
111 | .name = TIPC_GENL_V2_NAME, | 63 | .name = TIPC_GENL_V2_NAME, |
112 | .version = TIPC_GENL_V2_VERSION, | 64 | .version = TIPC_GENL_V2_VERSION, |
113 | .hdrsize = 0, | 65 | .hdrsize = 0, |
114 | .maxattr = TIPC_NLA_MAX, | 66 | .maxattr = TIPC_NLA_MAX, |
67 | .netnsok = true, | ||
115 | }; | 68 | }; |
116 | 69 | ||
117 | static const struct genl_ops tipc_genl_v2_ops[] = { | 70 | static const struct genl_ops tipc_genl_v2_ops[] = { |
@@ -197,9 +150,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = { | |||
197 | 150 | ||
198 | int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr) | 151 | int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr) |
199 | { | 152 | { |
200 | u32 maxattr = tipc_genl_v2_family.maxattr; | 153 | u32 maxattr = tipc_genl_family.maxattr; |
201 | 154 | ||
202 | *attr = tipc_genl_v2_family.attrbuf; | 155 | *attr = tipc_genl_family.attrbuf; |
203 | if (!*attr) | 156 | if (!*attr) |
204 | return -EOPNOTSUPP; | 157 | return -EOPNOTSUPP; |
205 | 158 | ||
@@ -210,13 +163,7 @@ int tipc_netlink_start(void) | |||
210 | { | 163 | { |
211 | int res; | 164 | int res; |
212 | 165 | ||
213 | res = genl_register_family_with_ops(&tipc_genl_family, tipc_genl_ops); | 166 | res = genl_register_family_with_ops(&tipc_genl_family, |
214 | if (res) { | ||
215 | pr_err("Failed to register legacy interface\n"); | ||
216 | return res; | ||
217 | } | ||
218 | |||
219 | res = genl_register_family_with_ops(&tipc_genl_v2_family, | ||
220 | tipc_genl_v2_ops); | 167 | tipc_genl_v2_ops); |
221 | if (res) { | 168 | if (res) { |
222 | pr_err("Failed to register netlink interface\n"); | 169 | pr_err("Failed to register netlink interface\n"); |
@@ -228,5 +175,4 @@ int tipc_netlink_start(void) | |||
228 | void tipc_netlink_stop(void) | 175 | void tipc_netlink_stop(void) |
229 | { | 176 | { |
230 | genl_unregister_family(&tipc_genl_family); | 177 | genl_unregister_family(&tipc_genl_family); |
231 | genl_unregister_family(&tipc_genl_v2_family); | ||
232 | } | 178 | } |
diff --git a/net/tipc/netlink.h b/net/tipc/netlink.h index 1425c6869de0..08a1db67b927 100644 --- a/net/tipc/netlink.h +++ b/net/tipc/netlink.h | |||
@@ -36,7 +36,7 @@ | |||
36 | #ifndef _TIPC_NETLINK_H | 36 | #ifndef _TIPC_NETLINK_H |
37 | #define _TIPC_NETLINK_H | 37 | #define _TIPC_NETLINK_H |
38 | 38 | ||
39 | extern struct genl_family tipc_genl_v2_family; | 39 | extern struct genl_family tipc_genl_family; |
40 | int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***buf); | 40 | int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***buf); |
41 | 41 | ||
42 | struct tipc_nl_msg { | 42 | struct tipc_nl_msg { |
@@ -45,4 +45,9 @@ struct tipc_nl_msg { | |||
45 | u32 seq; | 45 | u32 seq; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | int tipc_netlink_start(void); | ||
49 | int tipc_netlink_compat_start(void); | ||
50 | void tipc_netlink_stop(void); | ||
51 | void tipc_netlink_compat_stop(void); | ||
52 | |||
48 | #endif | 53 | #endif |
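Editor's note: the netlink_compat.c file added below frames every legacy request and reply as flat TLVs (see tipc_add_tlv(), tipc_tlv_init(), and the TIPC_SKB_MAX = TLV_SPACE(ULTRA_STRING_MAX_LEN) bound). A minimal sketch of that framing follows, assuming the descriptor layout and 4-byte alignment of the TLV_* macros in <linux/tipc_config.h>; tlv_put(), the demo buffer, and the numeric type value are illustrative helpers, not kernel code.

/* Sketch of the legacy TIPC TLV framing the compat layer emits and consumes. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* htons() */

struct tlv_desc {
	uint16_t tlv_len;	/* descriptor + value length (network order) */
	uint16_t tlv_type;	/* TIPC_TLV_* type (network order) */
};

#define TLV_ALIGNTO		4u
#define TLV_ALIGN(len)		(((len) + (TLV_ALIGNTO - 1)) & ~(TLV_ALIGNTO - 1))
#define TLV_LENGTH(datalen)	(sizeof(struct tlv_desc) + (datalen))
#define TLV_SPACE(datalen)	TLV_ALIGN(TLV_LENGTH(datalen))

/* Append one TLV to buf at offset 'off'; returns the offset of the next TLV. */
static size_t tlv_put(uint8_t *buf, size_t off, uint16_t type,
		      const void *data, uint16_t datalen)
{
	struct tlv_desc *tlv = (struct tlv_desc *)(buf + off);

	tlv->tlv_type = htons(type);
	tlv->tlv_len = htons(TLV_LENGTH(datalen));
	memcpy(tlv + 1, data, datalen);
	return off + TLV_SPACE(datalen);	/* next TLV starts 4-byte aligned */
}

int main(void)
{
	uint8_t buf[128];
	const char name[] = "eth:eth0";	/* e.g. a bearer name string */
	size_t used = tlv_put(buf, 0, 1 /* placeholder for a TIPC_TLV_* type */,
			      name, sizeof(name));

	printf("payload %zu bytes, TLV occupies %zu bytes on the wire\n",
	       sizeof(name), used);
	return 0;
}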
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c new file mode 100644 index 000000000000..ce9121e8e990 --- /dev/null +++ b/net/tipc/netlink_compat.c | |||
@@ -0,0 +1,1084 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014, Ericsson AB | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * Redistribution and use in source and binary forms, with or without | ||
6 | * modification, are permitted provided that the following conditions are met: | ||
7 | * | ||
8 | * 1. Redistributions of source code must retain the above copyright | ||
9 | * notice, this list of conditions and the following disclaimer. | ||
10 | * 2. Redistributions in binary form must reproduce the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer in the | ||
12 | * documentation and/or other materials provided with the distribution. | ||
13 | * 3. Neither the names of the copyright holders nor the names of its | ||
14 | * contributors may be used to endorse or promote products derived from | ||
15 | * this software without specific prior written permission. | ||
16 | * | ||
17 | * Alternatively, this software may be distributed under the terms of the | ||
18 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
19 | * Software Foundation. | ||
20 | * | ||
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
22 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
25 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
26 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
27 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
28 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
29 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
30 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
31 | * POSSIBILITY OF SUCH DAMAGE. | ||
32 | */ | ||
33 | |||
34 | #include "core.h" | ||
35 | #include "bearer.h" | ||
36 | #include "link.h" | ||
37 | #include "name_table.h" | ||
38 | #include "socket.h" | ||
39 | #include "node.h" | ||
40 | #include "net.h" | ||
41 | #include <net/genetlink.h> | ||
42 | #include <linux/tipc_config.h> | ||
43 | |||
44 | /* The legacy API had an artificial message length limit called | ||
45 | * ULTRA_STRING_MAX_LEN. | ||
46 | */ | ||
47 | #define ULTRA_STRING_MAX_LEN 32768 | ||
48 | |||
49 | #define TIPC_SKB_MAX TLV_SPACE(ULTRA_STRING_MAX_LEN) | ||
50 | |||
51 | #define REPLY_TRUNCATED "<truncated>\n" | ||
52 | |||
53 | struct tipc_nl_compat_msg { | ||
54 | u16 cmd; | ||
55 | int rep_type; | ||
56 | int rep_size; | ||
57 | int req_type; | ||
58 | struct sk_buff *rep; | ||
59 | struct tlv_desc *req; | ||
60 | struct sock *dst_sk; | ||
61 | }; | ||
62 | |||
63 | struct tipc_nl_compat_cmd_dump { | ||
64 | int (*header)(struct tipc_nl_compat_msg *); | ||
65 | int (*dumpit)(struct sk_buff *, struct netlink_callback *); | ||
66 | int (*format)(struct tipc_nl_compat_msg *msg, struct nlattr **attrs); | ||
67 | }; | ||
68 | |||
69 | struct tipc_nl_compat_cmd_doit { | ||
70 | int (*doit)(struct sk_buff *skb, struct genl_info *info); | ||
71 | int (*transcode)(struct sk_buff *skb, struct tipc_nl_compat_msg *msg); | ||
72 | }; | ||
73 | |||
74 | static int tipc_skb_tailroom(struct sk_buff *skb) | ||
75 | { | ||
76 | int tailroom; | ||
77 | int limit; | ||
78 | |||
79 | tailroom = skb_tailroom(skb); | ||
80 | limit = TIPC_SKB_MAX - skb->len; | ||
81 | |||
82 | if (tailroom < limit) | ||
83 | return tailroom; | ||
84 | |||
85 | return limit; | ||
86 | } | ||
87 | |||
88 | static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len) | ||
89 | { | ||
90 | struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb); | ||
91 | |||
92 | if (tipc_skb_tailroom(skb) < TLV_SPACE(len)) | ||
93 | return -EMSGSIZE; | ||
94 | |||
95 | skb_put(skb, TLV_SPACE(len)); | ||
96 | tlv->tlv_type = htons(type); | ||
97 | tlv->tlv_len = htons(TLV_LENGTH(len)); | ||
98 | if (len && data) | ||
99 | memcpy(TLV_DATA(tlv), data, len); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static void tipc_tlv_init(struct sk_buff *skb, u16 type) | ||
105 | { | ||
106 | struct tlv_desc *tlv = (struct tlv_desc *)skb->data; | ||
107 | |||
108 | TLV_SET_LEN(tlv, 0); | ||
109 | TLV_SET_TYPE(tlv, type); | ||
110 | skb_put(skb, sizeof(struct tlv_desc)); | ||
111 | } | ||
112 | |||
113 | static int tipc_tlv_sprintf(struct sk_buff *skb, const char *fmt, ...) | ||
114 | { | ||
115 | int n; | ||
116 | u16 len; | ||
117 | u32 rem; | ||
118 | char *buf; | ||
119 | struct tlv_desc *tlv; | ||
120 | va_list args; | ||
121 | |||
122 | rem = tipc_skb_tailroom(skb); | ||
123 | |||
124 | tlv = (struct tlv_desc *)skb->data; | ||
125 | len = TLV_GET_LEN(tlv); | ||
126 | buf = TLV_DATA(tlv) + len; | ||
127 | |||
128 | va_start(args, fmt); | ||
129 | n = vscnprintf(buf, rem, fmt, args); | ||
130 | va_end(args); | ||
131 | |||
132 | TLV_SET_LEN(tlv, n + len); | ||
133 | skb_put(skb, n); | ||
134 | |||
135 | return n; | ||
136 | } | ||
137 | |||
138 | static struct sk_buff *tipc_tlv_alloc(int size) | ||
139 | { | ||
140 | int hdr_len; | ||
141 | struct sk_buff *buf; | ||
142 | |||
143 | size = TLV_SPACE(size); | ||
144 | hdr_len = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN); | ||
145 | |||
146 | buf = alloc_skb(hdr_len + size, GFP_KERNEL); | ||
147 | if (!buf) | ||
148 | return NULL; | ||
149 | |||
150 | skb_reserve(buf, hdr_len); | ||
151 | |||
152 | return buf; | ||
153 | } | ||
154 | |||
155 | static struct sk_buff *tipc_get_err_tlv(char *str) | ||
156 | { | ||
157 | int str_len = strlen(str) + 1; | ||
158 | struct sk_buff *buf; | ||
159 | |||
160 | buf = tipc_tlv_alloc(TLV_SPACE(str_len)); | ||
161 | if (buf) | ||
162 | tipc_add_tlv(buf, TIPC_TLV_ERROR_STRING, str, str_len); | ||
163 | |||
164 | return buf; | ||
165 | } | ||
166 | |||
167 | static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, | ||
168 | struct tipc_nl_compat_msg *msg, | ||
169 | struct sk_buff *arg) | ||
170 | { | ||
171 | int len = 0; | ||
172 | int err; | ||
173 | struct sk_buff *buf; | ||
174 | struct nlmsghdr *nlmsg; | ||
175 | struct netlink_callback cb; | ||
176 | |||
177 | memset(&cb, 0, sizeof(cb)); | ||
178 | cb.nlh = (struct nlmsghdr *)arg->data; | ||
179 | cb.skb = arg; | ||
180 | |||
181 | buf = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
182 | if (!buf) | ||
183 | return -ENOMEM; | ||
184 | |||
185 | buf->sk = msg->dst_sk; | ||
186 | |||
187 | do { | ||
188 | int rem; | ||
189 | |||
190 | len = (*cmd->dumpit)(buf, &cb); | ||
191 | |||
192 | nlmsg_for_each_msg(nlmsg, nlmsg_hdr(buf), len, rem) { | ||
193 | struct nlattr **attrs; | ||
194 | |||
195 | err = tipc_nlmsg_parse(nlmsg, &attrs); | ||
196 | if (err) | ||
197 | goto err_out; | ||
198 | |||
199 | err = (*cmd->format)(msg, attrs); | ||
200 | if (err) | ||
201 | goto err_out; | ||
202 | |||
203 | if (tipc_skb_tailroom(msg->rep) <= 1) { | ||
204 | err = -EMSGSIZE; | ||
205 | goto err_out; | ||
206 | } | ||
207 | } | ||
208 | |||
209 | skb_reset_tail_pointer(buf); | ||
210 | buf->len = 0; | ||
211 | |||
212 | } while (len); | ||
213 | |||
214 | err = 0; | ||
215 | |||
216 | err_out: | ||
217 | kfree_skb(buf); | ||
218 | |||
219 | if (err == -EMSGSIZE) { | ||
220 | /* The legacy API only considered messages filling | ||
221 | * "ULTRA_STRING_MAX_LEN" to be truncated. | ||
222 | */ | ||
223 | if ((TIPC_SKB_MAX - msg->rep->len) <= 1) { | ||
224 | char *tail = skb_tail_pointer(msg->rep); | ||
225 | |||
226 | if (*tail != '\0') | ||
227 | sprintf(tail - sizeof(REPLY_TRUNCATED) - 1, | ||
228 | REPLY_TRUNCATED); | ||
229 | } | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | return err; | ||
235 | } | ||
236 | |||
237 | static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, | ||
238 | struct tipc_nl_compat_msg *msg) | ||
239 | { | ||
240 | int err; | ||
241 | struct sk_buff *arg; | ||
242 | |||
243 | if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type)) | ||
244 | return -EINVAL; | ||
245 | |||
246 | msg->rep = tipc_tlv_alloc(msg->rep_size); | ||
247 | if (!msg->rep) | ||
248 | return -ENOMEM; | ||
249 | |||
250 | if (msg->rep_type) | ||
251 | tipc_tlv_init(msg->rep, msg->rep_type); | ||
252 | |||
253 | if (cmd->header) | ||
254 | (*cmd->header)(msg); | ||
255 | |||
256 | arg = nlmsg_new(0, GFP_KERNEL); | ||
257 | if (!arg) { | ||
258 | kfree_skb(msg->rep); | ||
259 | return -ENOMEM; | ||
260 | } | ||
261 | |||
262 | err = __tipc_nl_compat_dumpit(cmd, msg, arg); | ||
263 | if (err) | ||
264 | kfree_skb(msg->rep); | ||
265 | |||
266 | kfree_skb(arg); | ||
267 | |||
268 | return err; | ||
269 | } | ||
270 | |||
271 | static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd, | ||
272 | struct tipc_nl_compat_msg *msg) | ||
273 | { | ||
274 | int err; | ||
275 | struct sk_buff *doit_buf; | ||
276 | struct sk_buff *trans_buf; | ||
277 | struct nlattr **attrbuf; | ||
278 | struct genl_info info; | ||
279 | |||
280 | trans_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); | ||
281 | if (!trans_buf) | ||
282 | return -ENOMEM; | ||
283 | |||
284 | err = (*cmd->transcode)(trans_buf, msg); | ||
285 | if (err) | ||
286 | goto trans_out; | ||
287 | |||
288 | attrbuf = kmalloc((tipc_genl_family.maxattr + 1) * | ||
289 | sizeof(struct nlattr *), GFP_KERNEL); | ||
290 | if (!attrbuf) { | ||
291 | err = -ENOMEM; | ||
292 | goto trans_out; | ||
293 | } | ||
294 | |||
295 | err = nla_parse(attrbuf, tipc_genl_family.maxattr, | ||
296 | (const struct nlattr *)trans_buf->data, | ||
297 | trans_buf->len, NULL); | ||
298 | if (err) | ||
299 | goto parse_out; | ||
300 | |||
301 | doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); | ||
302 | if (!doit_buf) { | ||
303 | err = -ENOMEM; | ||
304 | goto parse_out; | ||
305 | } | ||
306 | |||
307 | doit_buf->sk = msg->dst_sk; | ||
308 | |||
309 | memset(&info, 0, sizeof(info)); | ||
310 | info.attrs = attrbuf; | ||
311 | |||
312 | err = (*cmd->doit)(doit_buf, &info); | ||
313 | |||
314 | kfree_skb(doit_buf); | ||
315 | parse_out: | ||
316 | kfree(attrbuf); | ||
317 | trans_out: | ||
318 | kfree_skb(trans_buf); | ||
319 | |||
320 | return err; | ||
321 | } | ||
322 | |||
323 | static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd, | ||
324 | struct tipc_nl_compat_msg *msg) | ||
325 | { | ||
326 | int err; | ||
327 | |||
328 | if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type)) | ||
329 | return -EINVAL; | ||
330 | |||
331 | err = __tipc_nl_compat_doit(cmd, msg); | ||
332 | if (err) | ||
333 | return err; | ||
334 | |||
335 | /* The legacy API considered an empty message a success message */ | ||
336 | msg->rep = tipc_tlv_alloc(0); | ||
337 | if (!msg->rep) | ||
338 | return -ENOMEM; | ||
339 | |||
340 | return 0; | ||
341 | } | ||
342 | |||
343 | static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg, | ||
344 | struct nlattr **attrs) | ||
345 | { | ||
346 | struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1]; | ||
347 | |||
348 | nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER], | ||
349 | NULL); | ||
350 | |||
351 | return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME, | ||
352 | nla_data(bearer[TIPC_NLA_BEARER_NAME]), | ||
353 | nla_len(bearer[TIPC_NLA_BEARER_NAME])); | ||
354 | } | ||
355 | |||
356 | static int tipc_nl_compat_bearer_enable(struct sk_buff *skb, | ||
357 | struct tipc_nl_compat_msg *msg) | ||
358 | { | ||
359 | struct nlattr *prop; | ||
360 | struct nlattr *bearer; | ||
361 | struct tipc_bearer_config *b; | ||
362 | |||
363 | b = (struct tipc_bearer_config *)TLV_DATA(msg->req); | ||
364 | |||
365 | bearer = nla_nest_start(skb, TIPC_NLA_BEARER); | ||
366 | if (!bearer) | ||
367 | return -EMSGSIZE; | ||
368 | |||
369 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name)) | ||
370 | return -EMSGSIZE; | ||
371 | |||
372 | if (nla_put_u32(skb, TIPC_NLA_BEARER_DOMAIN, ntohl(b->disc_domain))) | ||
373 | return -EMSGSIZE; | ||
374 | |||
375 | if (ntohl(b->priority) <= TIPC_MAX_LINK_PRI) { | ||
376 | prop = nla_nest_start(skb, TIPC_NLA_BEARER_PROP); | ||
377 | if (!prop) | ||
378 | return -EMSGSIZE; | ||
379 | if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(b->priority))) | ||
380 | return -EMSGSIZE; | ||
381 | nla_nest_end(skb, prop); | ||
382 | } | ||
383 | nla_nest_end(skb, bearer); | ||
384 | |||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | static int tipc_nl_compat_bearer_disable(struct sk_buff *skb, | ||
389 | struct tipc_nl_compat_msg *msg) | ||
390 | { | ||
391 | char *name; | ||
392 | struct nlattr *bearer; | ||
393 | |||
394 | name = (char *)TLV_DATA(msg->req); | ||
395 | |||
396 | bearer = nla_nest_start(skb, TIPC_NLA_BEARER); | ||
397 | if (!bearer) | ||
398 | return -EMSGSIZE; | ||
399 | |||
400 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name)) | ||
401 | return -EMSGSIZE; | ||
402 | |||
403 | nla_nest_end(skb, bearer); | ||
404 | |||
405 | return 0; | ||
406 | } | ||
407 | |||
408 | static inline u32 perc(u32 count, u32 total) | ||
409 | { | ||
410 | return (count * 100 + (total / 2)) / total; | ||
411 | } | ||
412 | |||
413 | static void __fill_bc_link_stat(struct tipc_nl_compat_msg *msg, | ||
414 | struct nlattr *prop[], struct nlattr *stats[]) | ||
415 | { | ||
416 | tipc_tlv_sprintf(msg->rep, " Window:%u packets\n", | ||
417 | nla_get_u32(prop[TIPC_NLA_PROP_WIN])); | ||
418 | |||
419 | tipc_tlv_sprintf(msg->rep, | ||
420 | " RX packets:%u fragments:%u/%u bundles:%u/%u\n", | ||
421 | nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]), | ||
422 | nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]), | ||
423 | nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]), | ||
424 | nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]), | ||
425 | nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED])); | ||
426 | |||
427 | tipc_tlv_sprintf(msg->rep, | ||
428 | " TX packets:%u fragments:%u/%u bundles:%u/%u\n", | ||
429 | nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]), | ||
430 | nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]), | ||
431 | nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]), | ||
432 | nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]), | ||
433 | nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED])); | ||
434 | |||
435 | tipc_tlv_sprintf(msg->rep, " RX naks:%u defs:%u dups:%u\n", | ||
436 | nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]), | ||
437 | nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]), | ||
438 | nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES])); | ||
439 | |||
440 | tipc_tlv_sprintf(msg->rep, " TX naks:%u acks:%u dups:%u\n", | ||
441 | nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]), | ||
442 | nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]), | ||
443 | nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED])); | ||
444 | |||
445 | tipc_tlv_sprintf(msg->rep, | ||
446 | " Congestion link:%u Send queue max:%u avg:%u", | ||
447 | nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]), | ||
448 | nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]), | ||
449 | nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE])); | ||
450 | } | ||
451 | |||
452 | static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, | ||
453 | struct nlattr **attrs) | ||
454 | { | ||
455 | char *name; | ||
456 | struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; | ||
457 | struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; | ||
458 | struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; | ||
459 | |||
460 | nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); | ||
461 | |||
462 | nla_parse_nested(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP], | ||
463 | NULL); | ||
464 | |||
465 | nla_parse_nested(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS], | ||
466 | NULL); | ||
467 | |||
468 | name = (char *)TLV_DATA(msg->req); | ||
469 | if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) | ||
470 | return 0; | ||
471 | |||
472 | tipc_tlv_sprintf(msg->rep, "\nLink <%s>\n", | ||
473 | nla_data(link[TIPC_NLA_LINK_NAME])); | ||
474 | |||
475 | if (link[TIPC_NLA_LINK_BROADCAST]) { | ||
476 | __fill_bc_link_stat(msg, prop, stats); | ||
477 | return 0; | ||
478 | } | ||
479 | |||
480 | if (link[TIPC_NLA_LINK_ACTIVE]) | ||
481 | tipc_tlv_sprintf(msg->rep, " ACTIVE"); | ||
482 | else if (link[TIPC_NLA_LINK_UP]) | ||
483 | tipc_tlv_sprintf(msg->rep, " STANDBY"); | ||
484 | else | ||
485 | tipc_tlv_sprintf(msg->rep, " DEFUNCT"); | ||
486 | |||
487 | tipc_tlv_sprintf(msg->rep, " MTU:%u Priority:%u", | ||
488 | nla_get_u32(link[TIPC_NLA_LINK_MTU]), | ||
489 | nla_get_u32(prop[TIPC_NLA_PROP_PRIO])); | ||
490 | |||
491 | tipc_tlv_sprintf(msg->rep, " Tolerance:%u ms Window:%u packets\n", | ||
492 | nla_get_u32(prop[TIPC_NLA_PROP_TOL]), | ||
493 | nla_get_u32(prop[TIPC_NLA_PROP_WIN])); | ||
494 | |||
495 | tipc_tlv_sprintf(msg->rep, | ||
496 | " RX packets:%u fragments:%u/%u bundles:%u/%u\n", | ||
497 | nla_get_u32(link[TIPC_NLA_LINK_RX]) - | ||
498 | nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]), | ||
499 | nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]), | ||
500 | nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]), | ||
501 | nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]), | ||
502 | nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED])); | ||
503 | |||
504 | tipc_tlv_sprintf(msg->rep, | ||
505 | " TX packets:%u fragments:%u/%u bundles:%u/%u\n", | ||
506 | nla_get_u32(link[TIPC_NLA_LINK_TX]) - | ||
507 | nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]), | ||
508 | nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]), | ||
509 | nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]), | ||
510 | nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]), | ||
511 | nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED])); | ||
512 | |||
513 | tipc_tlv_sprintf(msg->rep, | ||
514 | " TX profile sample:%u packets average:%u octets\n", | ||
515 | nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_CNT]), | ||
516 | nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_TOT]) / | ||
517 | nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])); | ||
518 | |||
519 | tipc_tlv_sprintf(msg->rep, | ||
520 | " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% ", | ||
521 | perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P0]), | ||
522 | nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])), | ||
523 | perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P1]), | ||
524 | nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])), | ||
525 | perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P2]), | ||
526 | nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])), | ||
527 | perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P3]), | ||
528 | nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT]))); | ||
529 | |||
530 | tipc_tlv_sprintf(msg->rep, "-16384:%u%% -32768:%u%% -66000:%u%%\n", | ||
531 | perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P4]), | ||
532 | nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])), | ||
533 | perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P5]), | ||
534 | nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])), | ||
535 | perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P6]), | ||
536 | nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT]))); | ||
537 | |||
538 | tipc_tlv_sprintf(msg->rep, | ||
539 | " RX states:%u probes:%u naks:%u defs:%u dups:%u\n", | ||
540 | nla_get_u32(stats[TIPC_NLA_STATS_RX_STATES]), | ||
541 | nla_get_u32(stats[TIPC_NLA_STATS_RX_PROBES]), | ||
542 | nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]), | ||
543 | nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]), | ||
544 | nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES])); | ||
545 | |||
546 | tipc_tlv_sprintf(msg->rep, | ||
547 | " TX states:%u probes:%u naks:%u acks:%u dups:%u\n", | ||
548 | nla_get_u32(stats[TIPC_NLA_STATS_TX_STATES]), | ||
549 | nla_get_u32(stats[TIPC_NLA_STATS_TX_PROBES]), | ||
550 | nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]), | ||
551 | nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]), | ||
552 | nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED])); | ||
553 | |||
554 | tipc_tlv_sprintf(msg->rep, | ||
555 | " Congestion link:%u Send queue max:%u avg:%u", | ||
556 | nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]), | ||
557 | nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]), | ||
558 | nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE])); | ||
559 | |||
560 | return 0; | ||
561 | } | ||
562 | |||
563 | static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg, | ||
564 | struct nlattr **attrs) | ||
565 | { | ||
566 | struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; | ||
567 | struct tipc_link_info link_info; | ||
568 | |||
569 | nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); | ||
570 | |||
571 | link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); | ||
572 | link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); | ||
573 | strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME])); | ||
574 | |||
575 | return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO, | ||
576 | &link_info, sizeof(link_info)); | ||
577 | } | ||
578 | |||
579 | static int tipc_nl_compat_link_set(struct sk_buff *skb, | ||
580 | struct tipc_nl_compat_msg *msg) | ||
581 | { | ||
582 | struct nlattr *link; | ||
583 | struct nlattr *prop; | ||
584 | struct tipc_link_config *lc; | ||
585 | |||
586 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); | ||
587 | |||
588 | link = nla_nest_start(skb, TIPC_NLA_LINK); | ||
589 | if (!link) | ||
590 | return -EMSGSIZE; | ||
591 | |||
592 | if (nla_put_string(skb, TIPC_NLA_LINK_NAME, lc->name)) | ||
593 | return -EMSGSIZE; | ||
594 | |||
595 | prop = nla_nest_start(skb, TIPC_NLA_LINK_PROP); | ||
596 | if (!prop) | ||
597 | return -EMSGSIZE; | ||
598 | |||
599 | if (msg->cmd == TIPC_CMD_SET_LINK_PRI) { | ||
600 | if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value))) | ||
601 | return -EMSGSIZE; | ||
602 | } else if (msg->cmd == TIPC_CMD_SET_LINK_TOL) { | ||
603 | if (nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value))) | ||
604 | return -EMSGSIZE; | ||
605 | } else if (msg->cmd == TIPC_CMD_SET_LINK_WINDOW) { | ||
606 | if (nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value))) | ||
607 | return -EMSGSIZE; | ||
608 | } | ||
609 | |||
610 | nla_nest_end(skb, prop); | ||
611 | nla_nest_end(skb, link); | ||
612 | |||
613 | return 0; | ||
614 | } | ||
615 | |||
616 | static int tipc_nl_compat_link_reset_stats(struct sk_buff *skb, | ||
617 | struct tipc_nl_compat_msg *msg) | ||
618 | { | ||
619 | char *name; | ||
620 | struct nlattr *link; | ||
621 | |||
622 | name = (char *)TLV_DATA(msg->req); | ||
623 | |||
624 | link = nla_nest_start(skb, TIPC_NLA_LINK); | ||
625 | if (!link) | ||
626 | return -EMSGSIZE; | ||
627 | |||
628 | if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name)) | ||
629 | return -EMSGSIZE; | ||
630 | |||
631 | nla_nest_end(skb, link); | ||
632 | |||
633 | return 0; | ||
634 | } | ||
635 | |||
636 | static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg) | ||
637 | { | ||
638 | int i; | ||
639 | u32 depth; | ||
640 | struct tipc_name_table_query *ntq; | ||
641 | static const char * const header[] = { | ||
642 | "Type ", | ||
643 | "Lower Upper ", | ||
644 | "Port Identity ", | ||
645 | "Publication Scope" | ||
646 | }; | ||
647 | |||
648 | ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); | ||
649 | |||
650 | depth = ntohl(ntq->depth); | ||
651 | |||
652 | if (depth > 4) | ||
653 | depth = 4; | ||
654 | for (i = 0; i < depth; i++) | ||
655 | tipc_tlv_sprintf(msg->rep, header[i]); | ||
656 | tipc_tlv_sprintf(msg->rep, "\n"); | ||
657 | |||
658 | return 0; | ||
659 | } | ||
660 | |||
661 | static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg, | ||
662 | struct nlattr **attrs) | ||
663 | { | ||
664 | char port_str[27]; | ||
665 | struct tipc_name_table_query *ntq; | ||
666 | struct nlattr *nt[TIPC_NLA_NAME_TABLE_MAX + 1]; | ||
667 | struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1]; | ||
668 | u32 node, depth, type, lowbound, upbound; | ||
669 | static const char * const scope_str[] = {"", " zone", " cluster", | ||
670 | " node"}; | ||
671 | |||
672 | nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX, | ||
673 | attrs[TIPC_NLA_NAME_TABLE], NULL); | ||
674 | |||
675 | nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL], | ||
676 | NULL); | ||
677 | |||
678 | ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); | ||
679 | |||
680 | depth = ntohl(ntq->depth); | ||
681 | type = ntohl(ntq->type); | ||
682 | lowbound = ntohl(ntq->lowbound); | ||
683 | upbound = ntohl(ntq->upbound); | ||
684 | |||
685 | if (!(depth & TIPC_NTQ_ALLTYPES) && | ||
686 | (type != nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]))) | ||
687 | return 0; | ||
688 | if (lowbound && (lowbound > nla_get_u32(publ[TIPC_NLA_PUBL_UPPER]))) | ||
689 | return 0; | ||
690 | if (upbound && (upbound < nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]))) | ||
691 | return 0; | ||
692 | |||
693 | tipc_tlv_sprintf(msg->rep, "%-10u ", | ||
694 | nla_get_u32(publ[TIPC_NLA_PUBL_TYPE])); | ||
695 | |||
696 | if (depth == 1) | ||
697 | goto out; | ||
698 | |||
699 | tipc_tlv_sprintf(msg->rep, "%-10u %-10u ", | ||
700 | nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]), | ||
701 | nla_get_u32(publ[TIPC_NLA_PUBL_UPPER])); | ||
702 | |||
703 | if (depth == 2) | ||
704 | goto out; | ||
705 | |||
706 | node = nla_get_u32(publ[TIPC_NLA_PUBL_NODE]); | ||
707 | sprintf(port_str, "<%u.%u.%u:%u>", tipc_zone(node), tipc_cluster(node), | ||
708 | tipc_node(node), nla_get_u32(publ[TIPC_NLA_PUBL_REF])); | ||
709 | tipc_tlv_sprintf(msg->rep, "%-26s ", port_str); | ||
710 | |||
711 | if (depth == 3) | ||
712 | goto out; | ||
713 | |||
714 | tipc_tlv_sprintf(msg->rep, "%-10u %s", | ||
715 | nla_get_u32(publ[TIPC_NLA_PUBL_REF]), | ||
716 | scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]); | ||
717 | out: | ||
718 | tipc_tlv_sprintf(msg->rep, "\n"); | ||
719 | |||
720 | return 0; | ||
721 | } | ||
722 | |||
723 | static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, | ||
724 | struct nlattr **attrs) | ||
725 | { | ||
726 | u32 type, lower, upper; | ||
727 | struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1]; | ||
728 | |||
729 | nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL); | ||
730 | |||
731 | type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]); | ||
732 | lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]); | ||
733 | upper = nla_get_u32(publ[TIPC_NLA_PUBL_UPPER]); | ||
734 | |||
735 | if (lower == upper) | ||
736 | tipc_tlv_sprintf(msg->rep, " {%u,%u}", type, lower); | ||
737 | else | ||
738 | tipc_tlv_sprintf(msg->rep, " {%u,%u,%u}", type, lower, upper); | ||
739 | |||
740 | return 0; | ||
741 | } | ||
742 | |||
743 | static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock) | ||
744 | { | ||
745 | int err; | ||
746 | void *hdr; | ||
747 | struct nlattr *nest; | ||
748 | struct sk_buff *args; | ||
749 | struct tipc_nl_compat_cmd_dump dump; | ||
750 | |||
751 | args = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
752 | if (!args) | ||
753 | return -ENOMEM; | ||
754 | |||
755 | hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI, | ||
756 | TIPC_NL_PUBL_GET); | ||
757 | |||
758 | nest = nla_nest_start(args, TIPC_NLA_SOCK); | ||
759 | if (!nest) { | ||
760 | kfree_skb(args); | ||
761 | return -EMSGSIZE; | ||
762 | } | ||
763 | |||
764 | if (nla_put_u32(args, TIPC_NLA_SOCK_REF, sock)) { | ||
765 | kfree_skb(args); | ||
766 | return -EMSGSIZE; | ||
767 | } | ||
768 | |||
769 | nla_nest_end(args, nest); | ||
770 | genlmsg_end(args, hdr); | ||
771 | |||
772 | dump.dumpit = tipc_nl_publ_dump; | ||
773 | dump.format = __tipc_nl_compat_publ_dump; | ||
774 | |||
775 | err = __tipc_nl_compat_dumpit(&dump, msg, args); | ||
776 | |||
777 | kfree_skb(args); | ||
778 | |||
779 | return err; | ||
780 | } | ||
781 | |||
782 | static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg, | ||
783 | struct nlattr **attrs) | ||
784 | { | ||
785 | int err; | ||
786 | u32 sock_ref; | ||
787 | struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; | ||
788 | |||
789 | nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL); | ||
790 | |||
791 | sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); | ||
792 | tipc_tlv_sprintf(msg->rep, "%u:", sock_ref); | ||
793 | |||
794 | if (sock[TIPC_NLA_SOCK_CON]) { | ||
795 | u32 node; | ||
796 | struct nlattr *con[TIPC_NLA_CON_MAX + 1]; | ||
797 | |||
798 | nla_parse_nested(con, TIPC_NLA_CON_MAX, sock[TIPC_NLA_SOCK_CON], | ||
799 | NULL); | ||
800 | |||
801 | node = nla_get_u32(con[TIPC_NLA_CON_NODE]); | ||
802 | tipc_tlv_sprintf(msg->rep, " connected to <%u.%u.%u:%u>", | ||
803 | tipc_zone(node), | ||
804 | tipc_cluster(node), | ||
805 | tipc_node(node), | ||
806 | nla_get_u32(con[TIPC_NLA_CON_SOCK])); | ||
807 | |||
808 | if (con[TIPC_NLA_CON_FLAG]) | ||
809 | tipc_tlv_sprintf(msg->rep, " via {%u,%u}\n", | ||
810 | nla_get_u32(con[TIPC_NLA_CON_TYPE]), | ||
811 | nla_get_u32(con[TIPC_NLA_CON_INST])); | ||
812 | else | ||
813 | tipc_tlv_sprintf(msg->rep, "\n"); | ||
814 | } else if (sock[TIPC_NLA_SOCK_HAS_PUBL]) { | ||
815 | tipc_tlv_sprintf(msg->rep, " bound to"); | ||
816 | |||
817 | err = tipc_nl_compat_publ_dump(msg, sock_ref); | ||
818 | if (err) | ||
819 | return err; | ||
820 | } | ||
821 | tipc_tlv_sprintf(msg->rep, "\n"); | ||
822 | |||
823 | return 0; | ||
824 | } | ||
825 | |||
826 | static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg, | ||
827 | struct nlattr **attrs) | ||
828 | { | ||
829 | struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1]; | ||
830 | |||
831 | nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA], | ||
832 | NULL); | ||
833 | |||
834 | return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME, | ||
835 | nla_data(media[TIPC_NLA_MEDIA_NAME]), | ||
836 | nla_len(media[TIPC_NLA_MEDIA_NAME])); | ||
837 | } | ||
838 | |||
839 | static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg, | ||
840 | struct nlattr **attrs) | ||
841 | { | ||
842 | struct tipc_node_info node_info; | ||
843 | struct nlattr *node[TIPC_NLA_NODE_MAX + 1]; | ||
844 | |||
845 | nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL); | ||
846 | |||
847 | node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR])); | ||
848 | node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP])); | ||
849 | |||
850 | return tipc_add_tlv(msg->rep, TIPC_TLV_NODE_INFO, &node_info, | ||
851 | sizeof(node_info)); | ||
852 | } | ||
853 | |||
854 | static int tipc_nl_compat_net_set(struct sk_buff *skb, | ||
855 | struct tipc_nl_compat_msg *msg) | ||
856 | { | ||
857 | u32 val; | ||
858 | struct nlattr *net; | ||
859 | |||
860 | val = ntohl(*(__be32 *)TLV_DATA(msg->req)); | ||
861 | |||
862 | net = nla_nest_start(skb, TIPC_NLA_NET); | ||
863 | if (!net) | ||
864 | return -EMSGSIZE; | ||
865 | |||
866 | if (msg->cmd == TIPC_CMD_SET_NODE_ADDR) { | ||
867 | if (nla_put_u32(skb, TIPC_NLA_NET_ADDR, val)) | ||
868 | return -EMSGSIZE; | ||
869 | } else if (msg->cmd == TIPC_CMD_SET_NETID) { | ||
870 | if (nla_put_u32(skb, TIPC_NLA_NET_ID, val)) | ||
871 | return -EMSGSIZE; | ||
872 | } | ||
873 | nla_nest_end(skb, net); | ||
874 | |||
875 | return 0; | ||
876 | } | ||
877 | |||
878 | static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg, | ||
879 | struct nlattr **attrs) | ||
880 | { | ||
881 | __be32 id; | ||
882 | struct nlattr *net[TIPC_NLA_NET_MAX + 1]; | ||
883 | |||
884 | nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL); | ||
885 | id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID])); | ||
886 | |||
887 | return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id)); | ||
888 | } | ||
889 | |||
890 | static int tipc_cmd_show_stats_compat(struct tipc_nl_compat_msg *msg) | ||
891 | { | ||
892 | msg->rep = tipc_tlv_alloc(ULTRA_STRING_MAX_LEN); | ||
893 | if (!msg->rep) | ||
894 | return -ENOMEM; | ||
895 | |||
896 | tipc_tlv_init(msg->rep, TIPC_TLV_ULTRA_STRING); | ||
897 | tipc_tlv_sprintf(msg->rep, "TIPC version " TIPC_MOD_VER "\n"); | ||
898 | |||
899 | return 0; | ||
900 | } | ||
901 | |||
902 | static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg) | ||
903 | { | ||
904 | struct tipc_nl_compat_cmd_dump dump; | ||
905 | struct tipc_nl_compat_cmd_doit doit; | ||
906 | |||
907 | memset(&dump, 0, sizeof(dump)); | ||
908 | memset(&doit, 0, sizeof(doit)); | ||
909 | |||
910 | switch (msg->cmd) { | ||
911 | case TIPC_CMD_NOOP: | ||
912 | msg->rep = tipc_tlv_alloc(0); | ||
913 | if (!msg->rep) | ||
914 | return -ENOMEM; | ||
915 | return 0; | ||
916 | case TIPC_CMD_GET_BEARER_NAMES: | ||
917 | msg->rep_size = MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME); | ||
918 | dump.dumpit = tipc_nl_bearer_dump; | ||
919 | dump.format = tipc_nl_compat_bearer_dump; | ||
920 | return tipc_nl_compat_dumpit(&dump, msg); | ||
921 | case TIPC_CMD_ENABLE_BEARER: | ||
922 | msg->req_type = TIPC_TLV_BEARER_CONFIG; | ||
923 | doit.doit = tipc_nl_bearer_enable; | ||
924 | doit.transcode = tipc_nl_compat_bearer_enable; | ||
925 | return tipc_nl_compat_doit(&doit, msg); | ||
926 | case TIPC_CMD_DISABLE_BEARER: | ||
927 | msg->req_type = TIPC_TLV_BEARER_NAME; | ||
928 | doit.doit = tipc_nl_bearer_disable; | ||
929 | doit.transcode = tipc_nl_compat_bearer_disable; | ||
930 | return tipc_nl_compat_doit(&doit, msg); | ||
931 | case TIPC_CMD_SHOW_LINK_STATS: | ||
932 | msg->req_type = TIPC_TLV_LINK_NAME; | ||
933 | msg->rep_size = ULTRA_STRING_MAX_LEN; | ||
934 | msg->rep_type = TIPC_TLV_ULTRA_STRING; | ||
935 | dump.dumpit = tipc_nl_link_dump; | ||
936 | dump.format = tipc_nl_compat_link_stat_dump; | ||
937 | return tipc_nl_compat_dumpit(&dump, msg); | ||
938 | case TIPC_CMD_GET_LINKS: | ||
939 | msg->req_type = TIPC_TLV_NET_ADDR; | ||
940 | msg->rep_size = ULTRA_STRING_MAX_LEN; | ||
941 | dump.dumpit = tipc_nl_link_dump; | ||
942 | dump.format = tipc_nl_compat_link_dump; | ||
943 | return tipc_nl_compat_dumpit(&dump, msg); | ||
944 | case TIPC_CMD_SET_LINK_TOL: | ||
945 | case TIPC_CMD_SET_LINK_PRI: | ||
946 | case TIPC_CMD_SET_LINK_WINDOW: | ||
947 | msg->req_type = TIPC_TLV_LINK_CONFIG; | ||
948 | doit.doit = tipc_nl_link_set; | ||
949 | doit.transcode = tipc_nl_compat_link_set; | ||
950 | return tipc_nl_compat_doit(&doit, msg); | ||
951 | case TIPC_CMD_RESET_LINK_STATS: | ||
952 | msg->req_type = TIPC_TLV_LINK_NAME; | ||
953 | doit.doit = tipc_nl_link_reset_stats; | ||
954 | doit.transcode = tipc_nl_compat_link_reset_stats; | ||
955 | return tipc_nl_compat_doit(&doit, msg); | ||
956 | case TIPC_CMD_SHOW_NAME_TABLE: | ||
957 | msg->req_type = TIPC_TLV_NAME_TBL_QUERY; | ||
958 | msg->rep_size = ULTRA_STRING_MAX_LEN; | ||
959 | msg->rep_type = TIPC_TLV_ULTRA_STRING; | ||
960 | dump.header = tipc_nl_compat_name_table_dump_header; | ||
961 | dump.dumpit = tipc_nl_name_table_dump; | ||
962 | dump.format = tipc_nl_compat_name_table_dump; | ||
963 | return tipc_nl_compat_dumpit(&dump, msg); | ||
964 | case TIPC_CMD_SHOW_PORTS: | ||
965 | msg->rep_size = ULTRA_STRING_MAX_LEN; | ||
966 | msg->rep_type = TIPC_TLV_ULTRA_STRING; | ||
967 | dump.dumpit = tipc_nl_sk_dump; | ||
968 | dump.format = tipc_nl_compat_sk_dump; | ||
969 | return tipc_nl_compat_dumpit(&dump, msg); | ||
970 | case TIPC_CMD_GET_MEDIA_NAMES: | ||
971 | msg->rep_size = MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME); | ||
972 | dump.dumpit = tipc_nl_media_dump; | ||
973 | dump.format = tipc_nl_compat_media_dump; | ||
974 | return tipc_nl_compat_dumpit(&dump, msg); | ||
975 | case TIPC_CMD_GET_NODES: | ||
976 | msg->rep_size = ULTRA_STRING_MAX_LEN; | ||
977 | dump.dumpit = tipc_nl_node_dump; | ||
978 | dump.format = tipc_nl_compat_node_dump; | ||
979 | return tipc_nl_compat_dumpit(&dump, msg); | ||
980 | case TIPC_CMD_SET_NODE_ADDR: | ||
981 | msg->req_type = TIPC_TLV_NET_ADDR; | ||
982 | doit.doit = tipc_nl_net_set; | ||
983 | doit.transcode = tipc_nl_compat_net_set; | ||
984 | return tipc_nl_compat_doit(&doit, msg); | ||
985 | case TIPC_CMD_SET_NETID: | ||
986 | msg->req_type = TIPC_TLV_UNSIGNED; | ||
987 | doit.doit = tipc_nl_net_set; | ||
988 | doit.transcode = tipc_nl_compat_net_set; | ||
989 | return tipc_nl_compat_doit(&doit, msg); | ||
990 | case TIPC_CMD_GET_NETID: | ||
991 | msg->rep_size = sizeof(u32); | ||
992 | dump.dumpit = tipc_nl_net_dump; | ||
993 | dump.format = tipc_nl_compat_net_dump; | ||
994 | return tipc_nl_compat_dumpit(&dump, msg); | ||
995 | case TIPC_CMD_SHOW_STATS: | ||
996 | return tipc_cmd_show_stats_compat(msg); | ||
997 | } | ||
998 | |||
999 | return -EOPNOTSUPP; | ||
1000 | } | ||
1001 | |||
1002 | static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info) | ||
1003 | { | ||
1004 | int err; | ||
1005 | int len; | ||
1006 | struct tipc_nl_compat_msg msg; | ||
1007 | struct nlmsghdr *req_nlh; | ||
1008 | struct nlmsghdr *rep_nlh; | ||
1009 | struct tipc_genlmsghdr *req_userhdr = info->userhdr; | ||
1010 | struct net *net = genl_info_net(info); | ||
1011 | |||
1012 | memset(&msg, 0, sizeof(msg)); | ||
1013 | |||
1014 | req_nlh = (struct nlmsghdr *)skb->data; | ||
1015 | msg.req = nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN; | ||
1016 | msg.cmd = req_userhdr->cmd; | ||
1017 | msg.dst_sk = info->dst_sk; | ||
1018 | |||
1019 | if ((msg.cmd & 0xC000) && (!netlink_net_capable(skb, CAP_NET_ADMIN))) { | ||
1020 | msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_NET_ADMIN); | ||
1021 | err = -EACCES; | ||
1022 | goto send; | ||
1023 | } | ||
1024 | |||
1025 | len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN); | ||
1026 | if (TLV_GET_LEN(msg.req) && !TLV_OK(msg.req, len)) { | ||
1027 | msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); | ||
1028 | err = -EOPNOTSUPP; | ||
1029 | goto send; | ||
1030 | } | ||
1031 | |||
1032 | err = tipc_nl_compat_handle(&msg); | ||
1033 | if (err == -EOPNOTSUPP) | ||
1034 | msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); | ||
1035 | else if (err == -EINVAL) | ||
1036 | msg.rep = tipc_get_err_tlv(TIPC_CFG_TLV_ERROR); | ||
1037 | send: | ||
1038 | if (!msg.rep) | ||
1039 | return err; | ||
1040 | |||
1041 | len = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN); | ||
1042 | skb_push(msg.rep, len); | ||
1043 | rep_nlh = nlmsg_hdr(msg.rep); | ||
1044 | memcpy(rep_nlh, info->nlhdr, len); | ||
1045 | rep_nlh->nlmsg_len = msg.rep->len; | ||
1046 | genlmsg_unicast(net, msg.rep, NETLINK_CB(skb).portid); | ||
1047 | |||
1048 | return err; | ||
1049 | } | ||
1050 | |||
1051 | static struct genl_family tipc_genl_compat_family = { | ||
1052 | .id = GENL_ID_GENERATE, | ||
1053 | .name = TIPC_GENL_NAME, | ||
1054 | .version = TIPC_GENL_VERSION, | ||
1055 | .hdrsize = TIPC_GENL_HDRLEN, | ||
1056 | .maxattr = 0, | ||
1057 | .netnsok = true, | ||
1058 | }; | ||
1059 | |||
1060 | static struct genl_ops tipc_genl_compat_ops[] = { | ||
1061 | { | ||
1062 | .cmd = TIPC_GENL_CMD, | ||
1063 | .doit = tipc_nl_compat_recv, | ||
1064 | }, | ||
1065 | }; | ||
1066 | |||
1067 | int tipc_netlink_compat_start(void) | ||
1068 | { | ||
1069 | int res; | ||
1070 | |||
1071 | res = genl_register_family_with_ops(&tipc_genl_compat_family, | ||
1072 | tipc_genl_compat_ops); | ||
1073 | if (res) { | ||
1074 | pr_err("Failed to register legacy compat interface\n"); | ||
1075 | return res; | ||
1076 | } | ||
1077 | |||
1078 | return 0; | ||
1079 | } | ||
1080 | |||
1081 | void tipc_netlink_compat_stop(void) | ||
1082 | { | ||
1083 | genl_unregister_family(&tipc_genl_compat_family); | ||
1084 | } | ||
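Editor's note: the transcode callbacks above (tipc_nl_compat_bearer_enable(), tipc_nl_compat_bearer_disable(), tipc_nl_compat_link_set(), ...) rewrite those flat TLVs into the nested attributes that the new doit handlers expect. Below is a small user-space sketch of what such a nest looks like on the wire, built with the raw struct nlattr layout: an outer nest (think TIPC_NLA_BEARER) wrapping a string attribute (think TIPC_NLA_BEARER_NAME). The numeric attribute types are placeholders standing in for the real TIPC_NLA_* values from <linux/tipc_netlink.h>.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <linux/netlink.h>	/* struct nlattr, NLA_HDRLEN, NLA_ALIGN, NLA_F_NESTED */

static struct nlattr *nest_start(uint8_t *buf, size_t *off, uint16_t type)
{
	struct nlattr *nest = (struct nlattr *)(buf + *off);

	nest->nla_type = type | NLA_F_NESTED;	/* length patched in nest_end() */
	*off += NLA_HDRLEN;
	return nest;
}

static void put_string(uint8_t *buf, size_t *off, uint16_t type, const char *s)
{
	struct nlattr *na = (struct nlattr *)(buf + *off);
	size_t len = strlen(s) + 1;

	na->nla_type = type;
	na->nla_len = NLA_HDRLEN + len;
	memcpy((uint8_t *)na + NLA_HDRLEN, s, len);
	*off += NLA_ALIGN(na->nla_len);
}

static void nest_end(uint8_t *buf, size_t off, struct nlattr *nest)
{
	/* Back-patch the outer attribute's length now that the nest is full */
	nest->nla_len = (buf + off) - (uint8_t *)nest;
}

int main(void)
{
	uint8_t buf[256] = { 0 };
	size_t off = 0;
	struct nlattr *bearer = nest_start(buf, &off, 1 /* "TIPC_NLA_BEARER" */);

	put_string(buf, &off, 1 /* "TIPC_NLA_BEARER_NAME" */, "eth:eth0");
	nest_end(buf, off, bearer);

	printf("nest length %u bytes (header %u + aligned name attribute)\n",
	       (unsigned)bearer->nla_len, (unsigned)NLA_HDRLEN);
	return 0;
}

The kernel helpers nla_nest_start()/nla_nest_end() used in the compat code perform the same back-patching of the outer attribute's length once the nest is complete.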
diff --git a/net/tipc/node.c b/net/tipc/node.c index 8d353ec77a66..22c059ad2999 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -35,21 +35,14 @@ | |||
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include "core.h" | 37 | #include "core.h" |
38 | #include "config.h" | 38 | #include "link.h" |
39 | #include "node.h" | 39 | #include "node.h" |
40 | #include "name_distr.h" | 40 | #include "name_distr.h" |
41 | #include "socket.h" | 41 | #include "socket.h" |
42 | 42 | ||
43 | #define NODE_HTABLE_SIZE 512 | ||
44 | |||
45 | static void node_lost_contact(struct tipc_node *n_ptr); | 43 | static void node_lost_contact(struct tipc_node *n_ptr); |
46 | static void node_established_contact(struct tipc_node *n_ptr); | 44 | static void node_established_contact(struct tipc_node *n_ptr); |
47 | 45 | static void tipc_node_delete(struct tipc_node *node); | |
48 | static struct hlist_head node_htable[NODE_HTABLE_SIZE]; | ||
49 | LIST_HEAD(tipc_node_list); | ||
50 | static u32 tipc_num_nodes; | ||
51 | static u32 tipc_num_links; | ||
52 | static DEFINE_SPINLOCK(node_list_lock); | ||
53 | 46 | ||
54 | struct tipc_sock_conn { | 47 | struct tipc_sock_conn { |
55 | u32 port; | 48 | u32 port; |
@@ -75,19 +68,39 @@ static unsigned int tipc_hashfn(u32 addr) | |||
75 | return addr & (NODE_HTABLE_SIZE - 1); | 68 | return addr & (NODE_HTABLE_SIZE - 1); |
76 | } | 69 | } |
77 | 70 | ||
71 | static void tipc_node_kref_release(struct kref *kref) | ||
72 | { | ||
73 | struct tipc_node *node = container_of(kref, struct tipc_node, kref); | ||
74 | |||
75 | tipc_node_delete(node); | ||
76 | } | ||
77 | |||
78 | void tipc_node_put(struct tipc_node *node) | ||
79 | { | ||
80 | kref_put(&node->kref, tipc_node_kref_release); | ||
81 | } | ||
82 | |||
83 | static void tipc_node_get(struct tipc_node *node) | ||
84 | { | ||
85 | kref_get(&node->kref); | ||
86 | } | ||
87 | |||
78 | /* | 88 | /* |
79 | * tipc_node_find - locate specified node object, if it exists | 89 | * tipc_node_find - locate specified node object, if it exists |
80 | */ | 90 | */ |
81 | struct tipc_node *tipc_node_find(u32 addr) | 91 | struct tipc_node *tipc_node_find(struct net *net, u32 addr) |
82 | { | 92 | { |
93 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
83 | struct tipc_node *node; | 94 | struct tipc_node *node; |
84 | 95 | ||
85 | if (unlikely(!in_own_cluster_exact(addr))) | 96 | if (unlikely(!in_own_cluster_exact(net, addr))) |
86 | return NULL; | 97 | return NULL; |
87 | 98 | ||
88 | rcu_read_lock(); | 99 | rcu_read_lock(); |
89 | hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) { | 100 | hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)], |
101 | hash) { | ||
90 | if (node->addr == addr) { | 102 | if (node->addr == addr) { |
103 | tipc_node_get(node); | ||
91 | rcu_read_unlock(); | 104 | rcu_read_unlock(); |
92 | return node; | 105 | return node; |
93 | } | 106 | } |
@@ -96,79 +109,80 @@ struct tipc_node *tipc_node_find(u32 addr) | |||
96 | return NULL; | 109 | return NULL; |
97 | } | 110 | } |
98 | 111 | ||
99 | struct tipc_node *tipc_node_create(u32 addr) | 112 | struct tipc_node *tipc_node_create(struct net *net, u32 addr) |
100 | { | 113 | { |
114 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
101 | struct tipc_node *n_ptr, *temp_node; | 115 | struct tipc_node *n_ptr, *temp_node; |
102 | 116 | ||
103 | spin_lock_bh(&node_list_lock); | 117 | spin_lock_bh(&tn->node_list_lock); |
104 | 118 | n_ptr = tipc_node_find(net, addr); | |
119 | if (n_ptr) | ||
120 | goto exit; | ||
105 | n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC); | 121 | n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC); |
106 | if (!n_ptr) { | 122 | if (!n_ptr) { |
107 | spin_unlock_bh(&node_list_lock); | ||
108 | pr_warn("Node creation failed, no memory\n"); | 123 | pr_warn("Node creation failed, no memory\n"); |
109 | return NULL; | 124 | goto exit; |
110 | } | 125 | } |
111 | |||
112 | n_ptr->addr = addr; | 126 | n_ptr->addr = addr; |
127 | n_ptr->net = net; | ||
128 | kref_init(&n_ptr->kref); | ||
113 | spin_lock_init(&n_ptr->lock); | 129 | spin_lock_init(&n_ptr->lock); |
114 | INIT_HLIST_NODE(&n_ptr->hash); | 130 | INIT_HLIST_NODE(&n_ptr->hash); |
115 | INIT_LIST_HEAD(&n_ptr->list); | 131 | INIT_LIST_HEAD(&n_ptr->list); |
116 | INIT_LIST_HEAD(&n_ptr->publ_list); | 132 | INIT_LIST_HEAD(&n_ptr->publ_list); |
117 | INIT_LIST_HEAD(&n_ptr->conn_sks); | 133 | INIT_LIST_HEAD(&n_ptr->conn_sks); |
118 | skb_queue_head_init(&n_ptr->waiting_sks); | 134 | __skb_queue_head_init(&n_ptr->bclink.deferdq); |
119 | __skb_queue_head_init(&n_ptr->bclink.deferred_queue); | 135 | hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]); |
120 | 136 | list_for_each_entry_rcu(temp_node, &tn->node_list, list) { | |
121 | hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]); | ||
122 | |||
123 | list_for_each_entry_rcu(temp_node, &tipc_node_list, list) { | ||
124 | if (n_ptr->addr < temp_node->addr) | 137 | if (n_ptr->addr < temp_node->addr) |
125 | break; | 138 | break; |
126 | } | 139 | } |
127 | list_add_tail_rcu(&n_ptr->list, &temp_node->list); | 140 | list_add_tail_rcu(&n_ptr->list, &temp_node->list); |
128 | n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN; | 141 | n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN; |
129 | n_ptr->signature = INVALID_NODE_SIG; | 142 | n_ptr->signature = INVALID_NODE_SIG; |
130 | 143 | tipc_node_get(n_ptr); | |
131 | tipc_num_nodes++; | 144 | exit: |
132 | 145 | spin_unlock_bh(&tn->node_list_lock); | |
133 | spin_unlock_bh(&node_list_lock); | ||
134 | return n_ptr; | 146 | return n_ptr; |
135 | } | 147 | } |
136 | 148 | ||
137 | static void tipc_node_delete(struct tipc_node *n_ptr) | 149 | static void tipc_node_delete(struct tipc_node *node) |
138 | { | 150 | { |
139 | list_del_rcu(&n_ptr->list); | 151 | list_del_rcu(&node->list); |
140 | hlist_del_rcu(&n_ptr->hash); | 152 | hlist_del_rcu(&node->hash); |
141 | kfree_rcu(n_ptr, rcu); | 153 | kfree_rcu(node, rcu); |
142 | |||
143 | tipc_num_nodes--; | ||
144 | } | 154 | } |
145 | 155 | ||
146 | void tipc_node_stop(void) | 156 | void tipc_node_stop(struct net *net) |
147 | { | 157 | { |
158 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
148 | struct tipc_node *node, *t_node; | 159 | struct tipc_node *node, *t_node; |
149 | 160 | ||
150 | spin_lock_bh(&node_list_lock); | 161 | spin_lock_bh(&tn->node_list_lock); |
151 | list_for_each_entry_safe(node, t_node, &tipc_node_list, list) | 162 | list_for_each_entry_safe(node, t_node, &tn->node_list, list) |
152 | tipc_node_delete(node); | 163 | tipc_node_put(node); |
153 | spin_unlock_bh(&node_list_lock); | 164 | spin_unlock_bh(&tn->node_list_lock); |
154 | } | 165 | } |
155 | 166 | ||
156 | int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port) | 167 | int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port) |
157 | { | 168 | { |
158 | struct tipc_node *node; | 169 | struct tipc_node *node; |
159 | struct tipc_sock_conn *conn; | 170 | struct tipc_sock_conn *conn; |
171 | int err = 0; | ||
160 | 172 | ||
161 | if (in_own_node(dnode)) | 173 | if (in_own_node(net, dnode)) |
162 | return 0; | 174 | return 0; |
163 | 175 | ||
164 | node = tipc_node_find(dnode); | 176 | node = tipc_node_find(net, dnode); |
165 | if (!node) { | 177 | if (!node) { |
166 | pr_warn("Connecting sock to node 0x%x failed\n", dnode); | 178 | pr_warn("Connecting sock to node 0x%x failed\n", dnode); |
167 | return -EHOSTUNREACH; | 179 | return -EHOSTUNREACH; |
168 | } | 180 | } |
169 | conn = kmalloc(sizeof(*conn), GFP_ATOMIC); | 181 | conn = kmalloc(sizeof(*conn), GFP_ATOMIC); |
170 | if (!conn) | 182 | if (!conn) { |
171 | return -EHOSTUNREACH; | 183 | err = -EHOSTUNREACH; |
184 | goto exit; | ||
185 | } | ||
172 | conn->peer_node = dnode; | 186 | conn->peer_node = dnode; |
173 | conn->port = port; | 187 | conn->port = port; |
174 | conn->peer_port = peer_port; | 188 | conn->peer_port = peer_port; |
@@ -176,18 +190,20 @@ int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port) | |||
176 | tipc_node_lock(node); | 190 | tipc_node_lock(node); |
177 | list_add_tail(&conn->list, &node->conn_sks); | 191 | list_add_tail(&conn->list, &node->conn_sks); |
178 | tipc_node_unlock(node); | 192 | tipc_node_unlock(node); |
179 | return 0; | 193 | exit: |
194 | tipc_node_put(node); | ||
195 | return err; | ||
180 | } | 196 | } |
181 | 197 | ||
182 | void tipc_node_remove_conn(u32 dnode, u32 port) | 198 | void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port) |
183 | { | 199 | { |
184 | struct tipc_node *node; | 200 | struct tipc_node *node; |
185 | struct tipc_sock_conn *conn, *safe; | 201 | struct tipc_sock_conn *conn, *safe; |
186 | 202 | ||
187 | if (in_own_node(dnode)) | 203 | if (in_own_node(net, dnode)) |
188 | return; | 204 | return; |
189 | 205 | ||
190 | node = tipc_node_find(dnode); | 206 | node = tipc_node_find(net, dnode); |
191 | if (!node) | 207 | if (!node) |
192 | return; | 208 | return; |
193 | 209 | ||
@@ -199,23 +215,7 @@ void tipc_node_remove_conn(u32 dnode, u32 port) | |||
199 | kfree(conn); | 215 | kfree(conn); |
200 | } | 216 | } |
201 | tipc_node_unlock(node); | 217 | tipc_node_unlock(node); |
202 | } | 218 | tipc_node_put(node); |
203 | |||
204 | void tipc_node_abort_sock_conns(struct list_head *conns) | ||
205 | { | ||
206 | struct tipc_sock_conn *conn, *safe; | ||
207 | struct sk_buff *buf; | ||
208 | |||
209 | list_for_each_entry_safe(conn, safe, conns, list) { | ||
210 | buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, | ||
211 | SHORT_H_SIZE, 0, tipc_own_addr, | ||
212 | conn->peer_node, conn->port, | ||
213 | conn->peer_port, TIPC_ERR_NO_NODE); | ||
214 | if (likely(buf)) | ||
215 | tipc_sk_rcv(buf); | ||
216 | list_del(&conn->list); | ||
217 | kfree(conn); | ||
218 | } | ||
219 | } | 219 | } |
220 | 220 | ||
221 | /** | 221 | /** |
@@ -231,8 +231,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | |||
231 | n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP; | 231 | n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP; |
232 | n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; | 232 | n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; |
233 | 233 | ||
234 | pr_info("Established link <%s> on network plane %c\n", | 234 | pr_debug("Established link <%s> on network plane %c\n", |
235 | l_ptr->name, l_ptr->net_plane); | 235 | l_ptr->name, l_ptr->net_plane); |
236 | 236 | ||
237 | if (!active[0]) { | 237 | if (!active[0]) { |
238 | active[0] = active[1] = l_ptr; | 238 | active[0] = active[1] = l_ptr; |
@@ -240,7 +240,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | |||
240 | goto exit; | 240 | goto exit; |
241 | } | 241 | } |
242 | if (l_ptr->priority < active[0]->priority) { | 242 | if (l_ptr->priority < active[0]->priority) { |
243 | pr_info("New link <%s> becomes standby\n", l_ptr->name); | 243 | pr_debug("New link <%s> becomes standby\n", l_ptr->name); |
244 | goto exit; | 244 | goto exit; |
245 | } | 245 | } |
246 | tipc_link_dup_queue_xmit(active[0], l_ptr); | 246 | tipc_link_dup_queue_xmit(active[0], l_ptr); |
@@ -248,14 +248,14 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | |||
248 | active[0] = l_ptr; | 248 | active[0] = l_ptr; |
249 | goto exit; | 249 | goto exit; |
250 | } | 250 | } |
251 | pr_info("Old link <%s> becomes standby\n", active[0]->name); | 251 | pr_debug("Old link <%s> becomes standby\n", active[0]->name); |
252 | if (active[1] != active[0]) | 252 | if (active[1] != active[0]) |
253 | pr_info("Old link <%s> becomes standby\n", active[1]->name); | 253 | pr_debug("Old link <%s> becomes standby\n", active[1]->name); |
254 | active[0] = active[1] = l_ptr; | 254 | active[0] = active[1] = l_ptr; |
255 | exit: | 255 | exit: |
256 | /* Leave room for changeover header when returning 'mtu' to users: */ | 256 | /* Leave room for changeover header when returning 'mtu' to users: */ |
257 | n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE; | 257 | n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE; |
258 | n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE; | 258 | n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE; |
259 | } | 259 | } |
260 | 260 | ||
261 | /** | 261 | /** |
@@ -290,6 +290,7 @@ static void node_select_active_links(struct tipc_node *n_ptr) | |||
290 | */ | 290 | */ |
291 | void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | 291 | void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) |
292 | { | 292 | { |
293 | struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id); | ||
293 | struct tipc_link **active; | 294 | struct tipc_link **active; |
294 | 295 | ||
295 | n_ptr->working_links--; | 296 | n_ptr->working_links--; |
@@ -297,12 +298,12 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | |||
297 | n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; | 298 | n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; |
298 | 299 | ||
299 | if (!tipc_link_is_active(l_ptr)) { | 300 | if (!tipc_link_is_active(l_ptr)) { |
300 | pr_info("Lost standby link <%s> on network plane %c\n", | 301 | pr_debug("Lost standby link <%s> on network plane %c\n", |
301 | l_ptr->name, l_ptr->net_plane); | 302 | l_ptr->name, l_ptr->net_plane); |
302 | return; | 303 | return; |
303 | } | 304 | } |
304 | pr_info("Lost link <%s> on network plane %c\n", | 305 | pr_debug("Lost link <%s> on network plane %c\n", |
305 | l_ptr->name, l_ptr->net_plane); | 306 | l_ptr->name, l_ptr->net_plane); |
306 | 307 | ||
307 | active = &n_ptr->active_links[0]; | 308 | active = &n_ptr->active_links[0]; |
308 | if (active[0] == l_ptr) | 309 | if (active[0] == l_ptr) |
@@ -318,13 +319,12 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | |||
318 | 319 | ||
319 | /* Leave room for changeover header when returning 'mtu' to users: */ | 320 | /* Leave room for changeover header when returning 'mtu' to users: */ |
320 | if (active[0]) { | 321 | if (active[0]) { |
321 | n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE; | 322 | n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE; |
322 | n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE; | 323 | n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE; |
323 | return; | 324 | return; |
324 | } | 325 | } |
325 | |||
326 | /* Loopback link went down? No fragmentation needed from now on. */ | 326 | /* Loopback link went down? No fragmentation needed from now on. */ |
327 | if (n_ptr->addr == tipc_own_addr) { | 327 | if (n_ptr->addr == tn->own_addr) { |
328 | n_ptr->act_mtus[0] = MAX_MSG_SIZE; | 328 | n_ptr->act_mtus[0] = MAX_MSG_SIZE; |
329 | n_ptr->act_mtus[1] = MAX_MSG_SIZE; | 329 | n_ptr->act_mtus[1] = MAX_MSG_SIZE; |
330 | } | 330 | } |
@@ -343,9 +343,6 @@ int tipc_node_is_up(struct tipc_node *n_ptr) | |||
343 | void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | 343 | void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) |
344 | { | 344 | { |
345 | n_ptr->links[l_ptr->bearer_id] = l_ptr; | 345 | n_ptr->links[l_ptr->bearer_id] = l_ptr; |
346 | spin_lock_bh(&node_list_lock); | ||
347 | tipc_num_links++; | ||
348 | spin_unlock_bh(&node_list_lock); | ||
349 | n_ptr->link_cnt++; | 346 | n_ptr->link_cnt++; |
350 | } | 347 | } |
351 | 348 | ||
@@ -357,9 +354,6 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | |||
357 | if (l_ptr != n_ptr->links[i]) | 354 | if (l_ptr != n_ptr->links[i]) |
358 | continue; | 355 | continue; |
359 | n_ptr->links[i] = NULL; | 356 | n_ptr->links[i] = NULL; |
360 | spin_lock_bh(&node_list_lock); | ||
361 | tipc_num_links--; | ||
362 | spin_unlock_bh(&node_list_lock); | ||
363 | n_ptr->link_cnt--; | 357 | n_ptr->link_cnt--; |
364 | } | 358 | } |
365 | } | 359 | } |
@@ -368,161 +362,71 @@ static void node_established_contact(struct tipc_node *n_ptr) | |||
368 | { | 362 | { |
369 | n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP; | 363 | n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP; |
370 | n_ptr->bclink.oos_state = 0; | 364 | n_ptr->bclink.oos_state = 0; |
371 | n_ptr->bclink.acked = tipc_bclink_get_last_sent(); | 365 | n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net); |
372 | tipc_bclink_add_node(n_ptr->addr); | 366 | tipc_bclink_add_node(n_ptr->net, n_ptr->addr); |
373 | } | 367 | } |
374 | 368 | ||
375 | static void node_lost_contact(struct tipc_node *n_ptr) | 369 | static void node_lost_contact(struct tipc_node *n_ptr) |
376 | { | 370 | { |
377 | char addr_string[16]; | 371 | char addr_string[16]; |
378 | u32 i; | 372 | struct tipc_sock_conn *conn, *safe; |
373 | struct list_head *conns = &n_ptr->conn_sks; | ||
374 | struct sk_buff *skb; | ||
375 | struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id); | ||
376 | uint i; | ||
379 | 377 | ||
380 | pr_info("Lost contact with %s\n", | 378 | pr_debug("Lost contact with %s\n", |
381 | tipc_addr_string_fill(addr_string, n_ptr->addr)); | 379 | tipc_addr_string_fill(addr_string, n_ptr->addr)); |
382 | 380 | ||
383 | /* Flush broadcast link info associated with lost node */ | 381 | /* Flush broadcast link info associated with lost node */ |
384 | if (n_ptr->bclink.recv_permitted) { | 382 | if (n_ptr->bclink.recv_permitted) { |
385 | __skb_queue_purge(&n_ptr->bclink.deferred_queue); | 383 | __skb_queue_purge(&n_ptr->bclink.deferdq); |
386 | 384 | ||
387 | if (n_ptr->bclink.reasm_buf) { | 385 | if (n_ptr->bclink.reasm_buf) { |
388 | kfree_skb(n_ptr->bclink.reasm_buf); | 386 | kfree_skb(n_ptr->bclink.reasm_buf); |
389 | n_ptr->bclink.reasm_buf = NULL; | 387 | n_ptr->bclink.reasm_buf = NULL; |
390 | } | 388 | } |
391 | 389 | ||
392 | tipc_bclink_remove_node(n_ptr->addr); | 390 | tipc_bclink_remove_node(n_ptr->net, n_ptr->addr); |
393 | tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ); | 391 | tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ); |
394 | 392 | ||
395 | n_ptr->bclink.recv_permitted = false; | 393 | n_ptr->bclink.recv_permitted = false; |
396 | } | 394 | } |
397 | 395 | ||
398 | /* Abort link changeover */ | 396 | /* Abort any ongoing link failover */ |
399 | for (i = 0; i < MAX_BEARERS; i++) { | 397 | for (i = 0; i < MAX_BEARERS; i++) { |
400 | struct tipc_link *l_ptr = n_ptr->links[i]; | 398 | struct tipc_link *l_ptr = n_ptr->links[i]; |
401 | if (!l_ptr) | 399 | if (!l_ptr) |
402 | continue; | 400 | continue; |
403 | l_ptr->reset_checkpoint = l_ptr->next_in_no; | 401 | l_ptr->flags &= ~LINK_FAILINGOVER; |
404 | l_ptr->exp_msg_count = 0; | 402 | l_ptr->failover_checkpt = 0; |
403 | l_ptr->failover_pkts = 0; | ||
404 | kfree_skb(l_ptr->failover_skb); | ||
405 | l_ptr->failover_skb = NULL; | ||
405 | tipc_link_reset_fragments(l_ptr); | 406 | tipc_link_reset_fragments(l_ptr); |
406 | } | 407 | } |
407 | 408 | ||
408 | n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN; | 409 | n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN; |
409 | 410 | ||
410 | /* Notify subscribers and prevent re-contact with node until | 411 | /* Prevent re-contact with node until cleanup is done */ |
411 | * cleanup is done. | 412 | n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN; |
412 | */ | ||
413 | n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN | | ||
414 | TIPC_NOTIFY_NODE_DOWN; | ||
415 | } | ||
416 | 413 | ||
417 | struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) | 414 | /* Notify publications from this node */ |
418 | { | 415 | n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN; |
419 | u32 domain; | ||
420 | struct sk_buff *buf; | ||
421 | struct tipc_node *n_ptr; | ||
422 | struct tipc_node_info node_info; | ||
423 | u32 payload_size; | ||
424 | |||
425 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) | ||
426 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
427 | |||
428 | domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); | ||
429 | if (!tipc_addr_domain_valid(domain)) | ||
430 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | ||
431 | " (network address)"); | ||
432 | |||
433 | spin_lock_bh(&node_list_lock); | ||
434 | if (!tipc_num_nodes) { | ||
435 | spin_unlock_bh(&node_list_lock); | ||
436 | return tipc_cfg_reply_none(); | ||
437 | } | ||
438 | 416 | ||
439 | /* For now, get space for all other nodes */ | 417 | /* Notify sockets connected to node */ |
440 | payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes; | 418 | list_for_each_entry_safe(conn, safe, conns, list) { |
441 | if (payload_size > 32768u) { | 419 | skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, |
442 | spin_unlock_bh(&node_list_lock); | 420 | SHORT_H_SIZE, 0, tn->own_addr, |
443 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | 421 | conn->peer_node, conn->port, |
444 | " (too many nodes)"); | 422 | conn->peer_port, TIPC_ERR_NO_NODE); |
445 | } | 423 | if (likely(skb)) { |
446 | spin_unlock_bh(&node_list_lock); | 424 | skb_queue_tail(n_ptr->inputq, skb); |
447 | 425 | n_ptr->action_flags |= TIPC_MSG_EVT; | |
448 | buf = tipc_cfg_reply_alloc(payload_size); | ||
449 | if (!buf) | ||
450 | return NULL; | ||
451 | |||
452 | /* Add TLVs for all nodes in scope */ | ||
453 | rcu_read_lock(); | ||
454 | list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { | ||
455 | if (!tipc_in_scope(domain, n_ptr->addr)) | ||
456 | continue; | ||
457 | node_info.addr = htonl(n_ptr->addr); | ||
458 | node_info.up = htonl(tipc_node_is_up(n_ptr)); | ||
459 | tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO, | ||
460 | &node_info, sizeof(node_info)); | ||
461 | } | ||
462 | rcu_read_unlock(); | ||
463 | return buf; | ||
464 | } | ||
465 | |||
466 | struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) | ||
467 | { | ||
468 | u32 domain; | ||
469 | struct sk_buff *buf; | ||
470 | struct tipc_node *n_ptr; | ||
471 | struct tipc_link_info link_info; | ||
472 | u32 payload_size; | ||
473 | |||
474 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) | ||
475 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | ||
476 | |||
477 | domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); | ||
478 | if (!tipc_addr_domain_valid(domain)) | ||
479 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | ||
480 | " (network address)"); | ||
481 | |||
482 | if (!tipc_own_addr) | ||
483 | return tipc_cfg_reply_none(); | ||
484 | |||
485 | spin_lock_bh(&node_list_lock); | ||
486 | /* Get space for all unicast links + broadcast link */ | ||
487 | payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1)); | ||
488 | if (payload_size > 32768u) { | ||
489 | spin_unlock_bh(&node_list_lock); | ||
490 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | ||
491 | " (too many links)"); | ||
492 | } | ||
493 | spin_unlock_bh(&node_list_lock); | ||
494 | |||
495 | buf = tipc_cfg_reply_alloc(payload_size); | ||
496 | if (!buf) | ||
497 | return NULL; | ||
498 | |||
499 | /* Add TLV for broadcast link */ | ||
500 | link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr)); | ||
501 | link_info.up = htonl(1); | ||
502 | strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME); | ||
503 | tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); | ||
504 | |||
505 | /* Add TLVs for any other links in scope */ | ||
506 | rcu_read_lock(); | ||
507 | list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { | ||
508 | u32 i; | ||
509 | |||
510 | if (!tipc_in_scope(domain, n_ptr->addr)) | ||
511 | continue; | ||
512 | tipc_node_lock(n_ptr); | ||
513 | for (i = 0; i < MAX_BEARERS; i++) { | ||
514 | if (!n_ptr->links[i]) | ||
515 | continue; | ||
516 | link_info.dest = htonl(n_ptr->addr); | ||
517 | link_info.up = htonl(tipc_link_is_up(n_ptr->links[i])); | ||
518 | strcpy(link_info.str, n_ptr->links[i]->name); | ||
519 | tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, | ||
520 | &link_info, sizeof(link_info)); | ||
521 | } | 426 | } |
522 | tipc_node_unlock(n_ptr); | 427 | list_del(&conn->list); |
428 | kfree(conn); | ||
523 | } | 429 | } |
524 | rcu_read_unlock(); | ||
525 | return buf; | ||
526 | } | 430 | } |
527 | 431 | ||
528 | /** | 432 | /** |
@@ -534,78 +438,90 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) | |||
534 | * | 438 | * |
535 | * Returns 0 on success | 439 | * Returns 0 on success |
536 | */ | 440 | */ |
537 | int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len) | 441 | int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr, |
442 | char *linkname, size_t len) | ||
538 | { | 443 | { |
539 | struct tipc_link *link; | 444 | struct tipc_link *link; |
540 | struct tipc_node *node = tipc_node_find(addr); | 445 | int err = -EINVAL; |
446 | struct tipc_node *node = tipc_node_find(net, addr); | ||
447 | |||
448 | if (!node) | ||
449 | return err; | ||
450 | |||
451 | if (bearer_id >= MAX_BEARERS) | ||
452 | goto exit; | ||
541 | 453 | ||
542 | if ((bearer_id >= MAX_BEARERS) || !node) | ||
543 | return -EINVAL; | ||
544 | tipc_node_lock(node); | 454 | tipc_node_lock(node); |
545 | link = node->links[bearer_id]; | 455 | link = node->links[bearer_id]; |
546 | if (link) { | 456 | if (link) { |
547 | strncpy(linkname, link->name, len); | 457 | strncpy(linkname, link->name, len); |
548 | tipc_node_unlock(node); | 458 | err = 0; |
549 | return 0; | ||
550 | } | 459 | } |
460 | exit: | ||
551 | tipc_node_unlock(node); | 461 | tipc_node_unlock(node); |
552 | return -EINVAL; | 462 | tipc_node_put(node); |
463 | return err; | ||
553 | } | 464 | } |
554 | 465 | ||
555 | void tipc_node_unlock(struct tipc_node *node) | 466 | void tipc_node_unlock(struct tipc_node *node) |
556 | { | 467 | { |
557 | LIST_HEAD(nsub_list); | 468 | struct net *net = node->net; |
558 | LIST_HEAD(conn_sks); | ||
559 | struct sk_buff_head waiting_sks; | ||
560 | u32 addr = 0; | 469 | u32 addr = 0; |
561 | int flags = node->action_flags; | 470 | u32 flags = node->action_flags; |
562 | u32 link_id = 0; | 471 | u32 link_id = 0; |
472 | struct list_head *publ_list; | ||
473 | struct sk_buff_head *inputq = node->inputq; | ||
474 | struct sk_buff_head *namedq; | ||
563 | 475 | ||
564 | if (likely(!flags)) { | 476 | if (likely(!flags || (flags == TIPC_MSG_EVT))) { |
477 | node->action_flags = 0; | ||
565 | spin_unlock_bh(&node->lock); | 478 | spin_unlock_bh(&node->lock); |
479 | if (flags == TIPC_MSG_EVT) | ||
480 | tipc_sk_rcv(net, inputq); | ||
566 | return; | 481 | return; |
567 | } | 482 | } |
568 | 483 | ||
569 | addr = node->addr; | 484 | addr = node->addr; |
570 | link_id = node->link_id; | 485 | link_id = node->link_id; |
571 | __skb_queue_head_init(&waiting_sks); | 486 | namedq = node->namedq; |
487 | publ_list = &node->publ_list; | ||
572 | 488 | ||
573 | if (flags & TIPC_WAKEUP_USERS) | 489 | node->action_flags &= ~(TIPC_MSG_EVT | |
574 | skb_queue_splice_init(&node->waiting_sks, &waiting_sks); | 490 | TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP | |
575 | 491 | TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP | | |
576 | if (flags & TIPC_NOTIFY_NODE_DOWN) { | 492 | TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT | |
577 | list_replace_init(&node->publ_list, &nsub_list); | 493 | TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET); |
578 | list_replace_init(&node->conn_sks, &conn_sks); | ||
579 | } | ||
580 | node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN | | ||
581 | TIPC_NOTIFY_NODE_UP | TIPC_NOTIFY_LINK_UP | | ||
582 | TIPC_NOTIFY_LINK_DOWN | | ||
583 | TIPC_WAKEUP_BCAST_USERS); | ||
584 | 494 | ||
585 | spin_unlock_bh(&node->lock); | 495 | spin_unlock_bh(&node->lock); |
586 | 496 | ||
587 | while (!skb_queue_empty(&waiting_sks)) | 497 | if (flags & TIPC_NOTIFY_NODE_DOWN) |
588 | tipc_sk_rcv(__skb_dequeue(&waiting_sks)); | 498 | tipc_publ_notify(net, publ_list, addr); |
589 | |||
590 | if (!list_empty(&conn_sks)) | ||
591 | tipc_node_abort_sock_conns(&conn_sks); | ||
592 | |||
593 | if (!list_empty(&nsub_list)) | ||
594 | tipc_publ_notify(&nsub_list, addr); | ||
595 | 499 | ||
596 | if (flags & TIPC_WAKEUP_BCAST_USERS) | 500 | if (flags & TIPC_WAKEUP_BCAST_USERS) |
597 | tipc_bclink_wakeup_users(); | 501 | tipc_bclink_wakeup_users(net); |
598 | 502 | ||
599 | if (flags & TIPC_NOTIFY_NODE_UP) | 503 | if (flags & TIPC_NOTIFY_NODE_UP) |
600 | tipc_named_node_up(addr); | 504 | tipc_named_node_up(net, addr); |
601 | 505 | ||
602 | if (flags & TIPC_NOTIFY_LINK_UP) | 506 | if (flags & TIPC_NOTIFY_LINK_UP) |
603 | tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, | 507 | tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr, |
604 | TIPC_NODE_SCOPE, link_id, addr); | 508 | TIPC_NODE_SCOPE, link_id, addr); |
605 | 509 | ||
606 | if (flags & TIPC_NOTIFY_LINK_DOWN) | 510 | if (flags & TIPC_NOTIFY_LINK_DOWN) |
607 | tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, | 511 | tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr, |
608 | link_id, addr); | 512 | link_id, addr); |
513 | |||
514 | if (flags & TIPC_MSG_EVT) | ||
515 | tipc_sk_rcv(net, inputq); | ||
516 | |||
517 | if (flags & TIPC_NAMED_MSG_EVT) | ||
518 | tipc_named_rcv(net, namedq); | ||
519 | |||
520 | if (flags & TIPC_BCAST_MSG_EVT) | ||
521 | tipc_bclink_input(net); | ||
522 | |||
523 | if (flags & TIPC_BCAST_RESET) | ||
524 | tipc_link_reset_all(node); | ||
609 | } | 525 | } |
610 | 526 | ||
611 | /* Caller should hold node lock for the passed node */ | 527 | /* Caller should hold node lock for the passed node */ |
@@ -614,7 +530,7 @@ static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node) | |||
614 | void *hdr; | 530 | void *hdr; |
615 | struct nlattr *attrs; | 531 | struct nlattr *attrs; |
616 | 532 | ||
617 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, | 533 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, |
618 | NLM_F_MULTI, TIPC_NL_NODE_GET); | 534 | NLM_F_MULTI, TIPC_NL_NODE_GET); |
619 | if (!hdr) | 535 | if (!hdr) |
620 | return -EMSGSIZE; | 536 | return -EMSGSIZE; |
@@ -645,6 +561,8 @@ msg_full: | |||
645 | int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) | 561 | int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) |
646 | { | 562 | { |
647 | int err; | 563 | int err; |
564 | struct net *net = sock_net(skb->sk); | ||
565 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
648 | int done = cb->args[0]; | 566 | int done = cb->args[0]; |
649 | int last_addr = cb->args[1]; | 567 | int last_addr = cb->args[1]; |
650 | struct tipc_node *node; | 568 | struct tipc_node *node; |
@@ -658,20 +576,24 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
658 | msg.seq = cb->nlh->nlmsg_seq; | 576 | msg.seq = cb->nlh->nlmsg_seq; |
659 | 577 | ||
660 | rcu_read_lock(); | 578 | rcu_read_lock(); |
661 | 579 | if (last_addr) { | |
662 | if (last_addr && !tipc_node_find(last_addr)) { | 580 | node = tipc_node_find(net, last_addr); |
663 | rcu_read_unlock(); | 581 | if (!node) { |
664 | /* We never set seq or call nl_dump_check_consistent() this | 582 | rcu_read_unlock(); |
665 | * means that setting prev_seq here will cause the consistence | 583 | /* We never set seq or call nl_dump_check_consistent() |
666 | * check to fail in the netlink callback handler. Resulting in | 584 | * this means that setting prev_seq here will cause the |
667 | * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if | 585 | * consistence check to fail in the netlink callback |
668 | * the node state changed while we released the lock. | 586 | * handler. Resulting in the NLMSG_DONE message having |
669 | */ | 587 | * the NLM_F_DUMP_INTR flag set if the node state |
670 | cb->prev_seq = 1; | 588 | * changed while we released the lock. |
671 | return -EPIPE; | 589 | */ |
590 | cb->prev_seq = 1; | ||
591 | return -EPIPE; | ||
592 | } | ||
593 | tipc_node_put(node); | ||
672 | } | 594 | } |
673 | 595 | ||
674 | list_for_each_entry_rcu(node, &tipc_node_list, list) { | 596 | list_for_each_entry_rcu(node, &tn->node_list, list) { |
675 | if (last_addr) { | 597 | if (last_addr) { |
676 | if (node->addr == last_addr) | 598 | if (node->addr == last_addr) |
677 | last_addr = 0; | 599 | last_addr = 0; |
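The rewritten tipc_node_unlock() above replaces the old splice-and-forward scheme with a flag-driven one: state-change events are recorded as bits in node->action_flags while the node spinlock is held, then snapshotted, cleared, and acted upon only after the lock has been dropped, so no notification ever runs under node->lock. A minimal sketch of that deferred-dispatch pattern; every example_* name below is hypothetical and only the pattern mirrors the patch:

/*
 * Minimal sketch (not part of the patch) of the flag-driven deferred
 * dispatch used by the reworked tipc_node_unlock(). All example_*
 * identifiers are hypothetical stand-ins for the TIPC-specific ones.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

enum {
	EXAMPLE_NOTIFY_UP   = (1 << 0),
	EXAMPLE_NOTIFY_DOWN = (1 << 1),
};

struct example_node {
	spinlock_t lock;
	u32 action_flags;
};

static void example_notify_up(struct example_node *n) { }
static void example_notify_down(struct example_node *n) { }

static void example_unlock_and_dispatch(struct example_node *n)
{
	u32 flags;

	spin_lock_bh(&n->lock);
	flags = n->action_flags;	/* snapshot pending events */
	n->action_flags = 0;		/* clear them before dropping the lock */
	spin_unlock_bh(&n->lock);

	/* Heavier work (socket delivery, name table updates, wakeups) runs
	 * only here, outside the spinlock, once per recorded event bit.
	 */
	if (flags & EXAMPLE_NOTIFY_UP)
		example_notify_up(n);
	if (flags & EXAMPLE_NOTIFY_DOWN)
		example_notify_down(n);
}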
diff --git a/net/tipc/node.h b/net/tipc/node.h index cbe0e950f1cc..02d5c20dc551 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/node.h: Include file for TIPC node management routines | 2 | * net/tipc/node.h: Include file for TIPC node management routines |
3 | * | 3 | * |
4 | * Copyright (c) 2000-2006, 2014, Ericsson AB | 4 | * Copyright (c) 2000-2006, 2014-2015, Ericsson AB |
5 | * Copyright (c) 2005, 2010-2014, Wind River Systems | 5 | * Copyright (c) 2005, 2010-2014, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
@@ -42,10 +42,10 @@ | |||
42 | #include "bearer.h" | 42 | #include "bearer.h" |
43 | #include "msg.h" | 43 | #include "msg.h" |
44 | 44 | ||
45 | /* | 45 | /* Out-of-range value for node signature */ |
46 | * Out-of-range value for node signature | 46 | #define INVALID_NODE_SIG 0x10000 |
47 | */ | 47 | |
48 | #define INVALID_NODE_SIG 0x10000 | 48 | #define NODE_HTABLE_SIZE 512 |
49 | 49 | ||
50 | /* Flags used to take different actions according to flag type | 50 | /* Flags used to take different actions according to flag type |
51 | * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down | 51 | * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down |
@@ -55,14 +55,17 @@ | |||
55 | * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type | 55 | * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type |
56 | */ | 56 | */ |
57 | enum { | 57 | enum { |
58 | TIPC_MSG_EVT = 1, | ||
58 | TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1), | 59 | TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1), |
59 | TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2), | 60 | TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2), |
60 | TIPC_NOTIFY_NODE_DOWN = (1 << 3), | 61 | TIPC_NOTIFY_NODE_DOWN = (1 << 3), |
61 | TIPC_NOTIFY_NODE_UP = (1 << 4), | 62 | TIPC_NOTIFY_NODE_UP = (1 << 4), |
62 | TIPC_WAKEUP_USERS = (1 << 5), | 63 | TIPC_WAKEUP_BCAST_USERS = (1 << 5), |
63 | TIPC_WAKEUP_BCAST_USERS = (1 << 6), | 64 | TIPC_NOTIFY_LINK_UP = (1 << 6), |
64 | TIPC_NOTIFY_LINK_UP = (1 << 7), | 65 | TIPC_NOTIFY_LINK_DOWN = (1 << 7), |
65 | TIPC_NOTIFY_LINK_DOWN = (1 << 8) | 66 | TIPC_NAMED_MSG_EVT = (1 << 8), |
67 | TIPC_BCAST_MSG_EVT = (1 << 9), | ||
68 | TIPC_BCAST_RESET = (1 << 10) | ||
66 | }; | 69 | }; |
67 | 70 | ||
68 | /** | 71 | /** |
@@ -73,6 +76,7 @@ enum { | |||
73 | * @oos_state: state tracker for handling OOS b'cast messages | 76 | * @oos_state: state tracker for handling OOS b'cast messages |
74 | * @deferred_queue: deferred queue saved OOS b'cast message received from node | 77 | * @deferred_queue: deferred queue saved OOS b'cast message received from node |
75 | * @reasm_buf: broadcast reassembly queue head from node | 78 | * @reasm_buf: broadcast reassembly queue head from node |
79 | * @inputq_map: bitmap indicating which inqueues should be kicked | ||
76 | * @recv_permitted: true if node is allowed to receive b'cast messages | 80 | * @recv_permitted: true if node is allowed to receive b'cast messages |
77 | */ | 81 | */ |
78 | struct tipc_node_bclink { | 82 | struct tipc_node_bclink { |
@@ -81,16 +85,22 @@ struct tipc_node_bclink { | |||
81 | u32 last_sent; | 85 | u32 last_sent; |
82 | u32 oos_state; | 86 | u32 oos_state; |
83 | u32 deferred_size; | 87 | u32 deferred_size; |
84 | struct sk_buff_head deferred_queue; | 88 | struct sk_buff_head deferdq; |
85 | struct sk_buff *reasm_buf; | 89 | struct sk_buff *reasm_buf; |
90 | int inputq_map; | ||
86 | bool recv_permitted; | 91 | bool recv_permitted; |
87 | }; | 92 | }; |
88 | 93 | ||
89 | /** | 94 | /** |
90 | * struct tipc_node - TIPC node structure | 95 | * struct tipc_node - TIPC node structure |
91 | * @addr: network address of node | 96 | * @addr: network address of node |
97 | * @ref: reference counter to node object | ||
92 | * @lock: spinlock governing access to structure | 98 | * @lock: spinlock governing access to structure |
99 | * @net: the applicable net namespace | ||
93 | * @hash: links to adjacent nodes in unsorted hash chain | 100 | * @hash: links to adjacent nodes in unsorted hash chain |
101 | * @inputq: pointer to input queue containing messages for msg event | ||
102 | * @namedq: pointer to name table input queue with name table messages | ||
103 | * @curr_link: the link holding the node lock, if any | ||
94 | * @active_links: pointers to active links to node | 104 | * @active_links: pointers to active links to node |
95 | * @links: pointers to all links to node | 105 | * @links: pointers to all links to node |
96 | * @action_flags: bit mask of different types of node actions | 106 | * @action_flags: bit mask of different types of node actions |
@@ -98,6 +108,7 @@ struct tipc_node_bclink { | |||
98 | * @list: links to adjacent nodes in sorted list of cluster's nodes | 108 | * @list: links to adjacent nodes in sorted list of cluster's nodes |
99 | * @working_links: number of working links to node (both active and standby) | 109 | * @working_links: number of working links to node (both active and standby) |
100 | * @link_cnt: number of links to node | 110 | * @link_cnt: number of links to node |
111 | * @capabilities: bitmap, indicating peer node's functional capabilities | ||
101 | * @signature: node instance identifier | 112 | * @signature: node instance identifier |
102 | * @link_id: local and remote bearer ids of changing link, if any | 113 | * @link_id: local and remote bearer ids of changing link, if any |
103 | * @publ_list: list of publications | 114 | * @publ_list: list of publications |
@@ -105,41 +116,43 @@ struct tipc_node_bclink { | |||
105 | */ | 116 | */ |
106 | struct tipc_node { | 117 | struct tipc_node { |
107 | u32 addr; | 118 | u32 addr; |
119 | struct kref kref; | ||
108 | spinlock_t lock; | 120 | spinlock_t lock; |
121 | struct net *net; | ||
109 | struct hlist_node hash; | 122 | struct hlist_node hash; |
123 | struct sk_buff_head *inputq; | ||
124 | struct sk_buff_head *namedq; | ||
110 | struct tipc_link *active_links[2]; | 125 | struct tipc_link *active_links[2]; |
111 | u32 act_mtus[2]; | 126 | u32 act_mtus[2]; |
112 | struct tipc_link *links[MAX_BEARERS]; | 127 | struct tipc_link *links[MAX_BEARERS]; |
113 | unsigned int action_flags; | 128 | int action_flags; |
114 | struct tipc_node_bclink bclink; | 129 | struct tipc_node_bclink bclink; |
115 | struct list_head list; | 130 | struct list_head list; |
116 | int link_cnt; | 131 | int link_cnt; |
117 | int working_links; | 132 | u16 working_links; |
133 | u16 capabilities; | ||
118 | u32 signature; | 134 | u32 signature; |
119 | u32 link_id; | 135 | u32 link_id; |
120 | struct list_head publ_list; | 136 | struct list_head publ_list; |
121 | struct sk_buff_head waiting_sks; | ||
122 | struct list_head conn_sks; | 137 | struct list_head conn_sks; |
123 | struct rcu_head rcu; | 138 | struct rcu_head rcu; |
124 | }; | 139 | }; |
125 | 140 | ||
126 | extern struct list_head tipc_node_list; | 141 | struct tipc_node *tipc_node_find(struct net *net, u32 addr); |
127 | 142 | void tipc_node_put(struct tipc_node *node); | |
128 | struct tipc_node *tipc_node_find(u32 addr); | 143 | struct tipc_node *tipc_node_create(struct net *net, u32 addr); |
129 | struct tipc_node *tipc_node_create(u32 addr); | 144 | void tipc_node_stop(struct net *net); |
130 | void tipc_node_stop(void); | ||
131 | void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); | 145 | void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); |
132 | void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); | 146 | void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); |
133 | void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr); | 147 | void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr); |
134 | void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr); | 148 | void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr); |
135 | int tipc_node_active_links(struct tipc_node *n_ptr); | 149 | int tipc_node_active_links(struct tipc_node *n_ptr); |
136 | int tipc_node_is_up(struct tipc_node *n_ptr); | 150 | int tipc_node_is_up(struct tipc_node *n_ptr); |
137 | struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space); | 151 | int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node, |
138 | struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space); | 152 | char *linkname, size_t len); |
139 | int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len); | ||
140 | void tipc_node_unlock(struct tipc_node *node); | 153 | void tipc_node_unlock(struct tipc_node *node); |
141 | int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port); | 154 | int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); |
142 | void tipc_node_remove_conn(u32 dnode, u32 port); | 155 | void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); |
143 | 156 | ||
144 | int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb); | 157 | int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb); |
145 | 158 | ||
@@ -154,17 +167,19 @@ static inline bool tipc_node_blocked(struct tipc_node *node) | |||
154 | TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN)); | 167 | TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN)); |
155 | } | 168 | } |
156 | 169 | ||
157 | static inline uint tipc_node_get_mtu(u32 addr, u32 selector) | 170 | static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector) |
158 | { | 171 | { |
159 | struct tipc_node *node; | 172 | struct tipc_node *node; |
160 | u32 mtu; | 173 | u32 mtu; |
161 | 174 | ||
162 | node = tipc_node_find(addr); | 175 | node = tipc_node_find(net, addr); |
163 | 176 | ||
164 | if (likely(node)) | 177 | if (likely(node)) { |
165 | mtu = node->act_mtus[selector & 1]; | 178 | mtu = node->act_mtus[selector & 1]; |
166 | else | 179 | tipc_node_put(node); |
180 | } else { | ||
167 | mtu = MAX_MSG_SIZE; | 181 | mtu = MAX_MSG_SIZE; |
182 | } | ||
168 | 183 | ||
169 | return mtu; | 184 | return mtu; |
170 | } | 185 | } |
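With this header change, node lookups become per-namespace and reference-counted: tipc_node_find() takes a struct net * and a kref on the node, which must be balanced by tipc_node_put(), exactly as tipc_node_get_mtu() above does. A hedged caller-side sketch of that discipline; example_peer_is_up() is hypothetical, while the tipc_node_* calls are the ones declared in this header:

/*
 * Hypothetical caller, not part of the patch: shows the lookup/put
 * discipline required now that tipc_node_find() is per-namespace and
 * takes a reference on the returned node.
 */
#include <net/net_namespace.h>
#include "node.h"

static bool example_peer_is_up(struct net *net, u32 addr)
{
	struct tipc_node *node;
	bool up = false;

	node = tipc_node_find(net, addr);	/* takes a reference on success */
	if (node) {
		up = tipc_node_is_up(node);
		tipc_node_put(node);		/* always release that reference */
	}
	return up;
}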
diff --git a/net/tipc/server.c b/net/tipc/server.c index a538a02f869b..ab6183cdb121 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c | |||
@@ -35,12 +35,15 @@ | |||
35 | 35 | ||
36 | #include "server.h" | 36 | #include "server.h" |
37 | #include "core.h" | 37 | #include "core.h" |
38 | #include "socket.h" | ||
38 | #include <net/sock.h> | 39 | #include <net/sock.h> |
40 | #include <linux/module.h> | ||
39 | 41 | ||
40 | /* Number of messages to send before rescheduling */ | 42 | /* Number of messages to send before rescheduling */ |
41 | #define MAX_SEND_MSG_COUNT 25 | 43 | #define MAX_SEND_MSG_COUNT 25 |
42 | #define MAX_RECV_MSG_COUNT 25 | 44 | #define MAX_RECV_MSG_COUNT 25 |
43 | #define CF_CONNECTED 1 | 45 | #define CF_CONNECTED 1 |
46 | #define CF_SERVER 2 | ||
44 | 47 | ||
45 | #define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data) | 48 | #define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data) |
46 | 49 | ||
@@ -87,9 +90,19 @@ static void tipc_clean_outqueues(struct tipc_conn *con); | |||
87 | static void tipc_conn_kref_release(struct kref *kref) | 90 | static void tipc_conn_kref_release(struct kref *kref) |
88 | { | 91 | { |
89 | struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); | 92 | struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); |
93 | struct sockaddr_tipc *saddr = con->server->saddr; | ||
94 | struct socket *sock = con->sock; | ||
95 | struct sock *sk; | ||
90 | 96 | ||
91 | if (con->sock) { | 97 | if (sock) { |
92 | tipc_sock_release_local(con->sock); | 98 | sk = sock->sk; |
99 | if (test_bit(CF_SERVER, &con->flags)) { | ||
100 | __module_get(sock->ops->owner); | ||
101 | __module_get(sk->sk_prot_creator->owner); | ||
102 | } | ||
103 | saddr->scope = -TIPC_NODE_SCOPE; | ||
104 | kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); | ||
105 | sk_release_kernel(sk); | ||
93 | con->sock = NULL; | 106 | con->sock = NULL; |
94 | } | 107 | } |
95 | 108 | ||
@@ -255,7 +268,8 @@ static int tipc_receive_from_sock(struct tipc_conn *con) | |||
255 | goto out_close; | 268 | goto out_close; |
256 | } | 269 | } |
257 | 270 | ||
258 | s->tipc_conn_recvmsg(con->conid, &addr, con->usr_data, buf, ret); | 271 | s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr, |
272 | con->usr_data, buf, ret); | ||
259 | 273 | ||
260 | kmem_cache_free(s->rcvbuf_cache, buf); | 274 | kmem_cache_free(s->rcvbuf_cache, buf); |
261 | 275 | ||
@@ -279,7 +293,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con) | |||
279 | struct tipc_conn *newcon; | 293 | struct tipc_conn *newcon; |
280 | int ret; | 294 | int ret; |
281 | 295 | ||
282 | ret = tipc_sock_accept_local(sock, &newsock, O_NONBLOCK); | 296 | ret = kernel_accept(sock, &newsock, O_NONBLOCK); |
283 | if (ret < 0) | 297 | if (ret < 0) |
284 | return ret; | 298 | return ret; |
285 | 299 | ||
@@ -307,9 +321,12 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con) | |||
307 | struct socket *sock = NULL; | 321 | struct socket *sock = NULL; |
308 | int ret; | 322 | int ret; |
309 | 323 | ||
310 | ret = tipc_sock_create_local(s->type, &sock); | 324 | ret = sock_create_kern(AF_TIPC, SOCK_SEQPACKET, 0, &sock); |
311 | if (ret < 0) | 325 | if (ret < 0) |
312 | return NULL; | 326 | return NULL; |
327 | |||
328 | sk_change_net(sock->sk, s->net); | ||
329 | |||
313 | ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE, | 330 | ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE, |
314 | (char *)&s->imp, sizeof(s->imp)); | 331 | (char *)&s->imp, sizeof(s->imp)); |
315 | if (ret < 0) | 332 | if (ret < 0) |
@@ -335,11 +352,31 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con) | |||
335 | pr_err("Unknown socket type %d\n", s->type); | 352 | pr_err("Unknown socket type %d\n", s->type); |
336 | goto create_err; | 353 | goto create_err; |
337 | } | 354 | } |
355 | |||
356 | /* As server's listening socket owner and creator is the same module, | ||
357 | * we have to decrease TIPC module reference count to guarantee that | ||
358 | * it remains zero after the server socket is created, otherwise, | ||
359 | * executing "rmmod" command is unable to make TIPC module deleted | ||
360 | * after TIPC module is inserted successfully. | ||
361 | * | ||
362 | * However, the reference count is ever increased twice in | ||
363 | * sock_create_kern(): one is to increase the reference count of owner | ||
364 | * of TIPC socket's proto_ops struct; another is to increment the | ||
365 | * reference count of owner of TIPC proto struct. Therefore, we must | ||
366 | * decrement the module reference count twice to ensure that it keeps | ||
367 | * zero after server's listening socket is created. Of course, we | ||
368 | * must bump the module reference count twice as well before the socket | ||
369 | * is closed. | ||
370 | */ | ||
371 | module_put(sock->ops->owner); | ||
372 | module_put(sock->sk->sk_prot_creator->owner); | ||
373 | set_bit(CF_SERVER, &con->flags); | ||
374 | |||
338 | return sock; | 375 | return sock; |
339 | 376 | ||
340 | create_err: | 377 | create_err: |
341 | sock_release(sock); | 378 | kernel_sock_shutdown(sock, SHUT_RDWR); |
342 | con->sock = NULL; | 379 | sk_release_kernel(sock->sk); |
343 | return NULL; | 380 | return NULL; |
344 | } | 381 | } |
345 | 382 | ||
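The comment added to tipc_create_listen_sock() above explains why the two module references taken by sock_create_kern() are dropped right after the internal listener is created and re-acquired before the socket is torn down, keeping the TIPC module refcount at zero while the server runs. A condensed sketch of that balancing, assuming the same kernel APIs the patch itself uses (sock_create_kern(), sk_change_net(), sk_release_kernel()); the example_* wrappers are hypothetical:

/*
 * Hypothetical wrappers illustrating the module refcount balancing; the
 * patch does the equivalent inline in tipc_create_listen_sock() and
 * tipc_conn_kref_release().
 */
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

static int example_kernel_listener_create(struct net *net, struct socket **res)
{
	struct socket *sock;
	int ret;

	ret = sock_create_kern(AF_TIPC, SOCK_SEQPACKET, 0, &sock);
	if (ret < 0)
		return ret;
	sk_change_net(sock->sk, net);

	/* sock_create_kern() pinned the TIPC module twice (proto_ops owner
	 * and proto creator owner); drop both so "rmmod tipc" still works
	 * while the internal listener exists.
	 */
	module_put(sock->ops->owner);
	module_put(sock->sk->sk_prot_creator->owner);

	*res = sock;
	return 0;
}

static void example_kernel_listener_release(struct socket *sock)
{
	/* Re-take the two references so the release path stays balanced. */
	__module_get(sock->ops->owner);
	__module_get(sock->sk->sk_prot_creator->owner);
	sk_release_kernel(sock->sk);
}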
diff --git a/net/tipc/server.h b/net/tipc/server.h index be817b0b547e..9015faedb1b0 100644 --- a/net/tipc/server.h +++ b/net/tipc/server.h | |||
@@ -36,7 +36,9 @@ | |||
36 | #ifndef _TIPC_SERVER_H | 36 | #ifndef _TIPC_SERVER_H |
37 | #define _TIPC_SERVER_H | 37 | #define _TIPC_SERVER_H |
38 | 38 | ||
39 | #include "core.h" | 39 | #include <linux/idr.h> |
40 | #include <linux/tipc.h> | ||
41 | #include <net/net_namespace.h> | ||
40 | 42 | ||
41 | #define TIPC_SERVER_NAME_LEN 32 | 43 | #define TIPC_SERVER_NAME_LEN 32 |
42 | 44 | ||
@@ -45,6 +47,7 @@ | |||
45 | * @conn_idr: identifier set of connection | 47 | * @conn_idr: identifier set of connection |
46 | * @idr_lock: protect the connection identifier set | 48 | * @idr_lock: protect the connection identifier set |
47 | * @idr_in_use: amount of allocated identifier entry | 49 | * @idr_in_use: amount of allocated identifier entry |
50 | * @net: network namespace instance | ||
48 | * @rcvbuf_cache: memory cache of server receive buffer | 51 | * @rcvbuf_cache: memory cache of server receive buffer |
49 | * @rcv_wq: receive workqueue | 52 | * @rcv_wq: receive workqueue |
50 | * @send_wq: send workqueue | 53 | * @send_wq: send workqueue |
@@ -61,16 +64,18 @@ struct tipc_server { | |||
61 | struct idr conn_idr; | 64 | struct idr conn_idr; |
62 | spinlock_t idr_lock; | 65 | spinlock_t idr_lock; |
63 | int idr_in_use; | 66 | int idr_in_use; |
67 | struct net *net; | ||
64 | struct kmem_cache *rcvbuf_cache; | 68 | struct kmem_cache *rcvbuf_cache; |
65 | struct workqueue_struct *rcv_wq; | 69 | struct workqueue_struct *rcv_wq; |
66 | struct workqueue_struct *send_wq; | 70 | struct workqueue_struct *send_wq; |
67 | int max_rcvbuf_size; | 71 | int max_rcvbuf_size; |
68 | void *(*tipc_conn_new) (int conid); | 72 | void *(*tipc_conn_new)(int conid); |
69 | void (*tipc_conn_shutdown) (int conid, void *usr_data); | 73 | void (*tipc_conn_shutdown)(int conid, void *usr_data); |
70 | void (*tipc_conn_recvmsg) (int conid, struct sockaddr_tipc *addr, | 74 | void (*tipc_conn_recvmsg)(struct net *net, int conid, |
71 | void *usr_data, void *buf, size_t len); | 75 | struct sockaddr_tipc *addr, void *usr_data, |
76 | void *buf, size_t len); | ||
72 | struct sockaddr_tipc *saddr; | 77 | struct sockaddr_tipc *saddr; |
73 | const char name[TIPC_SERVER_NAME_LEN]; | 78 | char name[TIPC_SERVER_NAME_LEN]; |
74 | int imp; | 79 | int imp; |
75 | int type; | 80 | int type; |
76 | }; | 81 | }; |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 4731cad99d1c..ee90d74d7516 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/socket.c: TIPC socket API | 2 | * net/tipc/socket.c: TIPC socket API |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2007, 2012-2014, Ericsson AB | 4 | * Copyright (c) 2001-2007, 2012-2015, Ericsson AB |
5 | * Copyright (c) 2004-2008, 2010-2013, Wind River Systems | 5 | * Copyright (c) 2004-2008, 2010-2013, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
@@ -34,22 +34,24 @@ | |||
34 | * POSSIBILITY OF SUCH DAMAGE. | 34 | * POSSIBILITY OF SUCH DAMAGE. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include <linux/rhashtable.h> | ||
37 | #include "core.h" | 38 | #include "core.h" |
38 | #include "name_table.h" | 39 | #include "name_table.h" |
39 | #include "node.h" | 40 | #include "node.h" |
40 | #include "link.h" | 41 | #include "link.h" |
41 | #include <linux/export.h> | 42 | #include "name_distr.h" |
42 | #include "config.h" | ||
43 | #include "socket.h" | 43 | #include "socket.h" |
44 | 44 | ||
45 | #define SS_LISTENING -1 /* socket is listening */ | 45 | #define SS_LISTENING -1 /* socket is listening */ |
46 | #define SS_READY -2 /* socket is connectionless */ | 46 | #define SS_READY -2 /* socket is connectionless */ |
47 | 47 | ||
48 | #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ | 48 | #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ |
49 | #define CONN_PROBING_INTERVAL 3600000 /* [ms] => 1 h */ | 49 | #define CONN_PROBING_INTERVAL msecs_to_jiffies(3600000) /* [ms] => 1 h */ |
50 | #define TIPC_FWD_MSG 1 | 50 | #define TIPC_FWD_MSG 1 |
51 | #define TIPC_CONN_OK 0 | 51 | #define TIPC_CONN_OK 0 |
52 | #define TIPC_CONN_PROBING 1 | 52 | #define TIPC_CONN_PROBING 1 |
53 | #define TIPC_MAX_PORT 0xffffffff | ||
54 | #define TIPC_MIN_PORT 1 | ||
53 | 55 | ||
54 | /** | 56 | /** |
55 | * struct tipc_sock - TIPC socket structure | 57 | * struct tipc_sock - TIPC socket structure |
@@ -59,21 +61,21 @@ | |||
59 | * @conn_instance: TIPC instance used when connection was established | 61 | * @conn_instance: TIPC instance used when connection was established |
60 | * @published: non-zero if port has one or more associated names | 62 | * @published: non-zero if port has one or more associated names |
61 | * @max_pkt: maximum packet size "hint" used when building messages sent by port | 63 | * @max_pkt: maximum packet size "hint" used when building messages sent by port |
62 | * @ref: unique reference to port in TIPC object registry | 64 | * @portid: unique port identity in TIPC socket hash table |
63 | * @phdr: preformatted message header used when sending messages | 65 | * @phdr: preformatted message header used when sending messages |
64 | * @port_list: adjacent ports in TIPC's global list of ports | 66 | * @port_list: adjacent ports in TIPC's global list of ports |
65 | * @publications: list of publications for port | 67 | * @publications: list of publications for port |
66 | * @pub_count: total # of publications port has made during its lifetime | 68 | * @pub_count: total # of publications port has made during its lifetime |
67 | * @probing_state: | 69 | * @probing_state: |
68 | * @probing_interval: | 70 | * @probing_intv: |
69 | * @timer: | ||
70 | * @port: port - interacts with 'sk' and with the rest of the TIPC stack | ||
71 | * @peer_name: the peer of the connection, if any | ||
72 | * @conn_timeout: the time we can wait for an unresponded setup request | 71 | * @conn_timeout: the time we can wait for an unresponded setup request |
73 | * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue | 72 | * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue |
74 | * @link_cong: non-zero if owner must sleep because of link congestion | 73 | * @link_cong: non-zero if owner must sleep because of link congestion |
75 | * @sent_unacked: # messages sent by socket, and not yet acked by peer | 74 | * @sent_unacked: # messages sent by socket, and not yet acked by peer |
76 | * @rcv_unacked: # messages read by user, but not yet acked back to peer | 75 | * @rcv_unacked: # messages read by user, but not yet acked back to peer |
76 | * @remote: 'connected' peer for dgram/rdm | ||
77 | * @node: hash table node | ||
78 | * @rcu: rcu struct for tipc_sock | ||
77 | */ | 79 | */ |
78 | struct tipc_sock { | 80 | struct tipc_sock { |
79 | struct sock sk; | 81 | struct sock sk; |
@@ -82,19 +84,21 @@ struct tipc_sock { | |||
82 | u32 conn_instance; | 84 | u32 conn_instance; |
83 | int published; | 85 | int published; |
84 | u32 max_pkt; | 86 | u32 max_pkt; |
85 | u32 ref; | 87 | u32 portid; |
86 | struct tipc_msg phdr; | 88 | struct tipc_msg phdr; |
87 | struct list_head sock_list; | 89 | struct list_head sock_list; |
88 | struct list_head publications; | 90 | struct list_head publications; |
89 | u32 pub_count; | 91 | u32 pub_count; |
90 | u32 probing_state; | 92 | u32 probing_state; |
91 | u32 probing_interval; | 93 | unsigned long probing_intv; |
92 | struct timer_list timer; | ||
93 | uint conn_timeout; | 94 | uint conn_timeout; |
94 | atomic_t dupl_rcvcnt; | 95 | atomic_t dupl_rcvcnt; |
95 | bool link_cong; | 96 | bool link_cong; |
96 | uint sent_unacked; | 97 | uint sent_unacked; |
97 | uint rcv_unacked; | 98 | uint rcv_unacked; |
99 | struct sockaddr_tipc remote; | ||
100 | struct rhash_head node; | ||
101 | struct rcu_head rcu; | ||
98 | }; | 102 | }; |
99 | 103 | ||
100 | static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); | 104 | static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); |
@@ -103,23 +107,22 @@ static void tipc_write_space(struct sock *sk); | |||
103 | static int tipc_release(struct socket *sock); | 107 | static int tipc_release(struct socket *sock); |
104 | static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); | 108 | static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); |
105 | static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); | 109 | static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); |
106 | static void tipc_sk_timeout(unsigned long ref); | 110 | static void tipc_sk_timeout(unsigned long data); |
107 | static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, | 111 | static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, |
108 | struct tipc_name_seq const *seq); | 112 | struct tipc_name_seq const *seq); |
109 | static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, | 113 | static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, |
110 | struct tipc_name_seq const *seq); | 114 | struct tipc_name_seq const *seq); |
111 | static u32 tipc_sk_ref_acquire(struct tipc_sock *tsk); | 115 | static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid); |
112 | static void tipc_sk_ref_discard(u32 ref); | 116 | static int tipc_sk_insert(struct tipc_sock *tsk); |
113 | static struct tipc_sock *tipc_sk_get(u32 ref); | 117 | static void tipc_sk_remove(struct tipc_sock *tsk); |
114 | static struct tipc_sock *tipc_sk_get_next(u32 *ref); | 118 | static int __tipc_send_stream(struct socket *sock, struct msghdr *m, |
115 | static void tipc_sk_put(struct tipc_sock *tsk); | 119 | size_t dsz); |
120 | static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz); | ||
116 | 121 | ||
117 | static const struct proto_ops packet_ops; | 122 | static const struct proto_ops packet_ops; |
118 | static const struct proto_ops stream_ops; | 123 | static const struct proto_ops stream_ops; |
119 | static const struct proto_ops msg_ops; | 124 | static const struct proto_ops msg_ops; |
120 | |||
121 | static struct proto tipc_proto; | 125 | static struct proto tipc_proto; |
122 | static struct proto tipc_proto_kern; | ||
123 | 126 | ||
124 | static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { | 127 | static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { |
125 | [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC }, | 128 | [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC }, |
@@ -129,6 +132,8 @@ static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { | |||
129 | [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG } | 132 | [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG } |
130 | }; | 133 | }; |
131 | 134 | ||
135 | static const struct rhashtable_params tsk_rht_params; | ||
136 | |||
132 | /* | 137 | /* |
133 | * Revised TIPC socket locking policy: | 138 | * Revised TIPC socket locking policy: |
134 | * | 139 | * |
@@ -174,6 +179,11 @@ static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { | |||
174 | * - port reference | 179 | * - port reference |
175 | */ | 180 | */ |
176 | 181 | ||
182 | static u32 tsk_own_node(struct tipc_sock *tsk) | ||
183 | { | ||
184 | return msg_prevnode(&tsk->phdr); | ||
185 | } | ||
186 | |||
177 | static u32 tsk_peer_node(struct tipc_sock *tsk) | 187 | static u32 tsk_peer_node(struct tipc_sock *tsk) |
178 | { | 188 | { |
179 | return msg_destnode(&tsk->phdr); | 189 | return msg_destnode(&tsk->phdr); |
@@ -246,10 +256,11 @@ static void tsk_rej_rx_queue(struct sock *sk) | |||
246 | { | 256 | { |
247 | struct sk_buff *skb; | 257 | struct sk_buff *skb; |
248 | u32 dnode; | 258 | u32 dnode; |
259 | u32 own_node = tsk_own_node(tipc_sk(sk)); | ||
249 | 260 | ||
250 | while ((skb = __skb_dequeue(&sk->sk_receive_queue))) { | 261 | while ((skb = __skb_dequeue(&sk->sk_receive_queue))) { |
251 | if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT)) | 262 | if (tipc_msg_reverse(own_node, skb, &dnode, TIPC_ERR_NO_PORT)) |
252 | tipc_link_xmit_skb(skb, dnode, 0); | 263 | tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0); |
253 | } | 264 | } |
254 | } | 265 | } |
255 | 266 | ||
@@ -260,6 +271,7 @@ static void tsk_rej_rx_queue(struct sock *sk) | |||
260 | */ | 271 | */ |
261 | static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg) | 272 | static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg) |
262 | { | 273 | { |
274 | struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id); | ||
263 | u32 peer_port = tsk_peer_port(tsk); | 275 | u32 peer_port = tsk_peer_port(tsk); |
264 | u32 orig_node; | 276 | u32 orig_node; |
265 | u32 peer_node; | 277 | u32 peer_node; |
@@ -276,10 +288,10 @@ static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg) | |||
276 | if (likely(orig_node == peer_node)) | 288 | if (likely(orig_node == peer_node)) |
277 | return true; | 289 | return true; |
278 | 290 | ||
279 | if (!orig_node && (peer_node == tipc_own_addr)) | 291 | if (!orig_node && (peer_node == tn->own_addr)) |
280 | return true; | 292 | return true; |
281 | 293 | ||
282 | if (!peer_node && (orig_node == tipc_own_addr)) | 294 | if (!peer_node && (orig_node == tn->own_addr)) |
283 | return true; | 295 | return true; |
284 | 296 | ||
285 | return false; | 297 | return false; |
@@ -300,12 +312,12 @@ static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg) | |||
300 | static int tipc_sk_create(struct net *net, struct socket *sock, | 312 | static int tipc_sk_create(struct net *net, struct socket *sock, |
301 | int protocol, int kern) | 313 | int protocol, int kern) |
302 | { | 314 | { |
315 | struct tipc_net *tn; | ||
303 | const struct proto_ops *ops; | 316 | const struct proto_ops *ops; |
304 | socket_state state; | 317 | socket_state state; |
305 | struct sock *sk; | 318 | struct sock *sk; |
306 | struct tipc_sock *tsk; | 319 | struct tipc_sock *tsk; |
307 | struct tipc_msg *msg; | 320 | struct tipc_msg *msg; |
308 | u32 ref; | ||
309 | 321 | ||
310 | /* Validate arguments */ | 322 | /* Validate arguments */ |
311 | if (unlikely(protocol != 0)) | 323 | if (unlikely(protocol != 0)) |
@@ -330,33 +342,28 @@ static int tipc_sk_create(struct net *net, struct socket *sock, | |||
330 | } | 342 | } |
331 | 343 | ||
332 | /* Allocate socket's protocol area */ | 344 | /* Allocate socket's protocol area */ |
333 | if (!kern) | 345 | sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); |
334 | sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); | ||
335 | else | ||
336 | sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto_kern); | ||
337 | |||
338 | if (sk == NULL) | 346 | if (sk == NULL) |
339 | return -ENOMEM; | 347 | return -ENOMEM; |
340 | 348 | ||
341 | tsk = tipc_sk(sk); | 349 | tsk = tipc_sk(sk); |
342 | ref = tipc_sk_ref_acquire(tsk); | ||
343 | if (!ref) { | ||
344 | pr_warn("Socket create failed; reference table exhausted\n"); | ||
345 | return -ENOMEM; | ||
346 | } | ||
347 | tsk->max_pkt = MAX_PKT_DEFAULT; | 350 | tsk->max_pkt = MAX_PKT_DEFAULT; |
348 | tsk->ref = ref; | ||
349 | INIT_LIST_HEAD(&tsk->publications); | 351 | INIT_LIST_HEAD(&tsk->publications); |
350 | msg = &tsk->phdr; | 352 | msg = &tsk->phdr; |
351 | tipc_msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, | 353 | tn = net_generic(sock_net(sk), tipc_net_id); |
354 | tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, | ||
352 | NAMED_H_SIZE, 0); | 355 | NAMED_H_SIZE, 0); |
353 | msg_set_origport(msg, ref); | ||
354 | 356 | ||
355 | /* Finish initializing socket data structures */ | 357 | /* Finish initializing socket data structures */ |
356 | sock->ops = ops; | 358 | sock->ops = ops; |
357 | sock->state = state; | 359 | sock->state = state; |
358 | sock_init_data(sock, sk); | 360 | sock_init_data(sock, sk); |
359 | k_init_timer(&tsk->timer, (Handler)tipc_sk_timeout, ref); | 361 | if (tipc_sk_insert(tsk)) { |
362 | pr_warn("Socket create failed; port number exhausted\n"); | ||
363 | return -EINVAL; | ||
364 | } | ||
365 | msg_set_origport(msg, tsk->portid); | ||
366 | setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk); | ||
360 | sk->sk_backlog_rcv = tipc_backlog_rcv; | 367 | sk->sk_backlog_rcv = tipc_backlog_rcv; |
361 | sk->sk_rcvbuf = sysctl_tipc_rmem[1]; | 368 | sk->sk_rcvbuf = sysctl_tipc_rmem[1]; |
362 | sk->sk_data_ready = tipc_data_ready; | 369 | sk->sk_data_ready = tipc_data_ready; |
@@ -373,73 +380,11 @@ static int tipc_sk_create(struct net *net, struct socket *sock, | |||
373 | return 0; | 380 | return 0; |
374 | } | 381 | } |
375 | 382 | ||
376 | /** | 383 | static void tipc_sk_callback(struct rcu_head *head) |
377 | * tipc_sock_create_local - create TIPC socket from inside TIPC module | ||
378 | * @type: socket type - SOCK_RDM or SOCK_SEQPACKET | ||
379 | * | ||
380 | * We cannot use sock_creat_kern here because it bumps module user count. | ||
381 | * Since socket owner and creator is the same module we must make sure | ||
382 | * that module count remains zero for module local sockets, otherwise | ||
383 | * we cannot do rmmod. | ||
384 | * | ||
385 | * Returns 0 on success, errno otherwise | ||
386 | */ | ||
387 | int tipc_sock_create_local(int type, struct socket **res) | ||
388 | { | ||
389 | int rc; | ||
390 | |||
391 | rc = sock_create_lite(AF_TIPC, type, 0, res); | ||
392 | if (rc < 0) { | ||
393 | pr_err("Failed to create kernel socket\n"); | ||
394 | return rc; | ||
395 | } | ||
396 | tipc_sk_create(&init_net, *res, 0, 1); | ||
397 | |||
398 | return 0; | ||
399 | } | ||
400 | |||
401 | /** | ||
402 | * tipc_sock_release_local - release socket created by tipc_sock_create_local | ||
403 | * @sock: the socket to be released. | ||
404 | * | ||
405 | * Module reference count is not incremented when such sockets are created, | ||
406 | * so we must keep it from being decremented when they are released. | ||
407 | */ | ||
408 | void tipc_sock_release_local(struct socket *sock) | ||
409 | { | 384 | { |
410 | tipc_release(sock); | 385 | struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu); |
411 | sock->ops = NULL; | ||
412 | sock_release(sock); | ||
413 | } | ||
414 | 386 | ||
415 | /** | 387 | sock_put(&tsk->sk); |
416 | * tipc_sock_accept_local - accept a connection on a socket created | ||
417 | * with tipc_sock_create_local. Use this function to avoid that | ||
418 | * module reference count is inadvertently incremented. | ||
419 | * | ||
420 | * @sock: the accepting socket | ||
421 | * @newsock: reference to the new socket to be created | ||
422 | * @flags: socket flags | ||
423 | */ | ||
424 | |||
425 | int tipc_sock_accept_local(struct socket *sock, struct socket **newsock, | ||
426 | int flags) | ||
427 | { | ||
428 | struct sock *sk = sock->sk; | ||
429 | int ret; | ||
430 | |||
431 | ret = sock_create_lite(sk->sk_family, sk->sk_type, | ||
432 | sk->sk_protocol, newsock); | ||
433 | if (ret < 0) | ||
434 | return ret; | ||
435 | |||
436 | ret = tipc_accept(sock, *newsock, flags); | ||
437 | if (ret < 0) { | ||
438 | sock_release(*newsock); | ||
439 | return ret; | ||
440 | } | ||
441 | (*newsock)->ops = sock->ops; | ||
442 | return ret; | ||
443 | } | 388 | } |
444 | 389 | ||
445 | /** | 390 | /** |
@@ -461,9 +406,10 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock, | |||
461 | static int tipc_release(struct socket *sock) | 406 | static int tipc_release(struct socket *sock) |
462 | { | 407 | { |
463 | struct sock *sk = sock->sk; | 408 | struct sock *sk = sock->sk; |
409 | struct net *net; | ||
464 | struct tipc_sock *tsk; | 410 | struct tipc_sock *tsk; |
465 | struct sk_buff *skb; | 411 | struct sk_buff *skb; |
466 | u32 dnode; | 412 | u32 dnode, probing_state; |
467 | 413 | ||
468 | /* | 414 | /* |
469 | * Exit if socket isn't fully initialized (occurs when a failed accept() | 415 | * Exit if socket isn't fully initialized (occurs when a failed accept() |
@@ -472,6 +418,7 @@ static int tipc_release(struct socket *sock) | |||
472 | if (sk == NULL) | 418 | if (sk == NULL) |
473 | return 0; | 419 | return 0; |
474 | 420 | ||
421 | net = sock_net(sk); | ||
475 | tsk = tipc_sk(sk); | 422 | tsk = tipc_sk(sk); |
476 | lock_sock(sk); | 423 | lock_sock(sk); |
477 | 424 | ||
@@ -491,26 +438,29 @@ static int tipc_release(struct socket *sock) | |||
491 | (sock->state == SS_CONNECTED)) { | 438 | (sock->state == SS_CONNECTED)) { |
492 | sock->state = SS_DISCONNECTING; | 439 | sock->state = SS_DISCONNECTING; |
493 | tsk->connected = 0; | 440 | tsk->connected = 0; |
494 | tipc_node_remove_conn(dnode, tsk->ref); | 441 | tipc_node_remove_conn(net, dnode, tsk->portid); |
495 | } | 442 | } |
496 | if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT)) | 443 | if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, |
497 | tipc_link_xmit_skb(skb, dnode, 0); | 444 | TIPC_ERR_NO_PORT)) |
445 | tipc_link_xmit_skb(net, skb, dnode, 0); | ||
498 | } | 446 | } |
499 | } | 447 | } |
500 | 448 | ||
501 | tipc_sk_withdraw(tsk, 0, NULL); | 449 | tipc_sk_withdraw(tsk, 0, NULL); |
502 | tipc_sk_ref_discard(tsk->ref); | 450 | probing_state = tsk->probing_state; |
503 | k_cancel_timer(&tsk->timer); | 451 | if (del_timer_sync(&sk->sk_timer) && |
452 | probing_state != TIPC_CONN_PROBING) | ||
453 | sock_put(sk); | ||
454 | tipc_sk_remove(tsk); | ||
504 | if (tsk->connected) { | 455 | if (tsk->connected) { |
505 | skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, | 456 | skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, |
506 | SHORT_H_SIZE, 0, dnode, tipc_own_addr, | 457 | TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, |
507 | tsk_peer_port(tsk), | 458 | tsk_own_node(tsk), tsk_peer_port(tsk), |
508 | tsk->ref, TIPC_ERR_NO_PORT); | 459 | tsk->portid, TIPC_ERR_NO_PORT); |
509 | if (skb) | 460 | if (skb) |
510 | tipc_link_xmit_skb(skb, dnode, tsk->ref); | 461 | tipc_link_xmit_skb(net, skb, dnode, tsk->portid); |
511 | tipc_node_remove_conn(dnode, tsk->ref); | 462 | tipc_node_remove_conn(net, dnode, tsk->portid); |
512 | } | 463 | } |
513 | k_term_timer(&tsk->timer); | ||
514 | 464 | ||
515 | /* Discard any remaining (connection-based) messages in receive queue */ | 465 | /* Discard any remaining (connection-based) messages in receive queue */ |
516 | __skb_queue_purge(&sk->sk_receive_queue); | 466 | __skb_queue_purge(&sk->sk_receive_queue); |
@@ -518,7 +468,8 @@ static int tipc_release(struct socket *sock) | |||
518 | /* Reject any messages that accumulated in backlog queue */ | 468 | /* Reject any messages that accumulated in backlog queue */ |
519 | sock->state = SS_DISCONNECTING; | 469 | sock->state = SS_DISCONNECTING; |
520 | release_sock(sk); | 470 | release_sock(sk); |
521 | sock_put(sk); | 471 | |
472 | call_rcu(&tsk->rcu, tipc_sk_callback); | ||
522 | sock->sk = NULL; | 473 | sock->sk = NULL; |
523 | 474 | ||
524 | return 0; | 475 | return 0; |
@@ -602,6 +553,7 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, | |||
602 | { | 553 | { |
603 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; | 554 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; |
604 | struct tipc_sock *tsk = tipc_sk(sock->sk); | 555 | struct tipc_sock *tsk = tipc_sk(sock->sk); |
556 | struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id); | ||
605 | 557 | ||
606 | memset(addr, 0, sizeof(*addr)); | 558 | memset(addr, 0, sizeof(*addr)); |
607 | if (peer) { | 559 | if (peer) { |
@@ -611,8 +563,8 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, | |||
611 | addr->addr.id.ref = tsk_peer_port(tsk); | 563 | addr->addr.id.ref = tsk_peer_port(tsk); |
612 | addr->addr.id.node = tsk_peer_node(tsk); | 564 | addr->addr.id.node = tsk_peer_node(tsk); |
613 | } else { | 565 | } else { |
614 | addr->addr.id.ref = tsk->ref; | 566 | addr->addr.id.ref = tsk->portid; |
615 | addr->addr.id.node = tipc_own_addr; | 567 | addr->addr.id.node = tn->own_addr; |
616 | } | 568 | } |
617 | 569 | ||
618 | *uaddr_len = sizeof(*addr); | 570 | *uaddr_len = sizeof(*addr); |
@@ -711,8 +663,11 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, | |||
711 | struct msghdr *msg, size_t dsz, long timeo) | 663 | struct msghdr *msg, size_t dsz, long timeo) |
712 | { | 664 | { |
713 | struct sock *sk = sock->sk; | 665 | struct sock *sk = sock->sk; |
714 | struct tipc_msg *mhdr = &tipc_sk(sk)->phdr; | 666 | struct tipc_sock *tsk = tipc_sk(sk); |
715 | struct sk_buff_head head; | 667 | struct net *net = sock_net(sk); |
668 | struct tipc_msg *mhdr = &tsk->phdr; | ||
669 | struct sk_buff_head *pktchain = &sk->sk_write_queue; | ||
670 | struct iov_iter save = msg->msg_iter; | ||
716 | uint mtu; | 671 | uint mtu; |
717 | int rc; | 672 | int rc; |
718 | 673 | ||
@@ -727,83 +682,97 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, | |||
727 | 682 | ||
728 | new_mtu: | 683 | new_mtu: |
729 | mtu = tipc_bclink_get_mtu(); | 684 | mtu = tipc_bclink_get_mtu(); |
730 | __skb_queue_head_init(&head); | 685 | rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain); |
731 | rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head); | ||
732 | if (unlikely(rc < 0)) | 686 | if (unlikely(rc < 0)) |
733 | return rc; | 687 | return rc; |
734 | 688 | ||
735 | do { | 689 | do { |
736 | rc = tipc_bclink_xmit(&head); | 690 | rc = tipc_bclink_xmit(net, pktchain); |
737 | if (likely(rc >= 0)) { | 691 | if (likely(rc >= 0)) { |
738 | rc = dsz; | 692 | rc = dsz; |
739 | break; | 693 | break; |
740 | } | 694 | } |
741 | if (rc == -EMSGSIZE) | 695 | if (rc == -EMSGSIZE) { |
696 | msg->msg_iter = save; | ||
742 | goto new_mtu; | 697 | goto new_mtu; |
698 | } | ||
743 | if (rc != -ELINKCONG) | 699 | if (rc != -ELINKCONG) |
744 | break; | 700 | break; |
745 | tipc_sk(sk)->link_cong = 1; | 701 | tipc_sk(sk)->link_cong = 1; |
746 | rc = tipc_wait_for_sndmsg(sock, &timeo); | 702 | rc = tipc_wait_for_sndmsg(sock, &timeo); |
747 | if (rc) | 703 | if (rc) |
748 | __skb_queue_purge(&head); | 704 | __skb_queue_purge(pktchain); |
749 | } while (!rc); | 705 | } while (!rc); |
750 | return rc; | 706 | return rc; |
751 | } | 707 | } |
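tipc_sendmcast() now fragments straight into the socket write queue and, when the broadcast MTU turns out to be stale (-EMSGSIZE), rewinds the user data by restoring the saved iov_iter before retrying. A condensed sketch of that retry pattern, with a hypothetical helper name and the TIPC-internal builders (tipc_msg_build(), tipc_bclink_get_mtu()) assumed from this file's includes:

    #include <linux/skbuff.h>
    #include <linux/socket.h>
    #include <linux/uio.h>

    /* Sketch: snapshot the iterator before fragmenting; on -EMSGSIZE,
     * rewind the user data and refragment with the current MTU.
     */
    static int mcast_build_retry(struct tipc_msg *mhdr, struct msghdr *msg,
                                 int dsz, struct sk_buff_head *pktchain)
    {
            struct iov_iter save = msg->msg_iter;
            int rc;

    again:
            rc = tipc_msg_build(mhdr, msg, 0, dsz, tipc_bclink_get_mtu(),
                                pktchain);
            if (rc == -EMSGSIZE) {
                    msg->msg_iter = save;   /* rewind, then refragment */
                    goto again;
            }
            return rc;
    }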
752 | 708 | ||
753 | /* tipc_sk_mcast_rcv - Deliver multicast message to all destination sockets | 709 | /** |
710 | * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets | ||
711 | * @arrvq: queue with arriving messages, to be cloned after destination lookup | ||
712 | * @inputq: queue with cloned messages, delivered to socket after dest lookup | ||
713 | * | ||
714 | * Multi-threaded: parallel calls with reference to same queues may occur | ||
754 | */ | 715 | */ |
755 | void tipc_sk_mcast_rcv(struct sk_buff *buf) | 716 | void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, |
717 | struct sk_buff_head *inputq) | ||
756 | { | 718 | { |
757 | struct tipc_msg *msg = buf_msg(buf); | 719 | struct tipc_msg *msg; |
758 | struct tipc_port_list dports = {0, NULL, }; | 720 | struct tipc_plist dports; |
759 | struct tipc_port_list *item; | 721 | u32 portid; |
760 | struct sk_buff *b; | ||
761 | uint i, last, dst = 0; | ||
762 | u32 scope = TIPC_CLUSTER_SCOPE; | 722 | u32 scope = TIPC_CLUSTER_SCOPE; |
763 | 723 | struct sk_buff_head tmpq; | |
764 | if (in_own_node(msg_orignode(msg))) | 724 | uint hsz; |
765 | scope = TIPC_NODE_SCOPE; | 725 | struct sk_buff *skb, *_skb; |
766 | 726 | ||
767 | /* Create destination port list: */ | 727 | __skb_queue_head_init(&tmpq); |
768 | tipc_nametbl_mc_translate(msg_nametype(msg), | 728 | tipc_plist_init(&dports); |
769 | msg_namelower(msg), | 729 | |
770 | msg_nameupper(msg), | 730 | skb = tipc_skb_peek(arrvq, &inputq->lock); |
771 | scope, | 731 | for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) { |
772 | &dports); | 732 | msg = buf_msg(skb); |
773 | last = dports.count; | 733 | hsz = skb_headroom(skb) + msg_hdr_sz(msg); |
774 | if (!last) { | 734 | |
775 | kfree_skb(buf); | 735 | if (in_own_node(net, msg_orignode(msg))) |
776 | return; | 736 | scope = TIPC_NODE_SCOPE; |
777 | } | 737 | |
778 | 738 | /* Create destination port list and message clones: */ | |
779 | for (item = &dports; item; item = item->next) { | 739 | tipc_nametbl_mc_translate(net, |
780 | for (i = 0; i < PLSIZE && ++dst <= last; i++) { | 740 | msg_nametype(msg), msg_namelower(msg), |
781 | b = (dst != last) ? skb_clone(buf, GFP_ATOMIC) : buf; | 741 | msg_nameupper(msg), scope, &dports); |
782 | if (!b) { | 742 | portid = tipc_plist_pop(&dports); |
783 | pr_warn("Failed do clone mcast rcv buffer\n"); | 743 | for (; portid; portid = tipc_plist_pop(&dports)) { |
744 | _skb = __pskb_copy(skb, hsz, GFP_ATOMIC); | ||
745 | if (_skb) { | ||
746 | msg_set_destport(buf_msg(_skb), portid); | ||
747 | __skb_queue_tail(&tmpq, _skb); | ||
784 | continue; | 748 | continue; |
785 | } | 749 | } |
786 | msg_set_destport(msg, item->ports[i]); | 750 | pr_warn("Failed to clone mcast rcv buffer\n"); |
787 | tipc_sk_rcv(b); | 751 | } |
752 | /* Append to inputq if not already done by other thread */ | ||
753 | spin_lock_bh(&inputq->lock); | ||
754 | if (skb_peek(arrvq) == skb) { | ||
755 | skb_queue_splice_tail_init(&tmpq, inputq); | ||
756 | kfree_skb(__skb_dequeue(arrvq)); | ||
788 | } | 757 | } |
758 | spin_unlock_bh(&inputq->lock); | ||
759 | __skb_queue_purge(&tmpq); | ||
760 | kfree_skb(skb); | ||
789 | } | 761 | } |
790 | tipc_port_list_free(&dports); | 762 | tipc_sk_rcv(net, inputq); |
791 | } | 763 | } |
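The rewritten tipc_sk_mcast_rcv() may run on several CPUs against the same queue pair, so each thread clones into a private tmpq and only splices it into inputq if it still owns the head of arrvq when it re-takes the lock. A stripped-down sketch of that "clone privately, publish atomically" step, with a hypothetical helper name and locking as in the hunk above:

    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    /* Sketch: publish a privately built clone list only if this thread
     * still owns the head of the arrival queue; otherwise another thread
     * already delivered the buffer and the clones are simply dropped.
     */
    static void publish_clones(struct sk_buff *skb, struct sk_buff_head *arrvq,
                               struct sk_buff_head *inputq,
                               struct sk_buff_head *tmpq)
    {
            spin_lock_bh(&inputq->lock);
            if (skb_peek(arrvq) == skb) {
                    skb_queue_splice_tail_init(tmpq, inputq);
                    kfree_skb(__skb_dequeue(arrvq));
            }
            spin_unlock_bh(&inputq->lock);
            __skb_queue_purge(tmpq);        /* no-op when the splice won */
    }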
792 | 764 | ||
793 | /** | 765 | /** |
794 | * tipc_sk_proto_rcv - receive a connection mng protocol message | 766 | * tipc_sk_proto_rcv - receive a connection mng protocol message |
795 | * @tsk: receiving socket | 767 | * @tsk: receiving socket |
796 | * @dnode: node to send response message to, if any | 768 | * @skb: pointer to message buffer. Set to NULL if buffer is consumed. |
797 | * @buf: buffer containing protocol message | ||
798 | * Returns 0 (TIPC_OK) if message was consumed, 1 (TIPC_FWD_MSG) if | ||
799 | * (CONN_PROBE_REPLY) message should be forwarded. | ||
800 | */ | 769 | */ |
801 | static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode, | 770 | static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff **skb) |
802 | struct sk_buff *buf) | ||
803 | { | 771 | { |
804 | struct tipc_msg *msg = buf_msg(buf); | 772 | struct tipc_msg *msg = buf_msg(*skb); |
805 | int conn_cong; | 773 | int conn_cong; |
806 | 774 | u32 dnode; | |
775 | u32 own_node = tsk_own_node(tsk); | ||
807 | /* Ignore if connection cannot be validated: */ | 776 | /* Ignore if connection cannot be validated: */ |
808 | if (!tsk_peer_msg(tsk, msg)) | 777 | if (!tsk_peer_msg(tsk, msg)) |
809 | goto exit; | 778 | goto exit; |
@@ -816,15 +785,15 @@ static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode, | |||
816 | if (conn_cong) | 785 | if (conn_cong) |
817 | tsk->sk.sk_write_space(&tsk->sk); | 786 | tsk->sk.sk_write_space(&tsk->sk); |
818 | } else if (msg_type(msg) == CONN_PROBE) { | 787 | } else if (msg_type(msg) == CONN_PROBE) { |
819 | if (!tipc_msg_reverse(buf, dnode, TIPC_OK)) | 788 | if (tipc_msg_reverse(own_node, *skb, &dnode, TIPC_OK)) { |
820 | return TIPC_OK; | 789 | msg_set_type(msg, CONN_PROBE_REPLY); |
821 | msg_set_type(msg, CONN_PROBE_REPLY); | 790 | return; |
822 | return TIPC_FWD_MSG; | 791 | } |
823 | } | 792 | } |
824 | /* Do nothing if msg_type() == CONN_PROBE_REPLY */ | 793 | /* Do nothing if msg_type() == CONN_PROBE_REPLY */ |
825 | exit: | 794 | exit: |
826 | kfree_skb(buf); | 795 | kfree_skb(*skb); |
827 | return TIPC_OK; | 796 | *skb = NULL; |
828 | } | 797 | } |
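tipc_sk_proto_rcv() now takes a struct sk_buff ** and signals ownership through it: the pointer is set to NULL when the buffer is consumed, and left intact when the message has been turned into a reply (CONN_PROBE becoming CONN_PROBE_REPLY) that the caller must transmit. A hedged sketch of the caller side of that contract, with a hypothetical function name:

    #include <linux/printk.h>
    #include <linux/skbuff.h>

    /* Sketch: the callee NULLs *skb when it has consumed the buffer;
     * anything left afterwards is a reply the caller must send back.
     */
    static void rcv_conn_manager(struct tipc_sock *tsk, struct sk_buff *skb)
    {
            tipc_sk_proto_rcv(tsk, &skb);
            if (skb)
                    pr_debug("reply handed back, to be sent to the peer\n");
    }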
829 | 798 | ||
830 | static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) | 799 | static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) |
@@ -854,7 +823,6 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) | |||
854 | 823 | ||
855 | /** | 824 | /** |
856 | * tipc_sendmsg - send message in connectionless manner | 825 | * tipc_sendmsg - send message in connectionless manner |
857 | * @iocb: if NULL, indicates that socket lock is already held | ||
858 | * @sock: socket structure | 826 | * @sock: socket structure |
859 | * @m: message to send | 827 | * @m: message to send |
860 | * @dsz: amount of user data to be sent | 828 | * @dsz: amount of user data to be sent |
@@ -866,58 +834,63 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) | |||
866 | * | 834 | * |
867 | * Returns the number of bytes sent on success, or errno otherwise | 835 | * Returns the number of bytes sent on success, or errno otherwise |
868 | */ | 836 | */ |
869 | static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, | 837 | static int tipc_sendmsg(struct socket *sock, |
870 | struct msghdr *m, size_t dsz) | 838 | struct msghdr *m, size_t dsz) |
871 | { | 839 | { |
840 | struct sock *sk = sock->sk; | ||
841 | int ret; | ||
842 | |||
843 | lock_sock(sk); | ||
844 | ret = __tipc_sendmsg(sock, m, dsz); | ||
845 | release_sock(sk); | ||
846 | |||
847 | return ret; | ||
848 | } | ||
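tipc_sendmsg() is now a thin wrapper that takes the socket lock and delegates to __tipc_sendmsg(); the underscore variant assumes the lock is already held, which is what lets connect() and accept() further down reuse the send path without the old "iocb == NULL means lock held" convention. A generic sketch of this split, with hypothetical names:

    #include <linux/net.h>
    #include <linux/socket.h>
    #include <net/sock.h>

    /* Sketch: the public entry point owns the socket lock; the
     * double-underscore variant assumes the caller already holds it.
     */
    static int __example_send(struct socket *sock, struct msghdr *m, size_t dsz)
    {
            return (int)dsz;        /* the real work happens here, locked */
    }

    static int example_send(struct socket *sock, struct msghdr *m, size_t dsz)
    {
            struct sock *sk = sock->sk;
            int ret;

            lock_sock(sk);
            ret = __example_send(sock, m, dsz);
            release_sock(sk);
            return ret;
    }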
849 | |||
850 | static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz) | ||
851 | { | ||
872 | DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); | 852 | DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); |
873 | struct sock *sk = sock->sk; | 853 | struct sock *sk = sock->sk; |
874 | struct tipc_sock *tsk = tipc_sk(sk); | 854 | struct tipc_sock *tsk = tipc_sk(sk); |
855 | struct net *net = sock_net(sk); | ||
875 | struct tipc_msg *mhdr = &tsk->phdr; | 856 | struct tipc_msg *mhdr = &tsk->phdr; |
876 | u32 dnode, dport; | 857 | u32 dnode, dport; |
877 | struct sk_buff_head head; | 858 | struct sk_buff_head *pktchain = &sk->sk_write_queue; |
878 | struct sk_buff *skb; | 859 | struct sk_buff *skb; |
879 | struct tipc_name_seq *seq = &dest->addr.nameseq; | 860 | struct tipc_name_seq *seq; |
861 | struct iov_iter save; | ||
880 | u32 mtu; | 862 | u32 mtu; |
881 | long timeo; | 863 | long timeo; |
882 | int rc; | 864 | int rc; |
883 | 865 | ||
884 | if (unlikely(!dest)) | ||
885 | return -EDESTADDRREQ; | ||
886 | |||
887 | if (unlikely((m->msg_namelen < sizeof(*dest)) || | ||
888 | (dest->family != AF_TIPC))) | ||
889 | return -EINVAL; | ||
890 | |||
891 | if (dsz > TIPC_MAX_USER_MSG_SIZE) | 866 | if (dsz > TIPC_MAX_USER_MSG_SIZE) |
892 | return -EMSGSIZE; | 867 | return -EMSGSIZE; |
893 | 868 | if (unlikely(!dest)) { | |
894 | if (iocb) | 869 | if (tsk->connected && sock->state == SS_READY) |
895 | lock_sock(sk); | 870 | dest = &tsk->remote; |
896 | 871 | else | |
872 | return -EDESTADDRREQ; | ||
873 | } else if (unlikely(m->msg_namelen < sizeof(*dest)) || | ||
874 | dest->family != AF_TIPC) { | ||
875 | return -EINVAL; | ||
876 | } | ||
897 | if (unlikely(sock->state != SS_READY)) { | 877 | if (unlikely(sock->state != SS_READY)) { |
898 | if (sock->state == SS_LISTENING) { | 878 | if (sock->state == SS_LISTENING) |
899 | rc = -EPIPE; | 879 | return -EPIPE; |
900 | goto exit; | 880 | if (sock->state != SS_UNCONNECTED) |
901 | } | 881 | return -EISCONN; |
902 | if (sock->state != SS_UNCONNECTED) { | 882 | if (tsk->published) |
903 | rc = -EISCONN; | 883 | return -EOPNOTSUPP; |
904 | goto exit; | ||
905 | } | ||
906 | if (tsk->published) { | ||
907 | rc = -EOPNOTSUPP; | ||
908 | goto exit; | ||
909 | } | ||
910 | if (dest->addrtype == TIPC_ADDR_NAME) { | 884 | if (dest->addrtype == TIPC_ADDR_NAME) { |
911 | tsk->conn_type = dest->addr.name.name.type; | 885 | tsk->conn_type = dest->addr.name.name.type; |
912 | tsk->conn_instance = dest->addr.name.name.instance; | 886 | tsk->conn_instance = dest->addr.name.name.instance; |
913 | } | 887 | } |
914 | } | 888 | } |
915 | 889 | seq = &dest->addr.nameseq; | |
916 | timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); | 890 | timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); |
917 | 891 | ||
918 | if (dest->addrtype == TIPC_ADDR_MCAST) { | 892 | if (dest->addrtype == TIPC_ADDR_MCAST) { |
919 | rc = tipc_sendmcast(sock, seq, m, dsz, timeo); | 893 | return tipc_sendmcast(sock, seq, m, dsz, timeo); |
920 | goto exit; | ||
921 | } else if (dest->addrtype == TIPC_ADDR_NAME) { | 894 | } else if (dest->addrtype == TIPC_ADDR_NAME) { |
922 | u32 type = dest->addr.name.name.type; | 895 | u32 type = dest->addr.name.name.type; |
923 | u32 inst = dest->addr.name.name.instance; | 896 | u32 inst = dest->addr.name.name.instance; |
@@ -929,13 +902,11 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
929 | msg_set_nametype(mhdr, type); | 902 | msg_set_nametype(mhdr, type); |
930 | msg_set_nameinst(mhdr, inst); | 903 | msg_set_nameinst(mhdr, inst); |
931 | msg_set_lookup_scope(mhdr, tipc_addr_scope(domain)); | 904 | msg_set_lookup_scope(mhdr, tipc_addr_scope(domain)); |
932 | dport = tipc_nametbl_translate(type, inst, &dnode); | 905 | dport = tipc_nametbl_translate(net, type, inst, &dnode); |
933 | msg_set_destnode(mhdr, dnode); | 906 | msg_set_destnode(mhdr, dnode); |
934 | msg_set_destport(mhdr, dport); | 907 | msg_set_destport(mhdr, dport); |
935 | if (unlikely(!dport && !dnode)) { | 908 | if (unlikely(!dport && !dnode)) |
936 | rc = -EHOSTUNREACH; | 909 | return -EHOSTUNREACH; |
937 | goto exit; | ||
938 | } | ||
939 | } else if (dest->addrtype == TIPC_ADDR_ID) { | 910 | } else if (dest->addrtype == TIPC_ADDR_ID) { |
940 | dnode = dest->addr.id.node; | 911 | dnode = dest->addr.id.node; |
941 | msg_set_type(mhdr, TIPC_DIRECT_MSG); | 912 | msg_set_type(mhdr, TIPC_DIRECT_MSG); |
@@ -945,35 +916,34 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
945 | msg_set_hdr_sz(mhdr, BASIC_H_SIZE); | 916 | msg_set_hdr_sz(mhdr, BASIC_H_SIZE); |
946 | } | 917 | } |
947 | 918 | ||
919 | save = m->msg_iter; | ||
948 | new_mtu: | 920 | new_mtu: |
949 | mtu = tipc_node_get_mtu(dnode, tsk->ref); | 921 | mtu = tipc_node_get_mtu(net, dnode, tsk->portid); |
950 | __skb_queue_head_init(&head); | 922 | rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain); |
951 | rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head); | ||
952 | if (rc < 0) | 923 | if (rc < 0) |
953 | goto exit; | 924 | return rc; |
954 | 925 | ||
955 | do { | 926 | do { |
956 | skb = skb_peek(&head); | 927 | skb = skb_peek(pktchain); |
957 | TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong; | 928 | TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong; |
958 | rc = tipc_link_xmit(&head, dnode, tsk->ref); | 929 | rc = tipc_link_xmit(net, pktchain, dnode, tsk->portid); |
959 | if (likely(rc >= 0)) { | 930 | if (likely(rc >= 0)) { |
960 | if (sock->state != SS_READY) | 931 | if (sock->state != SS_READY) |
961 | sock->state = SS_CONNECTING; | 932 | sock->state = SS_CONNECTING; |
962 | rc = dsz; | 933 | rc = dsz; |
963 | break; | 934 | break; |
964 | } | 935 | } |
965 | if (rc == -EMSGSIZE) | 936 | if (rc == -EMSGSIZE) { |
937 | m->msg_iter = save; | ||
966 | goto new_mtu; | 938 | goto new_mtu; |
939 | } | ||
967 | if (rc != -ELINKCONG) | 940 | if (rc != -ELINKCONG) |
968 | break; | 941 | break; |
969 | tsk->link_cong = 1; | 942 | tsk->link_cong = 1; |
970 | rc = tipc_wait_for_sndmsg(sock, &timeo); | 943 | rc = tipc_wait_for_sndmsg(sock, &timeo); |
971 | if (rc) | 944 | if (rc) |
972 | __skb_queue_purge(&head); | 945 | __skb_queue_purge(pktchain); |
973 | } while (!rc); | 946 | } while (!rc); |
974 | exit: | ||
975 | if (iocb) | ||
976 | release_sock(sk); | ||
977 | 947 | ||
978 | return rc; | 948 | return rc; |
979 | } | 949 | } |
@@ -1010,7 +980,6 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p) | |||
1010 | 980 | ||
1011 | /** | 981 | /** |
1012 | * tipc_send_stream - send stream-oriented data | 982 | * tipc_send_stream - send stream-oriented data |
1013 | * @iocb: (unused) | ||
1014 | * @sock: socket structure | 983 | * @sock: socket structure |
1015 | * @m: data to send | 984 | * @m: data to send |
1016 | * @dsz: total length of data to be transmitted | 985 | * @dsz: total length of data to be transmitted |
@@ -1020,23 +989,36 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p) | |||
1020 | * Returns the number of bytes sent on success (or partial success), | 989 | * Returns the number of bytes sent on success (or partial success), |
1021 | * or errno if no data sent | 990 | * or errno if no data sent |
1022 | */ | 991 | */ |
1023 | static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, | 992 | static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz) |
1024 | struct msghdr *m, size_t dsz) | 993 | { |
994 | struct sock *sk = sock->sk; | ||
995 | int ret; | ||
996 | |||
997 | lock_sock(sk); | ||
998 | ret = __tipc_send_stream(sock, m, dsz); | ||
999 | release_sock(sk); | ||
1000 | |||
1001 | return ret; | ||
1002 | } | ||
1003 | |||
1004 | static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz) | ||
1025 | { | 1005 | { |
1026 | struct sock *sk = sock->sk; | 1006 | struct sock *sk = sock->sk; |
1007 | struct net *net = sock_net(sk); | ||
1027 | struct tipc_sock *tsk = tipc_sk(sk); | 1008 | struct tipc_sock *tsk = tipc_sk(sk); |
1028 | struct tipc_msg *mhdr = &tsk->phdr; | 1009 | struct tipc_msg *mhdr = &tsk->phdr; |
1029 | struct sk_buff_head head; | 1010 | struct sk_buff_head *pktchain = &sk->sk_write_queue; |
1030 | DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); | 1011 | DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); |
1031 | u32 ref = tsk->ref; | 1012 | u32 portid = tsk->portid; |
1032 | int rc = -EINVAL; | 1013 | int rc = -EINVAL; |
1033 | long timeo; | 1014 | long timeo; |
1034 | u32 dnode; | 1015 | u32 dnode; |
1035 | uint mtu, send, sent = 0; | 1016 | uint mtu, send, sent = 0; |
1017 | struct iov_iter save; | ||
1036 | 1018 | ||
1037 | /* Handle implied connection establishment */ | 1019 | /* Handle implied connection establishment */ |
1038 | if (unlikely(dest)) { | 1020 | if (unlikely(dest)) { |
1039 | rc = tipc_sendmsg(iocb, sock, m, dsz); | 1021 | rc = __tipc_sendmsg(sock, m, dsz); |
1040 | if (dsz && (dsz == rc)) | 1022 | if (dsz && (dsz == rc)) |
1041 | tsk->sent_unacked = 1; | 1023 | tsk->sent_unacked = 1; |
1042 | return rc; | 1024 | return rc; |
@@ -1044,30 +1026,26 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, | |||
1044 | if (dsz > (uint)INT_MAX) | 1026 | if (dsz > (uint)INT_MAX) |
1045 | return -EMSGSIZE; | 1027 | return -EMSGSIZE; |
1046 | 1028 | ||
1047 | if (iocb) | ||
1048 | lock_sock(sk); | ||
1049 | |||
1050 | if (unlikely(sock->state != SS_CONNECTED)) { | 1029 | if (unlikely(sock->state != SS_CONNECTED)) { |
1051 | if (sock->state == SS_DISCONNECTING) | 1030 | if (sock->state == SS_DISCONNECTING) |
1052 | rc = -EPIPE; | 1031 | return -EPIPE; |
1053 | else | 1032 | else |
1054 | rc = -ENOTCONN; | 1033 | return -ENOTCONN; |
1055 | goto exit; | ||
1056 | } | 1034 | } |
1057 | 1035 | ||
1058 | timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); | 1036 | timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); |
1059 | dnode = tsk_peer_node(tsk); | 1037 | dnode = tsk_peer_node(tsk); |
1060 | 1038 | ||
1061 | next: | 1039 | next: |
1040 | save = m->msg_iter; | ||
1062 | mtu = tsk->max_pkt; | 1041 | mtu = tsk->max_pkt; |
1063 | send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); | 1042 | send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); |
1064 | __skb_queue_head_init(&head); | 1043 | rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain); |
1065 | rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head); | ||
1066 | if (unlikely(rc < 0)) | 1044 | if (unlikely(rc < 0)) |
1067 | goto exit; | 1045 | return rc; |
1068 | do { | 1046 | do { |
1069 | if (likely(!tsk_conn_cong(tsk))) { | 1047 | if (likely(!tsk_conn_cong(tsk))) { |
1070 | rc = tipc_link_xmit(&head, dnode, ref); | 1048 | rc = tipc_link_xmit(net, pktchain, dnode, portid); |
1071 | if (likely(!rc)) { | 1049 | if (likely(!rc)) { |
1072 | tsk->sent_unacked++; | 1050 | tsk->sent_unacked++; |
1073 | sent += send; | 1051 | sent += send; |
@@ -1076,7 +1054,9 @@ next: | |||
1076 | goto next; | 1054 | goto next; |
1077 | } | 1055 | } |
1078 | if (rc == -EMSGSIZE) { | 1056 | if (rc == -EMSGSIZE) { |
1079 | tsk->max_pkt = tipc_node_get_mtu(dnode, ref); | 1057 | tsk->max_pkt = tipc_node_get_mtu(net, dnode, |
1058 | portid); | ||
1059 | m->msg_iter = save; | ||
1080 | goto next; | 1060 | goto next; |
1081 | } | 1061 | } |
1082 | if (rc != -ELINKCONG) | 1062 | if (rc != -ELINKCONG) |
@@ -1085,17 +1065,14 @@ next: | |||
1085 | } | 1065 | } |
1086 | rc = tipc_wait_for_sndpkt(sock, &timeo); | 1066 | rc = tipc_wait_for_sndpkt(sock, &timeo); |
1087 | if (rc) | 1067 | if (rc) |
1088 | __skb_queue_purge(&head); | 1068 | __skb_queue_purge(pktchain); |
1089 | } while (!rc); | 1069 | } while (!rc); |
1090 | exit: | 1070 | |
1091 | if (iocb) | ||
1092 | release_sock(sk); | ||
1093 | return sent ? sent : rc; | 1071 | return sent ? sent : rc; |
1094 | } | 1072 | } |
1095 | 1073 | ||
1096 | /** | 1074 | /** |
1097 | * tipc_send_packet - send a connection-oriented message | 1075 | * tipc_send_packet - send a connection-oriented message |
1098 | * @iocb: if NULL, indicates that socket lock is already held | ||
1099 | * @sock: socket structure | 1076 | * @sock: socket structure |
1100 | * @m: message to send | 1077 | * @m: message to send |
1101 | * @dsz: length of data to be transmitted | 1078 | * @dsz: length of data to be transmitted |
@@ -1104,13 +1081,12 @@ exit: | |||
1104 | * | 1081 | * |
1105 | * Returns the number of bytes sent on success, or errno otherwise | 1082 | * Returns the number of bytes sent on success, or errno otherwise |
1106 | */ | 1083 | */ |
1107 | static int tipc_send_packet(struct kiocb *iocb, struct socket *sock, | 1084 | static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz) |
1108 | struct msghdr *m, size_t dsz) | ||
1109 | { | 1085 | { |
1110 | if (dsz > TIPC_MAX_USER_MSG_SIZE) | 1086 | if (dsz > TIPC_MAX_USER_MSG_SIZE) |
1111 | return -EMSGSIZE; | 1087 | return -EMSGSIZE; |
1112 | 1088 | ||
1113 | return tipc_send_stream(iocb, sock, m, dsz); | 1089 | return tipc_send_stream(sock, m, dsz); |
1114 | } | 1090 | } |
1115 | 1091 | ||
1116 | /* tipc_sk_finish_conn - complete the setup of a connection | 1092 | /* tipc_sk_finish_conn - complete the setup of a connection |
@@ -1118,6 +1094,8 @@ static int tipc_send_packet(struct kiocb *iocb, struct socket *sock, | |||
1118 | static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, | 1094 | static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, |
1119 | u32 peer_node) | 1095 | u32 peer_node) |
1120 | { | 1096 | { |
1097 | struct sock *sk = &tsk->sk; | ||
1098 | struct net *net = sock_net(sk); | ||
1121 | struct tipc_msg *msg = &tsk->phdr; | 1099 | struct tipc_msg *msg = &tsk->phdr; |
1122 | 1100 | ||
1123 | msg_set_destnode(msg, peer_node); | 1101 | msg_set_destnode(msg, peer_node); |
@@ -1126,12 +1104,12 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, | |||
1126 | msg_set_lookup_scope(msg, 0); | 1104 | msg_set_lookup_scope(msg, 0); |
1127 | msg_set_hdr_sz(msg, SHORT_H_SIZE); | 1105 | msg_set_hdr_sz(msg, SHORT_H_SIZE); |
1128 | 1106 | ||
1129 | tsk->probing_interval = CONN_PROBING_INTERVAL; | 1107 | tsk->probing_intv = CONN_PROBING_INTERVAL; |
1130 | tsk->probing_state = TIPC_CONN_OK; | 1108 | tsk->probing_state = TIPC_CONN_OK; |
1131 | tsk->connected = 1; | 1109 | tsk->connected = 1; |
1132 | k_start_timer(&tsk->timer, tsk->probing_interval); | 1110 | sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv); |
1133 | tipc_node_add_conn(peer_node, tsk->ref, peer_port); | 1111 | tipc_node_add_conn(net, peer_node, tsk->portid, peer_port); |
1134 | tsk->max_pkt = tipc_node_get_mtu(peer_node, tsk->ref); | 1112 | tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid); |
1135 | } | 1113 | } |
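Connection probing now rides on the generic sk->sk_timer, armed with sk_reset_timer() and a jiffies-based interval, instead of the private TIPC timer; tipc_sk_timeout() below receives the tipc_sock pointer as its timer data. A hedged sketch of how such a timer is typically wired up at socket creation (not shown in this section), reusing the callback and interval names from these hunks:

    #include <linux/jiffies.h>
    #include <linux/timer.h>
    #include <net/sock.h>

    /* Sketch (wrapper name hypothetical): setup_timer() would normally run
     * once when the socket is created; sk_reset_timer() re-arms the timer
     * and takes a socket reference that tipc_sk_timeout() drops again.
     */
    static void arm_probe_timer(struct sock *sk, struct tipc_sock *tsk)
    {
            setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
            sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
    }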
1136 | 1114 | ||
1137 | /** | 1115 | /** |
@@ -1230,6 +1208,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, | |||
1230 | 1208 | ||
1231 | static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack) | 1209 | static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack) |
1232 | { | 1210 | { |
1211 | struct net *net = sock_net(&tsk->sk); | ||
1233 | struct sk_buff *skb = NULL; | 1212 | struct sk_buff *skb = NULL; |
1234 | struct tipc_msg *msg; | 1213 | struct tipc_msg *msg; |
1235 | u32 peer_port = tsk_peer_port(tsk); | 1214 | u32 peer_port = tsk_peer_port(tsk); |
@@ -1237,13 +1216,14 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack) | |||
1237 | 1216 | ||
1238 | if (!tsk->connected) | 1217 | if (!tsk->connected) |
1239 | return; | 1218 | return; |
1240 | skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode, | 1219 | skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, |
1241 | tipc_own_addr, peer_port, tsk->ref, TIPC_OK); | 1220 | dnode, tsk_own_node(tsk), peer_port, |
1221 | tsk->portid, TIPC_OK); | ||
1242 | if (!skb) | 1222 | if (!skb) |
1243 | return; | 1223 | return; |
1244 | msg = buf_msg(skb); | 1224 | msg = buf_msg(skb); |
1245 | msg_set_msgcnt(msg, ack); | 1225 | msg_set_msgcnt(msg, ack); |
1246 | tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg)); | 1226 | tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg)); |
1247 | } | 1227 | } |
1248 | 1228 | ||
1249 | static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) | 1229 | static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) |
@@ -1267,12 +1247,12 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) | |||
1267 | err = 0; | 1247 | err = 0; |
1268 | if (!skb_queue_empty(&sk->sk_receive_queue)) | 1248 | if (!skb_queue_empty(&sk->sk_receive_queue)) |
1269 | break; | 1249 | break; |
1270 | err = sock_intr_errno(timeo); | ||
1271 | if (signal_pending(current)) | ||
1272 | break; | ||
1273 | err = -EAGAIN; | 1250 | err = -EAGAIN; |
1274 | if (!timeo) | 1251 | if (!timeo) |
1275 | break; | 1252 | break; |
1253 | err = sock_intr_errno(timeo); | ||
1254 | if (signal_pending(current)) | ||
1255 | break; | ||
1276 | } | 1256 | } |
1277 | finish_wait(sk_sleep(sk), &wait); | 1257 | finish_wait(sk_sleep(sk), &wait); |
1278 | *timeop = timeo; | 1258 | *timeop = timeo; |
@@ -1281,7 +1261,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) | |||
1281 | 1261 | ||
1282 | /** | 1262 | /** |
1283 | * tipc_recvmsg - receive packet-oriented message | 1263 | * tipc_recvmsg - receive packet-oriented message |
1284 | * @iocb: (unused) | ||
1285 | * @m: descriptor for message info | 1264 | * @m: descriptor for message info |
1286 | * @buf_len: total size of user buffer area | 1265 | * @buf_len: total size of user buffer area |
1287 | * @flags: receive flags | 1266 | * @flags: receive flags |
@@ -1291,8 +1270,8 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) | |||
1291 | * | 1270 | * |
1292 | * Returns size of returned message data, errno otherwise | 1271 | * Returns size of returned message data, errno otherwise |
1293 | */ | 1272 | */ |
1294 | static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock, | 1273 | static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len, |
1295 | struct msghdr *m, size_t buf_len, int flags) | 1274 | int flags) |
1296 | { | 1275 | { |
1297 | struct sock *sk = sock->sk; | 1276 | struct sock *sk = sock->sk; |
1298 | struct tipc_sock *tsk = tipc_sk(sk); | 1277 | struct tipc_sock *tsk = tipc_sk(sk); |
@@ -1376,7 +1355,6 @@ exit: | |||
1376 | 1355 | ||
1377 | /** | 1356 | /** |
1378 | * tipc_recv_stream - receive stream-oriented data | 1357 | * tipc_recv_stream - receive stream-oriented data |
1379 | * @iocb: (unused) | ||
1380 | * @m: descriptor for message info | 1358 | * @m: descriptor for message info |
1381 | * @buf_len: total size of user buffer area | 1359 | * @buf_len: total size of user buffer area |
1382 | * @flags: receive flags | 1360 | * @flags: receive flags |
@@ -1386,8 +1364,8 @@ exit: | |||
1386 | * | 1364 | * |
1387 | * Returns size of returned message data, errno otherwise | 1365 | * Returns size of returned message data, errno otherwise |
1388 | */ | 1366 | */ |
1389 | static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock, | 1367 | static int tipc_recv_stream(struct socket *sock, struct msghdr *m, |
1390 | struct msghdr *m, size_t buf_len, int flags) | 1368 | size_t buf_len, int flags) |
1391 | { | 1369 | { |
1392 | struct sock *sk = sock->sk; | 1370 | struct sock *sk = sock->sk; |
1393 | struct tipc_sock *tsk = tipc_sk(sk); | 1371 | struct tipc_sock *tsk = tipc_sk(sk); |
@@ -1529,15 +1507,16 @@ static void tipc_data_ready(struct sock *sk) | |||
1529 | /** | 1507 | /** |
1530 | * filter_connect - Handle all incoming messages for a connection-based socket | 1508 | * filter_connect - Handle all incoming messages for a connection-based socket |
1531 | * @tsk: TIPC socket | 1509 | * @tsk: TIPC socket |
1532 | * @msg: message | 1510 | * @skb: pointer to message buffer. Set to NULL if buffer is consumed |
1533 | * | 1511 | * |
1534 | * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise | 1512 | * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise |
1535 | */ | 1513 | */ |
1536 | static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) | 1514 | static int filter_connect(struct tipc_sock *tsk, struct sk_buff **skb) |
1537 | { | 1515 | { |
1538 | struct sock *sk = &tsk->sk; | 1516 | struct sock *sk = &tsk->sk; |
1517 | struct net *net = sock_net(sk); | ||
1539 | struct socket *sock = sk->sk_socket; | 1518 | struct socket *sock = sk->sk_socket; |
1540 | struct tipc_msg *msg = buf_msg(*buf); | 1519 | struct tipc_msg *msg = buf_msg(*skb); |
1541 | int retval = -TIPC_ERR_NO_PORT; | 1520 | int retval = -TIPC_ERR_NO_PORT; |
1542 | 1521 | ||
1543 | if (msg_mcast(msg)) | 1522 | if (msg_mcast(msg)) |
@@ -1551,8 +1530,8 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) | |||
1551 | sock->state = SS_DISCONNECTING; | 1530 | sock->state = SS_DISCONNECTING; |
1552 | tsk->connected = 0; | 1531 | tsk->connected = 0; |
1553 | /* let timer expire on it's own */ | 1532 | /* let timer expire on it's own */ |
1554 | tipc_node_remove_conn(tsk_peer_node(tsk), | 1533 | tipc_node_remove_conn(net, tsk_peer_node(tsk), |
1555 | tsk->ref); | 1534 | tsk->portid); |
1556 | } | 1535 | } |
1557 | retval = TIPC_OK; | 1536 | retval = TIPC_OK; |
1558 | } | 1537 | } |
@@ -1587,8 +1566,8 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) | |||
1587 | * connect() routine if sleeping. | 1566 | * connect() routine if sleeping. |
1588 | */ | 1567 | */ |
1589 | if (msg_data_sz(msg) == 0) { | 1568 | if (msg_data_sz(msg) == 0) { |
1590 | kfree_skb(*buf); | 1569 | kfree_skb(*skb); |
1591 | *buf = NULL; | 1570 | *skb = NULL; |
1592 | if (waitqueue_active(sk_sleep(sk))) | 1571 | if (waitqueue_active(sk_sleep(sk))) |
1593 | wake_up_interruptible(sk_sleep(sk)); | 1572 | wake_up_interruptible(sk_sleep(sk)); |
1594 | } | 1573 | } |
@@ -1640,32 +1619,33 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf) | |||
1640 | /** | 1619 | /** |
1641 | * filter_rcv - validate incoming message | 1620 | * filter_rcv - validate incoming message |
1642 | * @sk: socket | 1621 | * @sk: socket |
1643 | * @buf: message | 1622 | * @skb: pointer to message. Set to NULL if buffer is consumed. |
1644 | * | 1623 | * |
1645 | * Enqueues message on receive queue if acceptable; optionally handles | 1624 | * Enqueues message on receive queue if acceptable; optionally handles |
1646 | * disconnect indication for a connected socket. | 1625 | * disconnect indication for a connected socket. |
1647 | * | 1626 | * |
1648 | * Called with socket lock already taken; port lock may also be taken. | 1627 | * Called with socket lock already taken |
1649 | * | 1628 | * |
1650 | * Returns 0 (TIPC_OK) if message was consumed, -TIPC error code if message | 1629 | * Returns 0 (TIPC_OK) if message was ok, -TIPC error code if rejected |
1651 | * to be rejected, 1 (TIPC_FWD_MSG) if (CONN_MANAGER) message to be forwarded | ||
1652 | */ | 1630 | */ |
1653 | static int filter_rcv(struct sock *sk, struct sk_buff *buf) | 1631 | static int filter_rcv(struct sock *sk, struct sk_buff **skb) |
1654 | { | 1632 | { |
1655 | struct socket *sock = sk->sk_socket; | 1633 | struct socket *sock = sk->sk_socket; |
1656 | struct tipc_sock *tsk = tipc_sk(sk); | 1634 | struct tipc_sock *tsk = tipc_sk(sk); |
1657 | struct tipc_msg *msg = buf_msg(buf); | 1635 | struct tipc_msg *msg = buf_msg(*skb); |
1658 | unsigned int limit = rcvbuf_limit(sk, buf); | 1636 | unsigned int limit = rcvbuf_limit(sk, *skb); |
1659 | u32 onode; | ||
1660 | int rc = TIPC_OK; | 1637 | int rc = TIPC_OK; |
1661 | 1638 | ||
1662 | if (unlikely(msg_user(msg) == CONN_MANAGER)) | 1639 | if (unlikely(msg_user(msg) == CONN_MANAGER)) { |
1663 | return tipc_sk_proto_rcv(tsk, &onode, buf); | 1640 | tipc_sk_proto_rcv(tsk, skb); |
1641 | return TIPC_OK; | ||
1642 | } | ||
1664 | 1643 | ||
1665 | if (unlikely(msg_user(msg) == SOCK_WAKEUP)) { | 1644 | if (unlikely(msg_user(msg) == SOCK_WAKEUP)) { |
1666 | kfree_skb(buf); | 1645 | kfree_skb(*skb); |
1667 | tsk->link_cong = 0; | 1646 | tsk->link_cong = 0; |
1668 | sk->sk_write_space(sk); | 1647 | sk->sk_write_space(sk); |
1648 | *skb = NULL; | ||
1669 | return TIPC_OK; | 1649 | return TIPC_OK; |
1670 | } | 1650 | } |
1671 | 1651 | ||
@@ -1677,21 +1657,22 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf) | |||
1677 | if (msg_connected(msg)) | 1657 | if (msg_connected(msg)) |
1678 | return -TIPC_ERR_NO_PORT; | 1658 | return -TIPC_ERR_NO_PORT; |
1679 | } else { | 1659 | } else { |
1680 | rc = filter_connect(tsk, &buf); | 1660 | rc = filter_connect(tsk, skb); |
1681 | if (rc != TIPC_OK || buf == NULL) | 1661 | if (rc != TIPC_OK || !*skb) |
1682 | return rc; | 1662 | return rc; |
1683 | } | 1663 | } |
1684 | 1664 | ||
1685 | /* Reject message if there isn't room to queue it */ | 1665 | /* Reject message if there isn't room to queue it */ |
1686 | if (sk_rmem_alloc_get(sk) + buf->truesize >= limit) | 1666 | if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit) |
1687 | return -TIPC_ERR_OVERLOAD; | 1667 | return -TIPC_ERR_OVERLOAD; |
1688 | 1668 | ||
1689 | /* Enqueue message */ | 1669 | /* Enqueue message */ |
1690 | TIPC_SKB_CB(buf)->handle = NULL; | 1670 | TIPC_SKB_CB(*skb)->handle = NULL; |
1691 | __skb_queue_tail(&sk->sk_receive_queue, buf); | 1671 | __skb_queue_tail(&sk->sk_receive_queue, *skb); |
1692 | skb_set_owner_r(buf, sk); | 1672 | skb_set_owner_r(*skb, sk); |
1693 | 1673 | ||
1694 | sk->sk_data_ready(sk); | 1674 | sk->sk_data_ready(sk); |
1675 | *skb = NULL; | ||
1695 | return TIPC_OK; | 1676 | return TIPC_OK; |
1696 | } | 1677 | } |
1697 | 1678 | ||
@@ -1700,78 +1681,125 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf) | |||
1700 | * @sk: socket | 1681 | * @sk: socket |
1701 | * @skb: message | 1682 | * @skb: message |
1702 | * | 1683 | * |
1703 | * Caller must hold socket lock, but not port lock. | 1684 | * Caller must hold socket lock |
1704 | * | 1685 | * |
1705 | * Returns 0 | 1686 | * Returns 0 |
1706 | */ | 1687 | */ |
1707 | static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) | 1688 | static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) |
1708 | { | 1689 | { |
1709 | int rc; | 1690 | int err; |
1710 | u32 onode; | 1691 | atomic_t *dcnt; |
1692 | u32 dnode; | ||
1711 | struct tipc_sock *tsk = tipc_sk(sk); | 1693 | struct tipc_sock *tsk = tipc_sk(sk); |
1694 | struct net *net = sock_net(sk); | ||
1712 | uint truesize = skb->truesize; | 1695 | uint truesize = skb->truesize; |
1713 | 1696 | ||
1714 | rc = filter_rcv(sk, skb); | 1697 | err = filter_rcv(sk, &skb); |
1715 | 1698 | if (likely(!skb)) { | |
1716 | if (likely(!rc)) { | 1699 | dcnt = &tsk->dupl_rcvcnt; |
1717 | if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT) | 1700 | if (atomic_read(dcnt) < TIPC_CONN_OVERLOAD_LIMIT) |
1718 | atomic_add(truesize, &tsk->dupl_rcvcnt); | 1701 | atomic_add(truesize, dcnt); |
1719 | return 0; | 1702 | return 0; |
1720 | } | 1703 | } |
1704 | if (!err || tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, -err)) | ||
1705 | tipc_link_xmit_skb(net, skb, dnode, tsk->portid); | ||
1706 | return 0; | ||
1707 | } | ||
1721 | 1708 | ||
1722 | if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc)) | 1709 | /** |
1723 | return 0; | 1710 | * tipc_sk_enqueue - extract all buffers with destination 'dport' from |
1724 | 1711 | * inputq and try adding them to socket or backlog queue | |
1725 | tipc_link_xmit_skb(skb, onode, 0); | 1712 | * @inputq: list of incoming buffers with potentially different destinations |
1713 | * @sk: socket where the buffers should be enqueued | ||
1714 | * @dport: port number for the socket | ||
1715 | * @_skb: returned buffer to be forwarded or rejected, if applicable | ||
1716 | * | ||
1717 | * Caller must hold socket lock | ||
1718 | * | ||
1719 | * Returns TIPC_OK if all buffers enqueued, otherwise -TIPC_ERR_OVERLOAD | ||
1720 | * or -TIPC_ERR_NO_PORT | ||
1721 | */ | ||
1722 | static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, | ||
1723 | u32 dport, struct sk_buff **_skb) | ||
1724 | { | ||
1725 | unsigned int lim; | ||
1726 | atomic_t *dcnt; | ||
1727 | int err; | ||
1728 | struct sk_buff *skb; | ||
1729 | unsigned long time_limit = jiffies + 2; | ||
1726 | 1730 | ||
1727 | return 0; | 1731 | while (skb_queue_len(inputq)) { |
1732 | if (unlikely(time_after_eq(jiffies, time_limit))) | ||
1733 | return TIPC_OK; | ||
1734 | skb = tipc_skb_dequeue(inputq, dport); | ||
1735 | if (unlikely(!skb)) | ||
1736 | return TIPC_OK; | ||
1737 | if (!sock_owned_by_user(sk)) { | ||
1738 | err = filter_rcv(sk, &skb); | ||
1739 | if (likely(!skb)) | ||
1740 | continue; | ||
1741 | *_skb = skb; | ||
1742 | return err; | ||
1743 | } | ||
1744 | dcnt = &tipc_sk(sk)->dupl_rcvcnt; | ||
1745 | if (sk->sk_backlog.len) | ||
1746 | atomic_set(dcnt, 0); | ||
1747 | lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); | ||
1748 | if (likely(!sk_add_backlog(sk, skb, lim))) | ||
1749 | continue; | ||
1750 | *_skb = skb; | ||
1751 | return -TIPC_ERR_OVERLOAD; | ||
1752 | } | ||
1753 | return TIPC_OK; | ||
1728 | } | 1754 | } |
1729 | 1755 | ||
1730 | /** | 1756 | /** |
1731 | * tipc_sk_rcv - handle incoming message | 1757 | * tipc_sk_rcv - handle a chain of incoming buffers |
1732 | * @skb: buffer containing arriving message | 1758 | * @inputq: buffer list containing the buffers |
1733 | * Consumes buffer | 1759 | * Consumes all buffers in list until inputq is empty |
1734 | * Returns 0 if success, or errno: -EHOSTUNREACH | 1760 | * Note: may be called in multiple threads referring to the same queue |
1761 | * Returns 0 if last buffer was accepted, otherwise -EHOSTUNREACH | ||
1762 | * Only node local calls check the return value, sending single-buffer queues | ||
1735 | */ | 1763 | */ |
1736 | int tipc_sk_rcv(struct sk_buff *skb) | 1764 | int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) |
1737 | { | 1765 | { |
1766 | u32 dnode, dport = 0; | ||
1767 | int err = -TIPC_ERR_NO_PORT; | ||
1768 | struct sk_buff *skb; | ||
1738 | struct tipc_sock *tsk; | 1769 | struct tipc_sock *tsk; |
1770 | struct tipc_net *tn; | ||
1739 | struct sock *sk; | 1771 | struct sock *sk; |
1740 | u32 dport = msg_destport(buf_msg(skb)); | ||
1741 | int rc = TIPC_OK; | ||
1742 | uint limit; | ||
1743 | u32 dnode; | ||
1744 | 1772 | ||
1745 | /* Validate destination and message */ | 1773 | while (skb_queue_len(inputq)) { |
1746 | tsk = tipc_sk_get(dport); | 1774 | skb = NULL; |
1747 | if (unlikely(!tsk)) { | 1775 | dport = tipc_skb_peek_port(inputq, dport); |
1748 | rc = tipc_msg_eval(skb, &dnode); | 1776 | tsk = tipc_sk_lookup(net, dport); |
1749 | goto exit; | 1777 | if (likely(tsk)) { |
1778 | sk = &tsk->sk; | ||
1779 | if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { | ||
1780 | err = tipc_sk_enqueue(inputq, sk, dport, &skb); | ||
1781 | spin_unlock_bh(&sk->sk_lock.slock); | ||
1782 | dport = 0; | ||
1783 | } | ||
1784 | sock_put(sk); | ||
1785 | } else { | ||
1786 | skb = tipc_skb_dequeue(inputq, dport); | ||
1787 | } | ||
1788 | if (likely(!skb)) | ||
1789 | continue; | ||
1790 | if (tipc_msg_lookup_dest(net, skb, &dnode, &err)) | ||
1791 | goto xmit; | ||
1792 | if (!err) { | ||
1793 | dnode = msg_destnode(buf_msg(skb)); | ||
1794 | goto xmit; | ||
1795 | } | ||
1796 | tn = net_generic(net, tipc_net_id); | ||
1797 | if (!tipc_msg_reverse(tn->own_addr, skb, &dnode, -err)) | ||
1798 | continue; | ||
1799 | xmit: | ||
1800 | tipc_link_xmit_skb(net, skb, dnode, dport); | ||
1750 | } | 1801 | } |
1751 | sk = &tsk->sk; | 1802 | return err ? -EHOSTUNREACH : 0; |
1752 | |||
1753 | /* Queue message */ | ||
1754 | spin_lock_bh(&sk->sk_lock.slock); | ||
1755 | |||
1756 | if (!sock_owned_by_user(sk)) { | ||
1757 | rc = filter_rcv(sk, skb); | ||
1758 | } else { | ||
1759 | if (sk->sk_backlog.len == 0) | ||
1760 | atomic_set(&tsk->dupl_rcvcnt, 0); | ||
1761 | limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt); | ||
1762 | if (sk_add_backlog(sk, skb, limit)) | ||
1763 | rc = -TIPC_ERR_OVERLOAD; | ||
1764 | } | ||
1765 | spin_unlock_bh(&sk->sk_lock.slock); | ||
1766 | tipc_sk_put(tsk); | ||
1767 | if (likely(!rc)) | ||
1768 | return 0; | ||
1769 | exit: | ||
1770 | if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc)) | ||
1771 | return -EHOSTUNREACH; | ||
1772 | |||
1773 | tipc_link_xmit_skb(skb, dnode, 0); | ||
1774 | return (rc < 0) ? -EHOSTUNREACH : 0; | ||
1775 | } | 1803 | } |
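tipc_sk_rcv() now drains a whole list of buffers, grouping them by destination port and taking each socket's spinlock with spin_trylock_bh(); buffers for a contended socket are simply left on the shared inputq for a later pass, and tipc_sk_enqueue() above additionally bounds each socket's slice of work to roughly two jiffies. A simplified sketch of the trylock-or-defer idea, with a hypothetical helper name:

    #include <linux/skbuff.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <net/sock.h>

    /* Sketch: deliver without ever sleeping in the receive path.  If
     * another CPU holds the socket lock, leave the buffers on the shared
     * input queue; the lock owner, or the next pass, will pick them up.
     */
    static bool try_deliver(struct sock *sk, struct sk_buff_head *inputq,
                            u32 dport)
    {
            struct sk_buff *skb = NULL;
            int err;

            if (!spin_trylock_bh(&sk->sk_lock.slock))
                    return false;
            err = tipc_sk_enqueue(inputq, sk, dport, &skb);
            spin_unlock_bh(&sk->sk_lock.slock);
            /* a non-NULL skb must now be rejected or forwarded by the caller */
            return !err;
    }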
1776 | 1804 | ||
1777 | static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) | 1805 | static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) |
@@ -1809,17 +1837,26 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest, | |||
1809 | int destlen, int flags) | 1837 | int destlen, int flags) |
1810 | { | 1838 | { |
1811 | struct sock *sk = sock->sk; | 1839 | struct sock *sk = sock->sk; |
1840 | struct tipc_sock *tsk = tipc_sk(sk); | ||
1812 | struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; | 1841 | struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; |
1813 | struct msghdr m = {NULL,}; | 1842 | struct msghdr m = {NULL,}; |
1814 | long timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout; | 1843 | long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout; |
1815 | socket_state previous; | 1844 | socket_state previous; |
1816 | int res; | 1845 | int res = 0; |
1817 | 1846 | ||
1818 | lock_sock(sk); | 1847 | lock_sock(sk); |
1819 | 1848 | ||
1820 | /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */ | 1849 | /* DGRAM/RDM connect(), just save the destaddr */ |
1821 | if (sock->state == SS_READY) { | 1850 | if (sock->state == SS_READY) { |
1822 | res = -EOPNOTSUPP; | 1851 | if (dst->family == AF_UNSPEC) { |
1852 | memset(&tsk->remote, 0, sizeof(struct sockaddr_tipc)); | ||
1853 | tsk->connected = 0; | ||
1854 | } else if (destlen != sizeof(struct sockaddr_tipc)) { | ||
1855 | res = -EINVAL; | ||
1856 | } else { | ||
1857 | memcpy(&tsk->remote, dest, destlen); | ||
1858 | tsk->connected = 1; | ||
1859 | } | ||
1823 | goto exit; | 1860 | goto exit; |
1824 | } | 1861 | } |
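connect() on DGRAM/RDM (SS_READY) sockets no longer returns -EOPNOTSUPP: it records the destination in tsk->remote, or clears it again for AF_UNSPEC, and __tipc_sendmsg() above falls back to that address when sendmsg() is called without one. An application-side sketch of how this is used; the service type and instance values are illustrative only:

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/tipc.h>

    /* Sketch: point an RDM/DGRAM socket at a default destination so later
     * send() calls need no per-message address.
     */
    static int set_default_dest(int sd)
    {
            struct sockaddr_tipc dst;

            memset(&dst, 0, sizeof(dst));
            dst.family = AF_TIPC;
            dst.addrtype = TIPC_ADDR_NAME;
            dst.addr.name.name.type = 1000;
            dst.addr.name.name.instance = 1;
            return connect(sd, (struct sockaddr *)&dst, sizeof(dst));
    }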
1825 | 1862 | ||
@@ -1847,7 +1884,7 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest, | |||
1847 | if (!timeout) | 1884 | if (!timeout) |
1848 | m.msg_flags = MSG_DONTWAIT; | 1885 | m.msg_flags = MSG_DONTWAIT; |
1849 | 1886 | ||
1850 | res = tipc_sendmsg(NULL, sock, &m, 0); | 1887 | res = __tipc_sendmsg(sock, &m, 0); |
1851 | if ((res < 0) && (res != -EWOULDBLOCK)) | 1888 | if ((res < 0) && (res != -EWOULDBLOCK)) |
1852 | goto exit; | 1889 | goto exit; |
1853 | 1890 | ||
@@ -1927,12 +1964,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) | |||
1927 | err = -EINVAL; | 1964 | err = -EINVAL; |
1928 | if (sock->state != SS_LISTENING) | 1965 | if (sock->state != SS_LISTENING) |
1929 | break; | 1966 | break; |
1930 | err = sock_intr_errno(timeo); | ||
1931 | if (signal_pending(current)) | ||
1932 | break; | ||
1933 | err = -EAGAIN; | 1967 | err = -EAGAIN; |
1934 | if (!timeo) | 1968 | if (!timeo) |
1935 | break; | 1969 | break; |
1970 | err = sock_intr_errno(timeo); | ||
1971 | if (signal_pending(current)) | ||
1972 | break; | ||
1936 | } | 1973 | } |
1937 | finish_wait(sk_sleep(sk), &wait); | 1974 | finish_wait(sk_sleep(sk), &wait); |
1938 | return err; | 1975 | return err; |
@@ -2003,7 +2040,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) | |||
2003 | struct msghdr m = {NULL,}; | 2040 | struct msghdr m = {NULL,}; |
2004 | 2041 | ||
2005 | tsk_advance_rx_queue(sk); | 2042 | tsk_advance_rx_queue(sk); |
2006 | tipc_send_packet(NULL, new_sock, &m, 0); | 2043 | __tipc_send_stream(new_sock, &m, 0); |
2007 | } else { | 2044 | } else { |
2008 | __skb_dequeue(&sk->sk_receive_queue); | 2045 | __skb_dequeue(&sk->sk_receive_queue); |
2009 | __skb_queue_head(&new_sk->sk_receive_queue, buf); | 2046 | __skb_queue_head(&new_sk->sk_receive_queue, buf); |
@@ -2027,6 +2064,7 @@ exit: | |||
2027 | static int tipc_shutdown(struct socket *sock, int how) | 2064 | static int tipc_shutdown(struct socket *sock, int how) |
2028 | { | 2065 | { |
2029 | struct sock *sk = sock->sk; | 2066 | struct sock *sk = sock->sk; |
2067 | struct net *net = sock_net(sk); | ||
2030 | struct tipc_sock *tsk = tipc_sk(sk); | 2068 | struct tipc_sock *tsk = tipc_sk(sk); |
2031 | struct sk_buff *skb; | 2069 | struct sk_buff *skb; |
2032 | u32 dnode; | 2070 | u32 dnode; |
@@ -2049,21 +2087,23 @@ restart: | |||
2049 | kfree_skb(skb); | 2087 | kfree_skb(skb); |
2050 | goto restart; | 2088 | goto restart; |
2051 | } | 2089 | } |
2052 | if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN)) | 2090 | if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, |
2053 | tipc_link_xmit_skb(skb, dnode, tsk->ref); | 2091 | TIPC_CONN_SHUTDOWN)) |
2054 | tipc_node_remove_conn(dnode, tsk->ref); | 2092 | tipc_link_xmit_skb(net, skb, dnode, |
2093 | tsk->portid); | ||
2055 | } else { | 2094 | } else { |
2056 | dnode = tsk_peer_node(tsk); | 2095 | dnode = tsk_peer_node(tsk); |
2096 | |||
2057 | skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, | 2097 | skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, |
2058 | TIPC_CONN_MSG, SHORT_H_SIZE, | 2098 | TIPC_CONN_MSG, SHORT_H_SIZE, |
2059 | 0, dnode, tipc_own_addr, | 2099 | 0, dnode, tsk_own_node(tsk), |
2060 | tsk_peer_port(tsk), | 2100 | tsk_peer_port(tsk), |
2061 | tsk->ref, TIPC_CONN_SHUTDOWN); | 2101 | tsk->portid, TIPC_CONN_SHUTDOWN); |
2062 | tipc_link_xmit_skb(skb, dnode, tsk->ref); | 2102 | tipc_link_xmit_skb(net, skb, dnode, tsk->portid); |
2063 | } | 2103 | } |
2064 | tsk->connected = 0; | 2104 | tsk->connected = 0; |
2065 | sock->state = SS_DISCONNECTING; | 2105 | sock->state = SS_DISCONNECTING; |
2066 | tipc_node_remove_conn(dnode, tsk->ref); | 2106 | tipc_node_remove_conn(net, dnode, tsk->portid); |
2067 | /* fall through */ | 2107 | /* fall through */ |
2068 | 2108 | ||
2069 | case SS_DISCONNECTING: | 2109 | case SS_DISCONNECTING: |
@@ -2084,18 +2124,14 @@ restart: | |||
2084 | return res; | 2124 | return res; |
2085 | } | 2125 | } |
2086 | 2126 | ||
2087 | static void tipc_sk_timeout(unsigned long ref) | 2127 | static void tipc_sk_timeout(unsigned long data) |
2088 | { | 2128 | { |
2089 | struct tipc_sock *tsk; | 2129 | struct tipc_sock *tsk = (struct tipc_sock *)data; |
2090 | struct sock *sk; | 2130 | struct sock *sk = &tsk->sk; |
2091 | struct sk_buff *skb = NULL; | 2131 | struct sk_buff *skb = NULL; |
2092 | u32 peer_port, peer_node; | 2132 | u32 peer_port, peer_node; |
2133 | u32 own_node = tsk_own_node(tsk); | ||
2093 | 2134 | ||
2094 | tsk = tipc_sk_get(ref); | ||
2095 | if (!tsk) | ||
2096 | return; | ||
2097 | |||
2098 | sk = &tsk->sk; | ||
2099 | bh_lock_sock(sk); | 2135 | bh_lock_sock(sk); |
2100 | if (!tsk->connected) { | 2136 | if (!tsk->connected) { |
2101 | bh_unlock_sock(sk); | 2137 | bh_unlock_sock(sk); |
@@ -2106,38 +2142,39 @@ static void tipc_sk_timeout(unsigned long ref) | |||
2106 | 2142 | ||
2107 | if (tsk->probing_state == TIPC_CONN_PROBING) { | 2143 | if (tsk->probing_state == TIPC_CONN_PROBING) { |
2108 | /* Previous probe not answered -> self abort */ | 2144 | /* Previous probe not answered -> self abort */ |
2109 | skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, | 2145 | skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, |
2110 | SHORT_H_SIZE, 0, tipc_own_addr, | 2146 | TIPC_CONN_MSG, SHORT_H_SIZE, 0, |
2111 | peer_node, ref, peer_port, | 2147 | own_node, peer_node, tsk->portid, |
2112 | TIPC_ERR_NO_PORT); | 2148 | peer_port, TIPC_ERR_NO_PORT); |
2113 | } else { | 2149 | } else { |
2114 | skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, | 2150 | skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, |
2115 | 0, peer_node, tipc_own_addr, | 2151 | INT_H_SIZE, 0, peer_node, own_node, |
2116 | peer_port, ref, TIPC_OK); | 2152 | peer_port, tsk->portid, TIPC_OK); |
2117 | tsk->probing_state = TIPC_CONN_PROBING; | 2153 | tsk->probing_state = TIPC_CONN_PROBING; |
2118 | k_start_timer(&tsk->timer, tsk->probing_interval); | 2154 | sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv); |
2119 | } | 2155 | } |
2120 | bh_unlock_sock(sk); | 2156 | bh_unlock_sock(sk); |
2121 | if (skb) | 2157 | if (skb) |
2122 | tipc_link_xmit_skb(skb, peer_node, ref); | 2158 | tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid); |
2123 | exit: | 2159 | exit: |
2124 | tipc_sk_put(tsk); | 2160 | sock_put(sk); |
2125 | } | 2161 | } |
2126 | 2162 | ||
2127 | static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, | 2163 | static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, |
2128 | struct tipc_name_seq const *seq) | 2164 | struct tipc_name_seq const *seq) |
2129 | { | 2165 | { |
2166 | struct net *net = sock_net(&tsk->sk); | ||
2130 | struct publication *publ; | 2167 | struct publication *publ; |
2131 | u32 key; | 2168 | u32 key; |
2132 | 2169 | ||
2133 | if (tsk->connected) | 2170 | if (tsk->connected) |
2134 | return -EINVAL; | 2171 | return -EINVAL; |
2135 | key = tsk->ref + tsk->pub_count + 1; | 2172 | key = tsk->portid + tsk->pub_count + 1; |
2136 | if (key == tsk->ref) | 2173 | if (key == tsk->portid) |
2137 | return -EADDRINUSE; | 2174 | return -EADDRINUSE; |
2138 | 2175 | ||
2139 | publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, | 2176 | publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper, |
2140 | scope, tsk->ref, key); | 2177 | scope, tsk->portid, key); |
2141 | if (unlikely(!publ)) | 2178 | if (unlikely(!publ)) |
2142 | return -EINVAL; | 2179 | return -EINVAL; |
2143 | 2180 | ||
@@ -2150,6 +2187,7 @@ static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, | |||
2150 | static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, | 2187 | static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, |
2151 | struct tipc_name_seq const *seq) | 2188 | struct tipc_name_seq const *seq) |
2152 | { | 2189 | { |
2190 | struct net *net = sock_net(&tsk->sk); | ||
2153 | struct publication *publ; | 2191 | struct publication *publ; |
2154 | struct publication *safe; | 2192 | struct publication *safe; |
2155 | int rc = -EINVAL; | 2193 | int rc = -EINVAL; |
@@ -2164,12 +2202,12 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, | |||
2164 | continue; | 2202 | continue; |
2165 | if (publ->upper != seq->upper) | 2203 | if (publ->upper != seq->upper) |
2166 | break; | 2204 | break; |
2167 | tipc_nametbl_withdraw(publ->type, publ->lower, | 2205 | tipc_nametbl_withdraw(net, publ->type, publ->lower, |
2168 | publ->ref, publ->key); | 2206 | publ->ref, publ->key); |
2169 | rc = 0; | 2207 | rc = 0; |
2170 | break; | 2208 | break; |
2171 | } | 2209 | } |
2172 | tipc_nametbl_withdraw(publ->type, publ->lower, | 2210 | tipc_nametbl_withdraw(net, publ->type, publ->lower, |
2173 | publ->ref, publ->key); | 2211 | publ->ref, publ->key); |
2174 | rc = 0; | 2212 | rc = 0; |
2175 | } | 2213 | } |
@@ -2178,336 +2216,105 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, | |||
2178 | return rc; | 2216 | return rc; |
2179 | } | 2217 | } |
2180 | 2218 | ||
2181 | static int tipc_sk_show(struct tipc_sock *tsk, char *buf, | ||
2182 | int len, int full_id) | ||
2183 | { | ||
2184 | struct publication *publ; | ||
2185 | int ret; | ||
2186 | |||
2187 | if (full_id) | ||
2188 | ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:", | ||
2189 | tipc_zone(tipc_own_addr), | ||
2190 | tipc_cluster(tipc_own_addr), | ||
2191 | tipc_node(tipc_own_addr), tsk->ref); | ||
2192 | else | ||
2193 | ret = tipc_snprintf(buf, len, "%-10u:", tsk->ref); | ||
2194 | |||
2195 | if (tsk->connected) { | ||
2196 | u32 dport = tsk_peer_port(tsk); | ||
2197 | u32 destnode = tsk_peer_node(tsk); | ||
2198 | |||
2199 | ret += tipc_snprintf(buf + ret, len - ret, | ||
2200 | " connected to <%u.%u.%u:%u>", | ||
2201 | tipc_zone(destnode), | ||
2202 | tipc_cluster(destnode), | ||
2203 | tipc_node(destnode), dport); | ||
2204 | if (tsk->conn_type != 0) | ||
2205 | ret += tipc_snprintf(buf + ret, len - ret, | ||
2206 | " via {%u,%u}", tsk->conn_type, | ||
2207 | tsk->conn_instance); | ||
2208 | } else if (tsk->published) { | ||
2209 | ret += tipc_snprintf(buf + ret, len - ret, " bound to"); | ||
2210 | list_for_each_entry(publ, &tsk->publications, pport_list) { | ||
2211 | if (publ->lower == publ->upper) | ||
2212 | ret += tipc_snprintf(buf + ret, len - ret, | ||
2213 | " {%u,%u}", publ->type, | ||
2214 | publ->lower); | ||
2215 | else | ||
2216 | ret += tipc_snprintf(buf + ret, len - ret, | ||
2217 | " {%u,%u,%u}", publ->type, | ||
2218 | publ->lower, publ->upper); | ||
2219 | } | ||
2220 | } | ||
2221 | ret += tipc_snprintf(buf + ret, len - ret, "\n"); | ||
2222 | return ret; | ||
2223 | } | ||
2224 | |||
2225 | struct sk_buff *tipc_sk_socks_show(void) | ||
2226 | { | ||
2227 | struct sk_buff *buf; | ||
2228 | struct tlv_desc *rep_tlv; | ||
2229 | char *pb; | ||
2230 | int pb_len; | ||
2231 | struct tipc_sock *tsk; | ||
2232 | int str_len = 0; | ||
2233 | u32 ref = 0; | ||
2234 | |||
2235 | buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN)); | ||
2236 | if (!buf) | ||
2237 | return NULL; | ||
2238 | rep_tlv = (struct tlv_desc *)buf->data; | ||
2239 | pb = TLV_DATA(rep_tlv); | ||
2240 | pb_len = ULTRA_STRING_MAX_LEN; | ||
2241 | |||
2242 | tsk = tipc_sk_get_next(&ref); | ||
2243 | for (; tsk; tsk = tipc_sk_get_next(&ref)) { | ||
2244 | lock_sock(&tsk->sk); | ||
2245 | str_len += tipc_sk_show(tsk, pb + str_len, | ||
2246 | pb_len - str_len, 0); | ||
2247 | release_sock(&tsk->sk); | ||
2248 | tipc_sk_put(tsk); | ||
2249 | } | ||
2250 | str_len += 1; /* for "\0" */ | ||
2251 | skb_put(buf, TLV_SPACE(str_len)); | ||
2252 | TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); | ||
2253 | |||
2254 | return buf; | ||
2255 | } | ||
2256 | |||
2257 | /* tipc_sk_reinit: set non-zero address in all existing sockets | 2219 | /* tipc_sk_reinit: set non-zero address in all existing sockets |
2258 | * when we go from standalone to network mode. | 2220 | * when we go from standalone to network mode. |
2259 | */ | 2221 | */ |
2260 | void tipc_sk_reinit(void) | 2222 | void tipc_sk_reinit(struct net *net) |
2261 | { | 2223 | { |
2224 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
2225 | const struct bucket_table *tbl; | ||
2226 | struct rhash_head *pos; | ||
2227 | struct tipc_sock *tsk; | ||
2262 | struct tipc_msg *msg; | 2228 | struct tipc_msg *msg; |
2263 | u32 ref = 0; | 2229 | int i; |
2264 | struct tipc_sock *tsk = tipc_sk_get_next(&ref); | ||
2265 | 2230 | ||
2266 | for (; tsk; tsk = tipc_sk_get_next(&ref)) { | 2231 | rcu_read_lock(); |
2267 | lock_sock(&tsk->sk); | 2232 | tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); |
2268 | msg = &tsk->phdr; | 2233 | for (i = 0; i < tbl->size; i++) { |
2269 | msg_set_prevnode(msg, tipc_own_addr); | 2234 | rht_for_each_entry_rcu(tsk, pos, tbl, i, node) { |
2270 | msg_set_orignode(msg, tipc_own_addr); | 2235 | spin_lock_bh(&tsk->sk.sk_lock.slock); |
2271 | release_sock(&tsk->sk); | 2236 | msg = &tsk->phdr; |
2272 | tipc_sk_put(tsk); | 2237 | msg_set_prevnode(msg, tn->own_addr); |
2238 | msg_set_orignode(msg, tn->own_addr); | ||
2239 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | ||
2240 | } | ||
2273 | } | 2241 | } |
2242 | rcu_read_unlock(); | ||
2274 | } | 2243 | } |
2275 | 2244 | ||
2276 | /** | 2245 | static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) |
2277 | * struct reference - TIPC socket reference entry | ||
2278 | * @tsk: pointer to socket associated with reference entry | ||
2279 | * @ref: reference value for socket (combines instance & array index info) | ||
2280 | */ | ||
2281 | struct reference { | ||
2282 | struct tipc_sock *tsk; | ||
2283 | u32 ref; | ||
2284 | }; | ||
2285 | |||
2286 | /** | ||
2287 | * struct tipc_ref_table - table of TIPC socket reference entries | ||
2288 | * @entries: pointer to array of reference entries | ||
2289 | * @capacity: array index of first unusable entry | ||
2290 | * @init_point: array index of first uninitialized entry | ||
2291 | * @first_free: array index of first unused socket reference entry | ||
2292 | * @last_free: array index of last unused socket reference entry | ||
2293 | * @index_mask: bitmask for array index portion of reference values | ||
2294 | * @start_mask: initial value for instance value portion of reference values | ||
2295 | */ | ||
2296 | struct ref_table { | ||
2297 | struct reference *entries; | ||
2298 | u32 capacity; | ||
2299 | u32 init_point; | ||
2300 | u32 first_free; | ||
2301 | u32 last_free; | ||
2302 | u32 index_mask; | ||
2303 | u32 start_mask; | ||
2304 | }; | ||
2305 | |||
2306 | /* Socket reference table consists of 2**N entries. | ||
2307 | * | ||
2308 | * State Socket ptr Reference | ||
2309 | * ----- ---------- --------- | ||
2310 | * In use non-NULL XXXX|own index | ||
2311 | * (XXXX changes each time entry is acquired) | ||
2312 | * Free NULL YYYY|next free index | ||
2313 | * (YYYY is one more than last used XXXX) | ||
2314 | * Uninitialized NULL 0 | ||
2315 | * | ||
2316 | * Entry 0 is not used; this allows index 0 to denote the end of the free list. | ||
2317 | * | ||
2318 | * Note that a reference value of 0 does not necessarily indicate that an | ||
2319 | * entry is uninitialized, since the last entry in the free list could also | ||
2320 | * have a reference value of 0 (although this is unlikely). | ||
2321 | */ | ||
2322 | |||
2323 | static struct ref_table tipc_ref_table; | ||
2324 | |||
2325 | static DEFINE_RWLOCK(ref_table_lock); | ||
2326 | |||
2327 | /** | ||
2328 | * tipc_sk_ref_table_init - create reference table for sockets | ||
2329 | */ | ||
2330 | int tipc_sk_ref_table_init(u32 req_sz, u32 start) | ||
2331 | { | 2246 | { |
2332 | struct reference *table; | 2247 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
2333 | u32 actual_sz; | 2248 | struct tipc_sock *tsk; |
2334 | |||
2335 | /* account for unused entry, then round up size to a power of 2 */ | ||
2336 | |||
2337 | req_sz++; | ||
2338 | for (actual_sz = 16; actual_sz < req_sz; actual_sz <<= 1) { | ||
2339 | /* do nothing */ | ||
2340 | }; | ||
2341 | |||
2342 | /* allocate table & mark all entries as uninitialized */ | ||
2343 | table = vzalloc(actual_sz * sizeof(struct reference)); | ||
2344 | if (table == NULL) | ||
2345 | return -ENOMEM; | ||
2346 | |||
2347 | tipc_ref_table.entries = table; | ||
2348 | tipc_ref_table.capacity = req_sz; | ||
2349 | tipc_ref_table.init_point = 1; | ||
2350 | tipc_ref_table.first_free = 0; | ||
2351 | tipc_ref_table.last_free = 0; | ||
2352 | tipc_ref_table.index_mask = actual_sz - 1; | ||
2353 | tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask; | ||
2354 | 2249 | ||
2355 | return 0; | 2250 | rcu_read_lock(); |
2356 | } | 2251 | tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params); |
2252 | if (tsk) | ||
2253 | sock_hold(&tsk->sk); | ||
2254 | rcu_read_unlock(); | ||
2357 | 2255 | ||
2358 | /** | 2256 | return tsk; |
2359 | * tipc_sk_ref_table_stop - destroy reference table for sockets | ||
2360 | */ | ||
2361 | void tipc_sk_ref_table_stop(void) | ||
2362 | { | ||
2363 | if (!tipc_ref_table.entries) | ||
2364 | return; | ||
2365 | vfree(tipc_ref_table.entries); | ||
2366 | tipc_ref_table.entries = NULL; | ||
2367 | } | 2257 | } |
2368 | 2258 | ||
2369 | /* tipc_sk_ref_acquire - create reference to a socket | 2259 | static int tipc_sk_insert(struct tipc_sock *tsk)
2370 | * | ||
2371 | * Register a socket pointer in the reference table. | ||
2372 | * Returns a unique reference value that is used from then on to retrieve the | ||
2373 | * socket pointer, or to determine if the socket has been deregistered. | ||
2374 | */ | ||
2375 | u32 tipc_sk_ref_acquire(struct tipc_sock *tsk) | ||
2376 | { | 2260 | { |
2377 | u32 index; | 2261 | struct sock *sk = &tsk->sk; |
2378 | u32 index_mask; | 2262 | struct net *net = sock_net(sk); |
2379 | u32 next_plus_upper; | 2263 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
2380 | u32 ref = 0; | 2264 | u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1; |
2381 | struct reference *entry; | 2265 | u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT; |
2382 | 2266 | ||
2383 | if (unlikely(!tsk)) { | 2267 | while (remaining--) { |
2384 | pr_err("Attempt to acquire ref. to non-existent obj\n"); | 2268 | portid++; |
2385 | return 0; | 2269 | if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT)) |
2386 | } | 2270 | portid = TIPC_MIN_PORT; |
2387 | if (unlikely(!tipc_ref_table.entries)) { | 2271 | tsk->portid = portid; |
2388 | pr_err("Ref. table not found in acquisition attempt\n"); | 2272 | sock_hold(&tsk->sk); |
2389 | return 0; | 2273 | if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node, |
2390 | } | 2274 | tsk_rht_params)) |
2391 | 2275 | return 0; | |
2392 | /* Take a free entry, if available; otherwise initialize a new one */ | 2276 | sock_put(&tsk->sk); |
2393 | write_lock_bh(&ref_table_lock); | ||
2394 | index = tipc_ref_table.first_free; | ||
2395 | entry = &tipc_ref_table.entries[index]; | ||
2396 | |||
2397 | if (likely(index)) { | ||
2398 | index = tipc_ref_table.first_free; | ||
2399 | entry = &tipc_ref_table.entries[index]; | ||
2400 | index_mask = tipc_ref_table.index_mask; | ||
2401 | next_plus_upper = entry->ref; | ||
2402 | tipc_ref_table.first_free = next_plus_upper & index_mask; | ||
2403 | ref = (next_plus_upper & ~index_mask) + index; | ||
2404 | entry->tsk = tsk; | ||
2405 | } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { | ||
2406 | index = tipc_ref_table.init_point++; | ||
2407 | entry = &tipc_ref_table.entries[index]; | ||
2408 | ref = tipc_ref_table.start_mask + index; | ||
2409 | } | 2277 | } |
2410 | 2278 | ||
2411 | if (ref) { | 2279 | return -1; |
2412 | entry->ref = ref; | ||
2413 | entry->tsk = tsk; | ||
2414 | } | ||
2415 | write_unlock_bh(&ref_table_lock); | ||
2416 | return ref; | ||
2417 | } | 2280 | } |
2418 | 2281 | ||
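tipc_sk_insert() above picks a random starting point inside [TIPC_MIN_PORT, TIPC_MAX_PORT] and then probes forward with wrap-around until rhashtable_lookup_insert_fast() accepts the portid as unique, so every live socket ends up with a distinct port reference. A stripped-down sketch of the same allocation idea in plain C, with a hypothetical try_claim() standing in for the hash-table insert:

	#include <stdbool.h>
	#include <stdlib.h>

	#define PORT_MIN 1024u
	#define PORT_MAX 65535u

	/* hypothetical: returns true if 'port' was free and is now taken */
	bool try_claim(unsigned int port);

	/* returns a claimed port, or 0 if the whole range is exhausted */
	unsigned int alloc_port(void)
	{
		unsigned int remaining = PORT_MAX - PORT_MIN + 1;
		unsigned int port = rand() % remaining + PORT_MIN; /* random start */

		while (remaining--) {
			port++;
			if (port < PORT_MIN || port > PORT_MAX)
				port = PORT_MIN;	/* wrap around */
			if (try_claim(port))
				return port;		/* unique portid found */
		}
		return 0;
	}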
2419 | /* tipc_sk_ref_discard - invalidate reference to a socket | 2282 | static void tipc_sk_remove(struct tipc_sock *tsk)
2420 | * | ||
2421 | * Disallow future references to a socket and free up the entry for re-use. | ||
2422 | */ | ||
2423 | void tipc_sk_ref_discard(u32 ref) | ||
2424 | { | 2283 | { |
2425 | struct reference *entry; | 2284 | struct sock *sk = &tsk->sk; |
2426 | u32 index; | 2285 | struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); |
2427 | u32 index_mask; | ||
2428 | |||
2429 | if (unlikely(!tipc_ref_table.entries)) { | ||
2430 | pr_err("Ref. table not found during discard attempt\n"); | ||
2431 | return; | ||
2432 | } | ||
2433 | |||
2434 | index_mask = tipc_ref_table.index_mask; | ||
2435 | index = ref & index_mask; | ||
2436 | entry = &tipc_ref_table.entries[index]; | ||
2437 | |||
2438 | write_lock_bh(&ref_table_lock); | ||
2439 | 2286 | ||
2440 | if (unlikely(!entry->tsk)) { | 2287 | if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) { |
2441 | pr_err("Attempt to discard ref. to non-existent socket\n"); | 2288 | WARN_ON(atomic_read(&sk->sk_refcnt) == 1); |
2442 | goto exit; | 2289 | __sock_put(sk); |
2443 | } | 2290 | } |
2444 | if (unlikely(entry->ref != ref)) { | ||
2445 | pr_err("Attempt to discard non-existent reference\n"); | ||
2446 | goto exit; | ||
2447 | } | ||
2448 | |||
2449 | /* Mark entry as unused; increment instance part of entry's | ||
2450 | * reference to invalidate any subsequent references | ||
2451 | */ | ||
2452 | |||
2453 | entry->tsk = NULL; | ||
2454 | entry->ref = (ref & ~index_mask) + (index_mask + 1); | ||
2455 | |||
2456 | /* Append entry to free entry list */ | ||
2457 | if (unlikely(tipc_ref_table.first_free == 0)) | ||
2458 | tipc_ref_table.first_free = index; | ||
2459 | else | ||
2460 | tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index; | ||
2461 | tipc_ref_table.last_free = index; | ||
2462 | exit: | ||
2463 | write_unlock_bh(&ref_table_lock); | ||
2464 | } | 2291 | } |
2465 | 2292 | ||
2466 | /* tipc_sk_get - find referenced socket and return pointer to it | 2293 | static const struct rhashtable_params tsk_rht_params = { |
2467 | */ | 2294 | .nelem_hint = 192, |
2468 | struct tipc_sock *tipc_sk_get(u32 ref) | 2295 | .head_offset = offsetof(struct tipc_sock, node), |
2296 | .key_offset = offsetof(struct tipc_sock, portid), | ||
2297 | .key_len = sizeof(u32), /* portid */ | ||
2298 | .max_size = 1048576, | ||
2299 | .min_size = 256, | ||
2300 | .automatic_shrinking = true, | ||
2301 | }; | ||
2302 | |||
2303 | int tipc_sk_rht_init(struct net *net) | ||
2469 | { | 2304 | { |
2470 | struct reference *entry; | 2305 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
2471 | struct tipc_sock *tsk; | ||
2472 | 2306 | ||
2473 | if (unlikely(!tipc_ref_table.entries)) | 2307 | return rhashtable_init(&tn->sk_rht, &tsk_rht_params); |
2474 | return NULL; | ||
2475 | read_lock_bh(&ref_table_lock); | ||
2476 | entry = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask]; | ||
2477 | tsk = entry->tsk; | ||
2478 | if (likely(tsk && (entry->ref == ref))) | ||
2479 | sock_hold(&tsk->sk); | ||
2480 | else | ||
2481 | tsk = NULL; | ||
2482 | read_unlock_bh(&ref_table_lock); | ||
2483 | return tsk; | ||
2484 | } | 2308 | } |
2485 | 2309 | ||
2486 | /* tipc_sk_get_next - lock & return next socket after referenced one | 2310 | void tipc_sk_rht_destroy(struct net *net) |
2487 | */ | ||
2488 | struct tipc_sock *tipc_sk_get_next(u32 *ref) | ||
2489 | { | 2311 | { |
2490 | struct reference *entry; | 2312 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
2491 | struct tipc_sock *tsk = NULL; | ||
2492 | uint index = *ref & tipc_ref_table.index_mask; | ||
2493 | 2313 | ||
2494 | read_lock_bh(&ref_table_lock); | 2314 | /* Wait for socket readers to complete */ |
2495 | while (++index < tipc_ref_table.capacity) { | 2315 | synchronize_net(); |
2496 | entry = &tipc_ref_table.entries[index]; | ||
2497 | if (!entry->tsk) | ||
2498 | continue; | ||
2499 | tsk = entry->tsk; | ||
2500 | sock_hold(&tsk->sk); | ||
2501 | *ref = entry->ref; | ||
2502 | break; | ||
2503 | } | ||
2504 | read_unlock_bh(&ref_table_lock); | ||
2505 | return tsk; | ||
2506 | } | ||
2507 | 2316 | ||
2508 | static void tipc_sk_put(struct tipc_sock *tsk) | 2317 | rhashtable_destroy(&tn->sk_rht); |
2509 | { | ||
2510 | sock_put(&tsk->sk); | ||
2511 | } | 2318 | } |
2512 | 2319 | ||
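tsk_rht_params, tipc_sk_rht_init() and tipc_sk_rht_destroy() above are the standard fixed-length-key rhashtable setup: the u32 portid embedded in the socket is the key, and lookups resolve it back to the object. A minimal hedged sketch of the same API against a hypothetical struct obj (not the TIPC parameters):

	#include <linux/rhashtable.h>
	#include <linux/rcupdate.h>

	struct obj {
		u32 key;
		struct rhash_head node;
	};

	static const struct rhashtable_params obj_rht_params = {
		.head_offset = offsetof(struct obj, node),
		.key_offset  = offsetof(struct obj, key),
		.key_len     = sizeof(u32),
		.automatic_shrinking = true,
	};

	static int obj_table_demo(struct rhashtable *ht, struct obj *o, u32 key)
	{
		struct obj *found;
		int err;

		err = rhashtable_init(ht, &obj_rht_params);
		if (err)
			return err;

		o->key = key;
		err = rhashtable_insert_fast(ht, &o->node, obj_rht_params);
		if (!err) {
			rcu_read_lock();
			found = rhashtable_lookup_fast(ht, &key, obj_rht_params);
			rcu_read_unlock();
			if (found == o)
				rhashtable_remove_fast(ht, &o->node, obj_rht_params);
		}
		rhashtable_destroy(ht);
		return err;
	}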
2513 | /** | 2320 | /** |
@@ -2639,8 +2446,9 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt, | |||
2639 | return put_user(sizeof(value), ol); | 2446 | return put_user(sizeof(value), ol); |
2640 | } | 2447 | } |
2641 | 2448 | ||
2642 | static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg) | 2449 | static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
2643 | { | 2450 | { |
2451 | struct sock *sk = sock->sk; | ||
2644 | struct tipc_sioc_ln_req lnr; | 2452 | struct tipc_sioc_ln_req lnr; |
2645 | void __user *argp = (void __user *)arg; | 2453 | void __user *argp = (void __user *)arg; |
2646 | 2454 | ||
@@ -2648,7 +2456,8 @@ static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg) | |||
2648 | case SIOCGETLINKNAME: | 2456 | case SIOCGETLINKNAME: |
2649 | if (copy_from_user(&lnr, argp, sizeof(lnr))) | 2457 | if (copy_from_user(&lnr, argp, sizeof(lnr))) |
2650 | return -EFAULT; | 2458 | return -EFAULT; |
2651 | if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer, | 2459 | if (!tipc_node_get_linkname(sock_net(sk), |
2460 | lnr.bearer_id & 0xffff, lnr.peer, | ||
2652 | lnr.linkname, TIPC_MAX_LINK_NAME)) { | 2461 | lnr.linkname, TIPC_MAX_LINK_NAME)) { |
2653 | if (copy_to_user(argp, &lnr, sizeof(lnr))) | 2462 | if (copy_to_user(argp, &lnr, sizeof(lnr))) |
2654 | return -EFAULT; | 2463 | return -EFAULT; |
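The SIOCGETLINKNAME branch above follows the usual shape of an ioctl that both reads and writes a request structure: copy the whole struct in from user space, fill in the answer, and copy it back out, returning -EFAULT on any failed copy. A generic hedged sketch of that pattern (struct my_req and fill_reply() are hypothetical, not TIPC interfaces):

	#include <linux/uaccess.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	struct my_req {
		u32 id;
		char name[16];
	};

	/* hypothetical helper: fills in the reply, returns 0 on success */
	int fill_reply(struct my_req *req);

	static long my_ioctl(unsigned int cmd, unsigned long arg)
	{
		void __user *argp = (void __user *)arg;
		struct my_req req;

		if (copy_from_user(&req, argp, sizeof(req)))
			return -EFAULT;		/* bad user pointer */
		if (fill_reply(&req))
			return -EINVAL;		/* request could not be served */
		if (copy_to_user(argp, &req, sizeof(req)))
			return -EFAULT;		/* writing the answer back failed */
		return 0;
	}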
@@ -2738,12 +2547,6 @@ static struct proto tipc_proto = { | |||
2738 | .sysctl_rmem = sysctl_tipc_rmem | 2547 | .sysctl_rmem = sysctl_tipc_rmem |
2739 | }; | 2548 | }; |
2740 | 2549 | ||
2741 | static struct proto tipc_proto_kern = { | ||
2742 | .name = "TIPC", | ||
2743 | .obj_size = sizeof(struct tipc_sock), | ||
2744 | .sysctl_rmem = sysctl_tipc_rmem | ||
2745 | }; | ||
2746 | |||
2747 | /** | 2550 | /** |
2748 | * tipc_socket_init - initialize TIPC socket interface | 2551 | * tipc_socket_init - initialize TIPC socket interface |
2749 | * | 2552 | * |
@@ -2820,18 +2623,20 @@ static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb, | |||
2820 | int err; | 2623 | int err; |
2821 | void *hdr; | 2624 | void *hdr; |
2822 | struct nlattr *attrs; | 2625 | struct nlattr *attrs; |
2626 | struct net *net = sock_net(skb->sk); | ||
2627 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
2823 | 2628 | ||
2824 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, | 2629 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, |
2825 | &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); | 2630 | &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); |
2826 | if (!hdr) | 2631 | if (!hdr) |
2827 | goto msg_cancel; | 2632 | goto msg_cancel; |
2828 | 2633 | ||
2829 | attrs = nla_nest_start(skb, TIPC_NLA_SOCK); | 2634 | attrs = nla_nest_start(skb, TIPC_NLA_SOCK); |
2830 | if (!attrs) | 2635 | if (!attrs) |
2831 | goto genlmsg_cancel; | 2636 | goto genlmsg_cancel; |
2832 | if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->ref)) | 2637 | if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid)) |
2833 | goto attr_msg_cancel; | 2638 | goto attr_msg_cancel; |
2834 | if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr)) | 2639 | if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr)) |
2835 | goto attr_msg_cancel; | 2640 | goto attr_msg_cancel; |
2836 | 2641 | ||
2837 | if (tsk->connected) { | 2642 | if (tsk->connected) { |
@@ -2859,22 +2664,37 @@ int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2859 | { | 2664 | { |
2860 | int err; | 2665 | int err; |
2861 | struct tipc_sock *tsk; | 2666 | struct tipc_sock *tsk; |
2862 | u32 prev_ref = cb->args[0]; | 2667 | const struct bucket_table *tbl; |
2863 | u32 ref = prev_ref; | 2668 | struct rhash_head *pos; |
2864 | 2669 | struct net *net = sock_net(skb->sk); | |
2865 | tsk = tipc_sk_get_next(&ref); | 2670 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
2866 | for (; tsk; tsk = tipc_sk_get_next(&ref)) { | 2671 | u32 tbl_id = cb->args[0]; |
2867 | lock_sock(&tsk->sk); | 2672 | u32 prev_portid = cb->args[1]; |
2868 | err = __tipc_nl_add_sk(skb, cb, tsk); | ||
2869 | release_sock(&tsk->sk); | ||
2870 | tipc_sk_put(tsk); | ||
2871 | if (err) | ||
2872 | break; | ||
2873 | 2673 | ||
2874 | prev_ref = ref; | 2674 | rcu_read_lock(); |
2875 | } | 2675 | tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); |
2676 | for (; tbl_id < tbl->size; tbl_id++) { | ||
2677 | rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { | ||
2678 | spin_lock_bh(&tsk->sk.sk_lock.slock); | ||
2679 | if (prev_portid && prev_portid != tsk->portid) { | ||
2680 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | ||
2681 | continue; | ||
2682 | } | ||
2876 | 2683 | ||
2877 | cb->args[0] = prev_ref; | 2684 | err = __tipc_nl_add_sk(skb, cb, tsk); |
2685 | if (err) { | ||
2686 | prev_portid = tsk->portid; | ||
2687 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | ||
2688 | goto out; | ||
2689 | } | ||
2690 | prev_portid = 0; | ||
2691 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | ||
2692 | } | ||
2693 | } | ||
2694 | out: | ||
2695 | rcu_read_unlock(); | ||
2696 | cb->args[0] = tbl_id; | ||
2697 | cb->args[1] = prev_portid; | ||
2878 | 2698 | ||
2879 | return skb->len; | 2699 | return skb->len; |
2880 | } | 2700 | } |
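tipc_nl_sk_dump() above keeps its iteration state (the bucket index in cb->args[0] and the last portid in cb->args[1]) so that a dump cut short by a full skb resumes exactly where it stopped on the next netlink callback. A simplified hedged sketch of that resume pattern over a hypothetical fixed-size table (MY_TABLE_SIZE and my_put_entry() are assumptions):

	#include <linux/skbuff.h>
	#include <linux/netlink.h>

	#define MY_TABLE_SIZE 128

	/* hypothetical: emits one entry, returns -EMSGSIZE when the skb is full */
	int my_put_entry(struct sk_buff *skb, struct netlink_callback *cb, int idx);

	static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		int idx = cb->args[0];		/* where the previous pass stopped */

		for (; idx < MY_TABLE_SIZE; idx++) {
			if (my_put_entry(skb, cb, idx))
				break;		/* skb full: remember idx */
		}
		cb->args[0] = idx;		/* resume point for the next call */
		return skb->len;		/* non-zero means "call me again" */
	}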
@@ -2888,7 +2708,7 @@ static int __tipc_nl_add_sk_publ(struct sk_buff *skb, | |||
2888 | struct nlattr *attrs; | 2708 | struct nlattr *attrs; |
2889 | 2709 | ||
2890 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, | 2710 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, |
2891 | &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); | 2711 | &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); |
2892 | if (!hdr) | 2712 | if (!hdr) |
2893 | goto msg_cancel; | 2713 | goto msg_cancel; |
2894 | 2714 | ||
@@ -2962,12 +2782,13 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb, | |||
2962 | int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) | 2782 | int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) |
2963 | { | 2783 | { |
2964 | int err; | 2784 | int err; |
2965 | u32 tsk_ref = cb->args[0]; | 2785 | u32 tsk_portid = cb->args[0]; |
2966 | u32 last_publ = cb->args[1]; | 2786 | u32 last_publ = cb->args[1]; |
2967 | u32 done = cb->args[2]; | 2787 | u32 done = cb->args[2]; |
2788 | struct net *net = sock_net(skb->sk); | ||
2968 | struct tipc_sock *tsk; | 2789 | struct tipc_sock *tsk; |
2969 | 2790 | ||
2970 | if (!tsk_ref) { | 2791 | if (!tsk_portid) { |
2971 | struct nlattr **attrs; | 2792 | struct nlattr **attrs; |
2972 | struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; | 2793 | struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; |
2973 | 2794 | ||
@@ -2984,13 +2805,13 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2984 | if (!sock[TIPC_NLA_SOCK_REF]) | 2805 | if (!sock[TIPC_NLA_SOCK_REF]) |
2985 | return -EINVAL; | 2806 | return -EINVAL; |
2986 | 2807 | ||
2987 | tsk_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); | 2808 | tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); |
2988 | } | 2809 | } |
2989 | 2810 | ||
2990 | if (done) | 2811 | if (done) |
2991 | return 0; | 2812 | return 0; |
2992 | 2813 | ||
2993 | tsk = tipc_sk_get(tsk_ref); | 2814 | tsk = tipc_sk_lookup(net, tsk_portid); |
2994 | if (!tsk) | 2815 | if (!tsk) |
2995 | return -EINVAL; | 2816 | return -EINVAL; |
2996 | 2817 | ||
@@ -2999,9 +2820,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2999 | if (!err) | 2820 | if (!err) |
3000 | done = 1; | 2821 | done = 1; |
3001 | release_sock(&tsk->sk); | 2822 | release_sock(&tsk->sk); |
3002 | tipc_sk_put(tsk); | 2823 | sock_put(&tsk->sk); |
3003 | 2824 | ||
3004 | cb->args[0] = tsk_ref; | 2825 | cb->args[0] = tsk_portid; |
3005 | cb->args[1] = last_publ; | 2826 | cb->args[1] = last_publ; |
3006 | cb->args[2] = done; | 2827 | cb->args[2] = done; |
3007 | 2828 | ||
diff --git a/net/tipc/socket.h b/net/tipc/socket.h index d34089387006..bf6551389522 100644 --- a/net/tipc/socket.h +++ b/net/tipc/socket.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* net/tipc/socket.h: Include file for TIPC socket code | 1 | /* net/tipc/socket.h: Include file for TIPC socket code |
2 | * | 2 | * |
3 | * Copyright (c) 2014, Ericsson AB | 3 | * Copyright (c) 2014-2015, Ericsson AB |
4 | * All rights reserved. | 4 | * All rights reserved. |
5 | * | 5 | * |
6 | * Redistribution and use in source and binary forms, with or without | 6 | * Redistribution and use in source and binary forms, with or without |
@@ -42,12 +42,14 @@ | |||
42 | #define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2) | 42 | #define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2) |
43 | #define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \ | 43 | #define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \ |
44 | SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) | 44 | SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) |
45 | int tipc_sk_rcv(struct sk_buff *buf); | 45 | int tipc_socket_init(void); |
46 | struct sk_buff *tipc_sk_socks_show(void); | 46 | void tipc_socket_stop(void); |
47 | void tipc_sk_mcast_rcv(struct sk_buff *buf); | 47 | int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq); |
48 | void tipc_sk_reinit(void); | 48 | void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, |
49 | int tipc_sk_ref_table_init(u32 requested_size, u32 start); | 49 | struct sk_buff_head *inputq); |
50 | void tipc_sk_ref_table_stop(void); | 50 | void tipc_sk_reinit(struct net *net); |
51 | int tipc_sk_rht_init(struct net *net); | ||
52 | void tipc_sk_rht_destroy(struct net *net); | ||
51 | int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb); | 53 | int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb); |
52 | int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb); | 54 | int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb); |
53 | 55 | ||
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 0344206b984f..1c147c869c2e 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -50,33 +50,6 @@ struct tipc_subscriber { | |||
50 | struct list_head subscription_list; | 50 | struct list_head subscription_list; |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr, | ||
54 | void *usr_data, void *buf, size_t len); | ||
55 | static void *subscr_named_msg_event(int conid); | ||
56 | static void subscr_conn_shutdown_event(int conid, void *usr_data); | ||
57 | |||
58 | static atomic_t subscription_count = ATOMIC_INIT(0); | ||
59 | |||
60 | static struct sockaddr_tipc topsrv_addr __read_mostly = { | ||
61 | .family = AF_TIPC, | ||
62 | .addrtype = TIPC_ADDR_NAMESEQ, | ||
63 | .addr.nameseq.type = TIPC_TOP_SRV, | ||
64 | .addr.nameseq.lower = TIPC_TOP_SRV, | ||
65 | .addr.nameseq.upper = TIPC_TOP_SRV, | ||
66 | .scope = TIPC_NODE_SCOPE | ||
67 | }; | ||
68 | |||
69 | static struct tipc_server topsrv __read_mostly = { | ||
70 | .saddr = &topsrv_addr, | ||
71 | .imp = TIPC_CRITICAL_IMPORTANCE, | ||
72 | .type = SOCK_SEQPACKET, | ||
73 | .max_rcvbuf_size = sizeof(struct tipc_subscr), | ||
74 | .name = "topology_server", | ||
75 | .tipc_conn_recvmsg = subscr_conn_msg_event, | ||
76 | .tipc_conn_new = subscr_named_msg_event, | ||
77 | .tipc_conn_shutdown = subscr_conn_shutdown_event, | ||
78 | }; | ||
79 | |||
80 | /** | 53 | /** |
81 | * htohl - convert value to endianness used by destination | 54 | * htohl - convert value to endianness used by destination |
82 | * @in: value to convert | 55 | * @in: value to convert |
@@ -93,6 +66,7 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower, | |||
93 | u32 found_upper, u32 event, u32 port_ref, | 66 | u32 found_upper, u32 event, u32 port_ref, |
94 | u32 node) | 67 | u32 node) |
95 | { | 68 | { |
69 | struct tipc_net *tn = net_generic(sub->net, tipc_net_id); | ||
96 | struct tipc_subscriber *subscriber = sub->subscriber; | 70 | struct tipc_subscriber *subscriber = sub->subscriber; |
97 | struct kvec msg_sect; | 71 | struct kvec msg_sect; |
98 | 72 | ||
@@ -103,8 +77,8 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower, | |||
103 | sub->evt.found_upper = htohl(found_upper, sub->swap); | 77 | sub->evt.found_upper = htohl(found_upper, sub->swap); |
104 | sub->evt.port.ref = htohl(port_ref, sub->swap); | 78 | sub->evt.port.ref = htohl(port_ref, sub->swap); |
105 | sub->evt.port.node = htohl(node, sub->swap); | 79 | sub->evt.port.node = htohl(node, sub->swap); |
106 | tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base, | 80 | tipc_conn_sendmsg(tn->topsrv, subscriber->conid, NULL, |
107 | msg_sect.iov_len); | 81 | msg_sect.iov_base, msg_sect.iov_len); |
108 | } | 82 | } |
109 | 83 | ||
110 | /** | 84 | /** |
@@ -141,9 +115,11 @@ void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower, | |||
141 | subscr_send_event(sub, found_lower, found_upper, event, port_ref, node); | 115 | subscr_send_event(sub, found_lower, found_upper, event, port_ref, node); |
142 | } | 116 | } |
143 | 117 | ||
144 | static void subscr_timeout(struct tipc_subscription *sub) | 118 | static void subscr_timeout(unsigned long data) |
145 | { | 119 | { |
120 | struct tipc_subscription *sub = (struct tipc_subscription *)data; | ||
146 | struct tipc_subscriber *subscriber = sub->subscriber; | 121 | struct tipc_subscriber *subscriber = sub->subscriber; |
122 | struct tipc_net *tn = net_generic(sub->net, tipc_net_id); | ||
147 | 123 | ||
148 | /* The spin lock per subscriber is used to protect its members */ | 124 | /* The spin lock per subscriber is used to protect its members */ |
149 | spin_lock_bh(&subscriber->lock); | 125 | spin_lock_bh(&subscriber->lock); |
@@ -167,9 +143,8 @@ static void subscr_timeout(struct tipc_subscription *sub) | |||
167 | TIPC_SUBSCR_TIMEOUT, 0, 0); | 143 | TIPC_SUBSCR_TIMEOUT, 0, 0); |
168 | 144 | ||
169 | /* Now destroy subscription */ | 145 | /* Now destroy subscription */ |
170 | k_term_timer(&sub->timer); | ||
171 | kfree(sub); | 146 | kfree(sub); |
172 | atomic_dec(&subscription_count); | 147 | atomic_dec(&tn->subscription_count); |
173 | } | 148 | } |
174 | 149 | ||
175 | /** | 150 | /** |
@@ -179,20 +154,12 @@ static void subscr_timeout(struct tipc_subscription *sub) | |||
179 | */ | 154 | */ |
180 | static void subscr_del(struct tipc_subscription *sub) | 155 | static void subscr_del(struct tipc_subscription *sub) |
181 | { | 156 | { |
157 | struct tipc_net *tn = net_generic(sub->net, tipc_net_id); | ||
158 | |||
182 | tipc_nametbl_unsubscribe(sub); | 159 | tipc_nametbl_unsubscribe(sub); |
183 | list_del(&sub->subscription_list); | 160 | list_del(&sub->subscription_list); |
184 | kfree(sub); | 161 | kfree(sub); |
185 | atomic_dec(&subscription_count); | 162 | atomic_dec(&tn->subscription_count); |
186 | } | ||
187 | |||
188 | /** | ||
189 | * subscr_terminate - terminate communication with a subscriber | ||
190 | * | ||
191 | * Note: Must call it in process context since it might sleep. | ||
192 | */ | ||
193 | static void subscr_terminate(struct tipc_subscriber *subscriber) | ||
194 | { | ||
195 | tipc_conn_terminate(&topsrv, subscriber->conid); | ||
196 | } | 163 | } |
197 | 164 | ||
198 | static void subscr_release(struct tipc_subscriber *subscriber) | 165 | static void subscr_release(struct tipc_subscriber *subscriber) |
@@ -207,8 +174,7 @@ static void subscr_release(struct tipc_subscriber *subscriber) | |||
207 | subscription_list) { | 174 | subscription_list) { |
208 | if (sub->timeout != TIPC_WAIT_FOREVER) { | 175 | if (sub->timeout != TIPC_WAIT_FOREVER) { |
209 | spin_unlock_bh(&subscriber->lock); | 176 | spin_unlock_bh(&subscriber->lock); |
210 | k_cancel_timer(&sub->timer); | 177 | del_timer_sync(&sub->timer); |
211 | k_term_timer(&sub->timer); | ||
212 | spin_lock_bh(&subscriber->lock); | 178 | spin_lock_bh(&subscriber->lock); |
213 | } | 179 | } |
214 | subscr_del(sub); | 180 | subscr_del(sub); |
@@ -250,8 +216,7 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
250 | if (sub->timeout != TIPC_WAIT_FOREVER) { | 216 | if (sub->timeout != TIPC_WAIT_FOREVER) { |
251 | sub->timeout = TIPC_WAIT_FOREVER; | 217 | sub->timeout = TIPC_WAIT_FOREVER; |
252 | spin_unlock_bh(&subscriber->lock); | 218 | spin_unlock_bh(&subscriber->lock); |
253 | k_cancel_timer(&sub->timer); | 219 | del_timer_sync(&sub->timer); |
254 | k_term_timer(&sub->timer); | ||
255 | spin_lock_bh(&subscriber->lock); | 220 | spin_lock_bh(&subscriber->lock); |
256 | } | 221 | } |
257 | subscr_del(sub); | 222 | subscr_del(sub); |
@@ -262,9 +227,11 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
262 | * | 227 | * |
263 | * Called with subscriber lock held. | 228 | * Called with subscriber lock held. |
264 | */ | 229 | */ |
265 | static int subscr_subscribe(struct tipc_subscr *s, | 230 | static int subscr_subscribe(struct net *net, struct tipc_subscr *s, |
266 | struct tipc_subscriber *subscriber, | 231 | struct tipc_subscriber *subscriber, |
267 | struct tipc_subscription **sub_p) { | 232 | struct tipc_subscription **sub_p) |
233 | { | ||
234 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
268 | struct tipc_subscription *sub; | 235 | struct tipc_subscription *sub; |
269 | int swap; | 236 | int swap; |
270 | 237 | ||
@@ -279,7 +246,7 @@ static int subscr_subscribe(struct tipc_subscr *s, | |||
279 | } | 246 | } |
280 | 247 | ||
281 | /* Refuse subscription if global limit exceeded */ | 248 | /* Refuse subscription if global limit exceeded */ |
282 | if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) { | 249 | if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) { |
283 | pr_warn("Subscription rejected, limit reached (%u)\n", | 250 | pr_warn("Subscription rejected, limit reached (%u)\n", |
284 | TIPC_MAX_SUBSCRIPTIONS); | 251 | TIPC_MAX_SUBSCRIPTIONS); |
285 | return -EINVAL; | 252 | return -EINVAL; |
@@ -293,10 +260,11 @@ static int subscr_subscribe(struct tipc_subscr *s, | |||
293 | } | 260 | } |
294 | 261 | ||
295 | /* Initialize subscription object */ | 262 | /* Initialize subscription object */ |
263 | sub->net = net; | ||
296 | sub->seq.type = htohl(s->seq.type, swap); | 264 | sub->seq.type = htohl(s->seq.type, swap); |
297 | sub->seq.lower = htohl(s->seq.lower, swap); | 265 | sub->seq.lower = htohl(s->seq.lower, swap); |
298 | sub->seq.upper = htohl(s->seq.upper, swap); | 266 | sub->seq.upper = htohl(s->seq.upper, swap); |
299 | sub->timeout = htohl(s->timeout, swap); | 267 | sub->timeout = msecs_to_jiffies(htohl(s->timeout, swap)); |
300 | sub->filter = htohl(s->filter, swap); | 268 | sub->filter = htohl(s->filter, swap); |
301 | if ((!(sub->filter & TIPC_SUB_PORTS) == | 269 | if ((!(sub->filter & TIPC_SUB_PORTS) == |
302 | !(sub->filter & TIPC_SUB_SERVICE)) || | 270 | !(sub->filter & TIPC_SUB_SERVICE)) || |
@@ -309,11 +277,10 @@ static int subscr_subscribe(struct tipc_subscr *s, | |||
309 | sub->subscriber = subscriber; | 277 | sub->subscriber = subscriber; |
310 | sub->swap = swap; | 278 | sub->swap = swap; |
311 | memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); | 279 | memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); |
312 | atomic_inc(&subscription_count); | 280 | atomic_inc(&tn->subscription_count); |
313 | if (sub->timeout != TIPC_WAIT_FOREVER) { | 281 | if (sub->timeout != TIPC_WAIT_FOREVER) { |
314 | k_init_timer(&sub->timer, | 282 | setup_timer(&sub->timer, subscr_timeout, (unsigned long)sub); |
315 | (Handler)subscr_timeout, (unsigned long)sub); | 283 | mod_timer(&sub->timer, jiffies + sub->timeout); |
316 | k_start_timer(&sub->timer, sub->timeout); | ||
317 | } | 284 | } |
318 | *sub_p = sub; | 285 | *sub_p = sub; |
319 | return 0; | 286 | return 0; |
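The subscription code above replaces the private k_init_timer()/k_start_timer()/k_cancel_timer() wrappers with the plain kernel timer API of this era, converting the user-supplied millisecond timeout to jiffies first. A minimal hedged sketch of that pattern (struct my_state and my_expire() are hypothetical):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct my_state {
		struct timer_list timer;
		unsigned long timeout;		/* in jiffies */
	};

	static void my_expire(unsigned long data)
	{
		struct my_state *s = (struct my_state *)data;
		/* timeout handling for 's' goes here */
	}

	static void my_arm(struct my_state *s, u32 timeout_ms)
	{
		s->timeout = msecs_to_jiffies(timeout_ms);
		setup_timer(&s->timer, my_expire, (unsigned long)s);
		mod_timer(&s->timer, jiffies + s->timeout);
	}

	static void my_disarm(struct my_state *s)
	{
		del_timer_sync(&s->timer);	/* waits for a running handler */
	}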
@@ -326,24 +293,23 @@ static void subscr_conn_shutdown_event(int conid, void *usr_data) | |||
326 | } | 293 | } |
327 | 294 | ||
328 | /* Handle one request to create a new subscription for the subscriber */ | 295 | /* Handle one request to create a new subscription for the subscriber */ |
329 | static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr, | 296 | static void subscr_conn_msg_event(struct net *net, int conid, |
330 | void *usr_data, void *buf, size_t len) | 297 | struct sockaddr_tipc *addr, void *usr_data, |
298 | void *buf, size_t len) | ||
331 | { | 299 | { |
332 | struct tipc_subscriber *subscriber = usr_data; | 300 | struct tipc_subscriber *subscriber = usr_data; |
333 | struct tipc_subscription *sub = NULL; | 301 | struct tipc_subscription *sub = NULL; |
302 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
334 | 303 | ||
335 | spin_lock_bh(&subscriber->lock); | 304 | spin_lock_bh(&subscriber->lock); |
336 | if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) { | 305 | subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, &sub); |
337 | spin_unlock_bh(&subscriber->lock); | ||
338 | subscr_terminate(subscriber); | ||
339 | return; | ||
340 | } | ||
341 | if (sub) | 306 | if (sub) |
342 | tipc_nametbl_subscribe(sub); | 307 | tipc_nametbl_subscribe(sub); |
308 | else | ||
309 | tipc_conn_terminate(tn->topsrv, subscriber->conid); | ||
343 | spin_unlock_bh(&subscriber->lock); | 310 | spin_unlock_bh(&subscriber->lock); |
344 | } | 311 | } |
345 | 312 | ||
346 | |||
347 | /* Handle one request to establish a new subscriber */ | 313 | /* Handle one request to establish a new subscriber */ |
348 | static void *subscr_named_msg_event(int conid) | 314 | static void *subscr_named_msg_event(int conid) |
349 | { | 315 | { |
@@ -362,12 +328,50 @@ static void *subscr_named_msg_event(int conid) | |||
362 | return (void *)subscriber; | 328 | return (void *)subscriber; |
363 | } | 329 | } |
364 | 330 | ||
365 | int tipc_subscr_start(void) | 331 | int tipc_subscr_start(struct net *net) |
366 | { | 332 | { |
367 | return tipc_server_start(&topsrv); | 333 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
334 | const char name[] = "topology_server"; | ||
335 | struct tipc_server *topsrv; | ||
336 | struct sockaddr_tipc *saddr; | ||
337 | |||
338 | saddr = kzalloc(sizeof(*saddr), GFP_ATOMIC); | ||
339 | if (!saddr) | ||
340 | return -ENOMEM; | ||
341 | saddr->family = AF_TIPC; | ||
342 | saddr->addrtype = TIPC_ADDR_NAMESEQ; | ||
343 | saddr->addr.nameseq.type = TIPC_TOP_SRV; | ||
344 | saddr->addr.nameseq.lower = TIPC_TOP_SRV; | ||
345 | saddr->addr.nameseq.upper = TIPC_TOP_SRV; | ||
346 | saddr->scope = TIPC_NODE_SCOPE; | ||
347 | |||
348 | topsrv = kzalloc(sizeof(*topsrv), GFP_ATOMIC); | ||
349 | if (!topsrv) { | ||
350 | kfree(saddr); | ||
351 | return -ENOMEM; | ||
352 | } | ||
353 | topsrv->net = net; | ||
354 | topsrv->saddr = saddr; | ||
355 | topsrv->imp = TIPC_CRITICAL_IMPORTANCE; | ||
356 | topsrv->type = SOCK_SEQPACKET; | ||
357 | topsrv->max_rcvbuf_size = sizeof(struct tipc_subscr); | ||
358 | topsrv->tipc_conn_recvmsg = subscr_conn_msg_event; | ||
359 | topsrv->tipc_conn_new = subscr_named_msg_event; | ||
360 | topsrv->tipc_conn_shutdown = subscr_conn_shutdown_event; | ||
361 | |||
362 | strncpy(topsrv->name, name, strlen(name) + 1); | ||
363 | tn->topsrv = topsrv; | ||
364 | atomic_set(&tn->subscription_count, 0); | ||
365 | |||
366 | return tipc_server_start(topsrv); | ||
368 | } | 367 | } |
369 | 368 | ||
370 | void tipc_subscr_stop(void) | 369 | void tipc_subscr_stop(struct net *net) |
371 | { | 370 | { |
372 | tipc_server_stop(&topsrv); | 371 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
372 | struct tipc_server *topsrv = tn->topsrv; | ||
373 | |||
374 | tipc_server_stop(topsrv); | ||
375 | kfree(topsrv->saddr); | ||
376 | kfree(topsrv); | ||
373 | } | 377 | } |
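tipc_subscr_start()/tipc_subscr_stop() above move the topology server out of file-scope statics into per-namespace state reached through net_generic(). That kind of state is normally declared to the network stack with a struct pernet_operations; a hedged sketch of the general pattern (my_net, my_net_id and the ops are illustrative, not TIPC's actual registration):

	#include <net/net_namespace.h>
	#include <net/netns/generic.h>

	static int my_net_id __read_mostly;

	struct my_net {
		int some_counter;		/* per-namespace state lives here */
	};

	static int __net_init my_net_init(struct net *net)
	{
		struct my_net *mn = net_generic(net, my_net_id);

		mn->some_counter = 0;		/* set up this namespace's instance */
		return 0;
	}

	static void __net_exit my_net_exit(struct net *net)
	{
		/* tear down this namespace's instance */
	}

	static struct pernet_operations my_net_ops = {
		.init = my_net_init,
		.exit = my_net_exit,
		.id   = &my_net_id,
		.size = sizeof(struct my_net),
	};

	/* registered once at module init: register_pernet_subsys(&my_net_ops); */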
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h index 393e417bee3f..33488bd9fe3c 100644 --- a/net/tipc/subscr.h +++ b/net/tipc/subscr.h | |||
@@ -39,6 +39,9 @@ | |||
39 | 39 | ||
40 | #include "server.h" | 40 | #include "server.h" |
41 | 41 | ||
42 | #define TIPC_MAX_SUBSCRIPTIONS 65535 | ||
43 | #define TIPC_MAX_PUBLICATIONS 65535 | ||
44 | |||
42 | struct tipc_subscription; | 45 | struct tipc_subscription; |
43 | struct tipc_subscriber; | 46 | struct tipc_subscriber; |
44 | 47 | ||
@@ -46,6 +49,7 @@ struct tipc_subscriber; | |||
46 | * struct tipc_subscription - TIPC network topology subscription object | 49 | * struct tipc_subscription - TIPC network topology subscription object |
47 | * @subscriber: pointer to its subscriber | 50 | * @subscriber: pointer to its subscriber |
48 | * @seq: name sequence associated with subscription | 51 | * @seq: name sequence associated with subscription |
52 | * @net: pointer to the network namespace | ||
49 | * @timeout: duration of subscription (in ms) | 53 | * @timeout: duration of subscription (in ms) |
50 | * @filter: event filtering to be done for subscription | 54 | * @filter: event filtering to be done for subscription |
51 | * @timer: timer governing subscription duration (optional) | 55 | * @timer: timer governing subscription duration (optional) |
@@ -58,7 +62,8 @@ struct tipc_subscriber; | |||
58 | struct tipc_subscription { | 62 | struct tipc_subscription { |
59 | struct tipc_subscriber *subscriber; | 63 | struct tipc_subscriber *subscriber; |
60 | struct tipc_name_seq seq; | 64 | struct tipc_name_seq seq; |
61 | u32 timeout; | 65 | struct net *net; |
66 | unsigned long timeout; | ||
62 | u32 filter; | 67 | u32 filter; |
63 | struct timer_list timer; | 68 | struct timer_list timer; |
64 | struct list_head nameseq_list; | 69 | struct list_head nameseq_list; |
@@ -69,13 +74,10 @@ struct tipc_subscription { | |||
69 | 74 | ||
70 | int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower, | 75 | int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower, |
71 | u32 found_upper); | 76 | u32 found_upper); |
72 | |||
73 | void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower, | 77 | void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower, |
74 | u32 found_upper, u32 event, u32 port_ref, | 78 | u32 found_upper, u32 event, u32 port_ref, |
75 | u32 node, int must); | 79 | u32 node, int must); |
76 | 80 | int tipc_subscr_start(struct net *net); | |
77 | int tipc_subscr_start(void); | 81 | void tipc_subscr_stop(struct net *net); |
78 | |||
79 | void tipc_subscr_stop(void); | ||
80 | 82 | ||
81 | #endif | 83 | #endif |
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c new file mode 100644 index 000000000000..66deebc66aa1 --- /dev/null +++ b/net/tipc/udp_media.c | |||
@@ -0,0 +1,448 @@ | |||
1 | /* net/tipc/udp_media.c: IP bearer support for TIPC | ||
2 | * | ||
3 | * Copyright (c) 2015, Ericsson AB | ||
4 | * All rights reserved. | ||
5 | * | ||
6 | * Redistribution and use in source and binary forms, with or without | ||
7 | * modification, are permitted provided that the following conditions are met: | ||
8 | * | ||
9 | * 1. Redistributions of source code must retain the above copyright | ||
10 | * notice, this list of conditions and the following disclaimer. | ||
11 | * 2. Redistributions in binary form must reproduce the above copyright | ||
12 | * notice, this list of conditions and the following disclaimer in the | ||
13 | * documentation and/or other materials provided with the distribution. | ||
14 | * 3. Neither the names of the copyright holders nor the names of its | ||
15 | * contributors may be used to endorse or promote products derived from | ||
16 | * this software without specific prior written permission. | ||
17 | * | ||
18 | * Alternatively, this software may be distributed under the terms of the | ||
19 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
20 | * Software Foundation. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
26 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
32 | * POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/socket.h> | ||
36 | #include <linux/ip.h> | ||
37 | #include <linux/udp.h> | ||
38 | #include <linux/inet.h> | ||
39 | #include <linux/inetdevice.h> | ||
40 | #include <linux/igmp.h> | ||
41 | #include <linux/kernel.h> | ||
42 | #include <linux/workqueue.h> | ||
43 | #include <linux/list.h> | ||
44 | #include <net/sock.h> | ||
45 | #include <net/ip.h> | ||
46 | #include <net/udp_tunnel.h> | ||
47 | #include <net/addrconf.h> | ||
48 | #include <linux/tipc_netlink.h> | ||
49 | #include "core.h" | ||
50 | #include "bearer.h" | ||
51 | |||
52 | /* IANA assigned UDP port */ | ||
53 | #define UDP_PORT_DEFAULT 6118 | ||
54 | |||
55 | static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = { | ||
56 | [TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC}, | ||
57 | [TIPC_NLA_UDP_LOCAL] = {.type = NLA_BINARY, | ||
58 | .len = sizeof(struct sockaddr_storage)}, | ||
59 | [TIPC_NLA_UDP_REMOTE] = {.type = NLA_BINARY, | ||
60 | .len = sizeof(struct sockaddr_storage)}, | ||
61 | }; | ||
62 | |||
63 | /** | ||
64 | * struct udp_media_addr - IP/UDP addressing information | ||
65 | * | ||
66 | * This is the bearer level originating address used in neighbor discovery | ||
67 | * messages, and all fields should be in network byte order | ||
68 | */ | ||
69 | struct udp_media_addr { | ||
70 | __be16 proto; | ||
71 | __be16 udp_port; | ||
72 | union { | ||
73 | struct in_addr ipv4; | ||
74 | struct in6_addr ipv6; | ||
75 | }; | ||
76 | }; | ||
77 | |||
78 | /** | ||
79 | * struct udp_bearer - ip/udp bearer data structure | ||
80 | * @bearer: associated generic tipc bearer | ||
81 | * @ubsock: bearer associated socket | ||
82 | * @ifindex: local address scope | ||
83 | * @work: used to schedule deferred work on a bearer | ||
84 | */ | ||
85 | struct udp_bearer { | ||
86 | struct tipc_bearer __rcu *bearer; | ||
87 | struct socket *ubsock; | ||
88 | u32 ifindex; | ||
89 | struct work_struct work; | ||
90 | }; | ||
91 | |||
92 | /* tipc_udp_media_addr_set - convert an ip/udp address to a TIPC media address */ | ||
93 | static void tipc_udp_media_addr_set(struct tipc_media_addr *addr, | ||
94 | struct udp_media_addr *ua) | ||
95 | { | ||
96 | memset(addr, 0, sizeof(struct tipc_media_addr)); | ||
97 | addr->media_id = TIPC_MEDIA_TYPE_UDP; | ||
98 | memcpy(addr->value, ua, sizeof(struct udp_media_addr)); | ||
99 | if (ntohs(ua->proto) == ETH_P_IP) { | ||
100 | if (ipv4_is_multicast(ua->ipv4.s_addr)) | ||
101 | addr->broadcast = 1; | ||
102 | } else if (ntohs(ua->proto) == ETH_P_IPV6) { | ||
103 | if (ipv6_addr_type(&ua->ipv6) & IPV6_ADDR_MULTICAST) | ||
104 | addr->broadcast = 1; | ||
105 | } else { | ||
106 | pr_err("Invalid UDP media address\n"); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | /* tipc_udp_addr2str - convert ip/udp address to string */ | ||
111 | static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size) | ||
112 | { | ||
113 | struct udp_media_addr *ua = (struct udp_media_addr *)&a->value; | ||
114 | |||
115 | if (ntohs(ua->proto) == ETH_P_IP) | ||
116 | snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->udp_port)); | ||
117 | else if (ntohs(ua->proto) == ETH_P_IPV6) | ||
118 | snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->udp_port)); | ||
119 | else | ||
120 | pr_err("Invalid UDP media address\n"); | ||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | /* tipc_udp_msg2addr - extract an ip/udp address from a TIPC ndisc message */ | ||
125 | static int tipc_udp_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *a, | ||
126 | char *msg) | ||
127 | { | ||
128 | struct udp_media_addr *ua; | ||
129 | |||
130 | ua = (struct udp_media_addr *) (msg + TIPC_MEDIA_ADDR_OFFSET); | ||
131 | if (msg[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_UDP) | ||
132 | return -EINVAL; | ||
133 | tipc_udp_media_addr_set(a, ua); | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | /* tipc_udp_addr2msg - write an ip/udp address to a TIPC ndisc message */ | ||
138 | static int tipc_udp_addr2msg(char *msg, struct tipc_media_addr *a) | ||
139 | { | ||
140 | memset(msg, 0, TIPC_MEDIA_INFO_SIZE); | ||
141 | msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_UDP; | ||
142 | memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, a->value, | ||
143 | sizeof(struct udp_media_addr)); | ||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | /* tipc_udp_send_msg - enqueue a send request */ | ||
148 | static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb, | ||
149 | struct tipc_bearer *b, | ||
150 | struct tipc_media_addr *dest) | ||
151 | { | ||
152 | int ttl, err = 0; | ||
153 | struct udp_bearer *ub; | ||
154 | struct udp_media_addr *dst = (struct udp_media_addr *)&dest->value; | ||
155 | struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; | ||
156 | struct sk_buff *clone; | ||
157 | struct rtable *rt; | ||
158 | |||
159 | clone = skb_clone(skb, GFP_ATOMIC); | ||
160 | skb_set_inner_protocol(clone, htons(ETH_P_TIPC)); | ||
161 | ub = rcu_dereference_rtnl(b->media_ptr); | ||
162 | if (!ub) { | ||
163 | err = -ENODEV; | ||
164 | goto tx_error; | ||
165 | } | ||
166 | if (dst->proto == htons(ETH_P_IP)) { | ||
167 | struct flowi4 fl = { | ||
168 | .daddr = dst->ipv4.s_addr, | ||
169 | .saddr = src->ipv4.s_addr, | ||
170 | .flowi4_mark = clone->mark, | ||
171 | .flowi4_proto = IPPROTO_UDP | ||
172 | }; | ||
173 | rt = ip_route_output_key(net, &fl); | ||
174 | if (IS_ERR(rt)) { | ||
175 | err = PTR_ERR(rt); | ||
176 | goto tx_error; | ||
177 | } | ||
178 | ttl = ip4_dst_hoplimit(&rt->dst); | ||
179 | err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, clone, | ||
180 | src->ipv4.s_addr, | ||
181 | dst->ipv4.s_addr, 0, ttl, 0, | ||
182 | src->udp_port, dst->udp_port, | ||
183 | false, true); | ||
184 | if (err < 0) { | ||
185 | ip_rt_put(rt); | ||
186 | goto tx_error; | ||
187 | } | ||
188 | #if IS_ENABLED(CONFIG_IPV6) | ||
189 | } else { | ||
190 | struct dst_entry *ndst; | ||
191 | struct flowi6 fl6 = { | ||
192 | .flowi6_oif = ub->ifindex, | ||
193 | .daddr = dst->ipv6, | ||
194 | .saddr = src->ipv6, | ||
195 | .flowi6_proto = IPPROTO_UDP | ||
196 | }; | ||
197 | err = ipv6_stub->ipv6_dst_lookup(ub->ubsock->sk, &ndst, &fl6); | ||
198 | if (err) | ||
199 | goto tx_error; | ||
200 | ttl = ip6_dst_hoplimit(ndst); | ||
201 | err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, clone, | ||
202 | ndst->dev, &src->ipv6, | ||
203 | &dst->ipv6, 0, ttl, src->udp_port, | ||
204 | dst->udp_port, false); | ||
205 | #endif | ||
206 | } | ||
207 | return err; | ||
208 | |||
209 | tx_error: | ||
210 | kfree_skb(clone); | ||
211 | return err; | ||
212 | } | ||
213 | |||
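tipc_udp_send_msg() above first resolves the IPv4 route for the cloned skb with ip_route_output_key() and only then hands the buffer to udp_tunnel_xmit_skb() with the hop limit taken from the route. A hedged sketch of just that route-lookup step (the function name and arguments are illustrative):

	#include <net/route.h>
	#include <net/ip.h>

	/* look up the IPv4 route for a UDP flow and return its hop limit,
	 * or a negative errno; the route is released before returning */
	static int my_route_ttl(struct net *net, __be32 saddr, __be32 daddr)
	{
		struct flowi4 fl = {
			.daddr = daddr,
			.saddr = saddr,
			.flowi4_proto = IPPROTO_UDP,
		};
		struct rtable *rt;
		int ttl;

		rt = ip_route_output_key(net, &fl);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		ttl = ip4_dst_hoplimit(&rt->dst);
		ip_rt_put(rt);
		return ttl;
	}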
214 | /* tipc_udp_recv - read data from bearer socket */ | ||
215 | static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb) | ||
216 | { | ||
217 | struct udp_bearer *ub; | ||
218 | struct tipc_bearer *b; | ||
219 | |||
220 | ub = rcu_dereference_sk_user_data(sk); | ||
221 | if (!ub) { | ||
222 | pr_err_ratelimited("Failed to get UDP bearer reference\n"); | ||
223 | kfree_skb(skb); | ||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | skb_pull(skb, sizeof(struct udphdr)); | ||
228 | rcu_read_lock(); | ||
229 | b = rcu_dereference_rtnl(ub->bearer); | ||
230 | |||
231 | if (b) { | ||
232 | tipc_rcv(sock_net(sk), skb, b); | ||
233 | rcu_read_unlock(); | ||
234 | return 0; | ||
235 | } | ||
236 | rcu_read_unlock(); | ||
237 | kfree_skb(skb); | ||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote) | ||
242 | { | ||
243 | int err = 0; | ||
244 | struct ip_mreqn mreqn; | ||
245 | struct sock *sk = ub->ubsock->sk; | ||
246 | |||
247 | if (ntohs(remote->proto) == ETH_P_IP) { | ||
248 | if (!ipv4_is_multicast(remote->ipv4.s_addr)) | ||
249 | return 0; | ||
250 | mreqn.imr_multiaddr = remote->ipv4; | ||
251 | mreqn.imr_ifindex = ub->ifindex; | ||
252 | err = ip_mc_join_group(sk, &mreqn); | ||
253 | #if IS_ENABLED(CONFIG_IPV6) | ||
254 | } else { | ||
255 | if (!ipv6_addr_is_multicast(&remote->ipv6)) | ||
256 | return 0; | ||
257 | err = ipv6_stub->ipv6_sock_mc_join(sk, ub->ifindex, | ||
258 | &remote->ipv6); | ||
259 | #endif | ||
260 | } | ||
261 | return err; | ||
262 | } | ||
263 | |||
264 | /** | ||
265 | * parse_options - build local/remote addresses from configuration | ||
266 | * @attrs: netlink config data | ||
267 | * @ub: UDP bearer instance | ||
268 | * @local: local bearer IP address/port | ||
269 | * @remote: peer or multicast IP/port | ||
270 | */ | ||
271 | static int parse_options(struct nlattr *attrs[], struct udp_bearer *ub, | ||
272 | struct udp_media_addr *local, | ||
273 | struct udp_media_addr *remote) | ||
274 | { | ||
275 | struct nlattr *opts[TIPC_NLA_UDP_MAX + 1]; | ||
276 | struct sockaddr_storage *sa_local, *sa_remote; | ||
277 | |||
278 | if (!attrs[TIPC_NLA_BEARER_UDP_OPTS]) | ||
279 | goto err; | ||
280 | if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX, | ||
281 | attrs[TIPC_NLA_BEARER_UDP_OPTS], | ||
282 | tipc_nl_udp_policy)) | ||
283 | goto err; | ||
284 | if (opts[TIPC_NLA_UDP_LOCAL] && opts[TIPC_NLA_UDP_REMOTE]) { | ||
285 | sa_local = nla_data(opts[TIPC_NLA_UDP_LOCAL]); | ||
286 | sa_remote = nla_data(opts[TIPC_NLA_UDP_REMOTE]); | ||
287 | } else { | ||
288 | err: | ||
289 | pr_err("Invalid UDP bearer configuration\n"); | ||
290 | return -EINVAL; | ||
291 | } | ||
292 | if ((sa_local->ss_family & sa_remote->ss_family) == AF_INET) { | ||
293 | struct sockaddr_in *ip4; | ||
294 | |||
295 | ip4 = (struct sockaddr_in *)sa_local; | ||
296 | local->proto = htons(ETH_P_IP); | ||
297 | local->udp_port = ip4->sin_port; | ||
298 | local->ipv4.s_addr = ip4->sin_addr.s_addr; | ||
299 | |||
300 | ip4 = (struct sockaddr_in *)sa_remote; | ||
301 | remote->proto = htons(ETH_P_IP); | ||
302 | remote->udp_port = ip4->sin_port; | ||
303 | remote->ipv4.s_addr = ip4->sin_addr.s_addr; | ||
304 | return 0; | ||
305 | |||
306 | #if IS_ENABLED(CONFIG_IPV6) | ||
307 | } else if ((sa_local->ss_family & sa_remote->ss_family) == AF_INET6) { | ||
308 | struct sockaddr_in6 *ip6; | ||
309 | |||
310 | ip6 = (struct sockaddr_in6 *)sa_local; | ||
311 | local->proto = htons(ETH_P_IPV6); | ||
312 | local->udp_port = ip6->sin6_port; | ||
313 | local->ipv6 = ip6->sin6_addr; | ||
314 | ub->ifindex = ip6->sin6_scope_id; | ||
315 | |||
316 | ip6 = (struct sockaddr_in6 *)sa_remote; | ||
317 | remote->proto = htons(ETH_P_IPV6); | ||
318 | remote->udp_port = ip6->sin6_port; | ||
319 | remote->ipv6 = ip6->sin6_addr; | ||
320 | return 0; | ||
321 | #endif | ||
322 | } | ||
323 | return -EADDRNOTAVAIL; | ||
324 | } | ||
325 | |||
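parse_options() above unpacks the nested TIPC_NLA_BEARER_UDP_OPTS container with nla_parse_nested() and then insists that both the local and the remote address attributes are present before interpreting them. A small hedged sketch of nested-attribute parsing in the same style (the MY_ATTR_* names and policy are hypothetical):

	#include <net/netlink.h>

	enum { MY_ATTR_UNSPEC, MY_ATTR_LOCAL, MY_ATTR_REMOTE, __MY_ATTR_MAX };
	#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

	static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
		[MY_ATTR_LOCAL]  = { .type = NLA_U32 },
		[MY_ATTR_REMOTE] = { .type = NLA_U32 },
	};

	/* 'nested' is the container attribute holding the options */
	static int my_parse(struct nlattr *nested, u32 *local, u32 *remote)
	{
		struct nlattr *opts[MY_ATTR_MAX + 1];

		if (!nested)
			return -EINVAL;
		if (nla_parse_nested(opts, MY_ATTR_MAX, nested, my_policy))
			return -EINVAL;
		if (!opts[MY_ATTR_LOCAL] || !opts[MY_ATTR_REMOTE])
			return -EINVAL;

		*local  = nla_get_u32(opts[MY_ATTR_LOCAL]);
		*remote = nla_get_u32(opts[MY_ATTR_REMOTE]);
		return 0;
	}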
326 | /** | ||
327 | * tipc_udp_enable - callback to create a new udp bearer instance | ||
328 | * @net: network namespace | ||
329 | * @b: pointer to generic tipc_bearer | ||
330 | * @attrs: netlink bearer configuration | ||
331 | * | ||
332 | * Validate the bearer parameters and initialize the UDP bearer. | ||
333 | * rtnl_lock should be held. | ||
334 | */ | ||
335 | static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, | ||
336 | struct nlattr *attrs[]) | ||
337 | { | ||
338 | int err = -EINVAL; | ||
339 | struct udp_bearer *ub; | ||
340 | struct udp_media_addr *remote; | ||
341 | struct udp_media_addr local = {0}; | ||
342 | struct udp_port_cfg udp_conf = {0}; | ||
343 | struct udp_tunnel_sock_cfg tuncfg = {NULL}; | ||
344 | |||
345 | ub = kzalloc(sizeof(*ub), GFP_ATOMIC); | ||
346 | if (!ub) | ||
347 | return -ENOMEM; | ||
348 | |||
349 | remote = (struct udp_media_addr *)&b->bcast_addr.value; | ||
350 | memset(remote, 0, sizeof(struct udp_media_addr)); | ||
351 | err = parse_options(attrs, ub, &local, remote); | ||
352 | if (err) | ||
353 | goto err; | ||
354 | |||
355 | b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP; | ||
356 | b->bcast_addr.broadcast = 1; | ||
357 | rcu_assign_pointer(b->media_ptr, ub); | ||
358 | rcu_assign_pointer(ub->bearer, b); | ||
359 | tipc_udp_media_addr_set(&b->addr, &local); | ||
360 | if (local.proto == htons(ETH_P_IP)) { | ||
361 | struct net_device *dev; | ||
362 | |||
363 | dev = __ip_dev_find(net, local.ipv4.s_addr, false); | ||
364 | if (!dev) { | ||
365 | err = -ENODEV; | ||
366 | goto err; | ||
367 | } | ||
368 | udp_conf.family = AF_INET; | ||
369 | udp_conf.local_ip.s_addr = htonl(INADDR_ANY); | ||
370 | udp_conf.use_udp_checksums = false; | ||
371 | ub->ifindex = dev->ifindex; | ||
372 | b->mtu = dev->mtu - sizeof(struct iphdr) | ||
373 | - sizeof(struct udphdr); | ||
374 | #if IS_ENABLED(CONFIG_IPV6) | ||
375 | } else if (local.proto == htons(ETH_P_IPV6)) { | ||
376 | udp_conf.family = AF_INET6; | ||
377 | udp_conf.use_udp6_tx_checksums = true; | ||
378 | udp_conf.use_udp6_rx_checksums = true; | ||
379 | udp_conf.local_ip6 = in6addr_any; | ||
380 | b->mtu = 1280; | ||
381 | #endif | ||
382 | } else { | ||
383 | err = -EAFNOSUPPORT; | ||
384 | goto err; | ||
385 | } | ||
386 | udp_conf.local_udp_port = local.udp_port; | ||
387 | err = udp_sock_create(net, &udp_conf, &ub->ubsock); | ||
388 | if (err) | ||
389 | goto err; | ||
390 | tuncfg.sk_user_data = ub; | ||
391 | tuncfg.encap_type = 1; | ||
392 | tuncfg.encap_rcv = tipc_udp_recv; | ||
393 | tuncfg.encap_destroy = NULL; | ||
394 | setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg); | ||
395 | |||
396 | if (enable_mcast(ub, remote)) | ||
397 | goto err; | ||
398 | return 0; | ||
399 | err: | ||
400 | kfree(ub); | ||
401 | return err; | ||
402 | } | ||
403 | |||
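tipc_udp_enable() above builds its bearer socket with the generic UDP tunnel helpers: udp_sock_create() opens and binds the kernel socket from a struct udp_port_cfg, and setup_udp_tunnel_sock() attaches the encapsulation receive callback. A hedged minimal sketch of that sequence for an IPv4 listener (my_encap_recv() and the port number are assumptions):

	#include <net/udp_tunnel.h>
	#include <linux/in.h>

	/* encap_rcv callback: consume or free the skb, return 0 when handled */
	static int my_encap_recv(struct sock *sk, struct sk_buff *skb)
	{
		kfree_skb(skb);
		return 0;
	}

	static int my_open_tunnel(struct net *net, struct socket **sockp)
	{
		struct udp_port_cfg udp_conf = {0};
		struct udp_tunnel_sock_cfg tuncfg = {NULL};
		int err;

		udp_conf.family = AF_INET;
		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
		udp_conf.local_udp_port = htons(6118);	/* assumed port */

		err = udp_sock_create(net, &udp_conf, sockp);
		if (err)
			return err;

		tuncfg.sk_user_data = NULL;	/* private per-socket state, if any */
		tuncfg.encap_type = 1;		/* generic UDP encapsulation */
		tuncfg.encap_rcv = my_encap_recv;
		setup_udp_tunnel_sock(net, *sockp, &tuncfg);
		return 0;
	}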
404 | /* cleanup_bearer - break the socket/bearer association */ | ||
405 | static void cleanup_bearer(struct work_struct *work) | ||
406 | { | ||
407 | struct udp_bearer *ub = container_of(work, struct udp_bearer, work); | ||
408 | |||
409 | if (ub->ubsock) | ||
410 | udp_tunnel_sock_release(ub->ubsock); | ||
411 | synchronize_net(); | ||
412 | kfree(ub); | ||
413 | } | ||
414 | |||
415 | /* tipc_udp_disable - detach bearer from socket */ | ||
416 | static void tipc_udp_disable(struct tipc_bearer *b) | ||
417 | { | ||
418 | struct udp_bearer *ub; | ||
419 | |||
420 | ub = rcu_dereference_rtnl(b->media_ptr); | ||
421 | if (!ub) { | ||
422 | pr_err("UDP bearer instance not found\n"); | ||
423 | return; | ||
424 | } | ||
425 | if (ub->ubsock) | ||
426 | sock_set_flag(ub->ubsock->sk, SOCK_DEAD); | ||
427 | RCU_INIT_POINTER(b->media_ptr, NULL); | ||
428 | RCU_INIT_POINTER(ub->bearer, NULL); | ||
429 | |||
430 | /* sock_release need to be done outside of rtnl lock */ | ||
431 | INIT_WORK(&ub->work, cleanup_bearer); | ||
432 | schedule_work(&ub->work); | ||
433 | } | ||
434 | |||
435 | struct tipc_media udp_media_info = { | ||
436 | .send_msg = tipc_udp_send_msg, | ||
437 | .enable_media = tipc_udp_enable, | ||
438 | .disable_media = tipc_udp_disable, | ||
439 | .addr2str = tipc_udp_addr2str, | ||
440 | .addr2msg = tipc_udp_addr2msg, | ||
441 | .msg2addr = tipc_udp_msg2addr, | ||
442 | .priority = TIPC_DEF_LINK_PRI, | ||
443 | .tolerance = TIPC_DEF_LINK_TOL, | ||
444 | .window = TIPC_DEF_LINK_WIN, | ||
445 | .type_id = TIPC_MEDIA_TYPE_UDP, | ||
446 | .hwaddr_len = 0, | ||
447 | .name = "udp" | ||
448 | }; | ||