author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit		8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree		a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /net/tipc/bcast.c
parent		406089d01562f1e2bf9f089fd7637009ebaad589 (diff)

Patched in Tegra support.

Diffstat (limited to 'net/tipc/bcast.c')
-rw-r--r--	net/tipc/bcast.c	692

1 file changed, 357 insertions(+), 335 deletions(-)

diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 54f89f90ac3..759b318b5ff 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -39,14 +39,13 @@
 #include "link.h"
 #include "port.h"
 #include "bcast.h"
-#include "name_distr.h"
 
 #define MAX_PKT_DEFAULT_MCAST 1500	/* bcast link max packet size (fixed) */
 
 #define BCLINK_WIN_DEFAULT 20		/* bcast link window size (default) */
 
 /**
- * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
+ * struct bcbearer_pair - a pair of bearers used by broadcast link
  * @primary: pointer to primary bearer
  * @secondary: pointer to secondary bearer
  *
@@ -54,13 +53,13 @@
  * to be paired.
  */
 
-struct tipc_bcbearer_pair {
+struct bcbearer_pair {
 	struct tipc_bearer *primary;
 	struct tipc_bearer *secondary;
 };
 
 /**
- * struct tipc_bcbearer - bearer used by broadcast link
+ * struct bcbearer - bearer used by broadcast link
  * @bearer: (non-standard) broadcast bearer structure
  * @media: (non-standard) broadcast media structure
  * @bpairs: array of bearer pairs
@@ -73,46 +72,51 @@ struct tipc_bcbearer_pair {
  * large local variables within multicast routines. Concurrent access is
  * prevented through use of the spinlock "bc_lock".
  */
-struct tipc_bcbearer {
+
+struct bcbearer {
 	struct tipc_bearer bearer;
-	struct tipc_media media;
-	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
-	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
+	struct media media;
+	struct bcbearer_pair bpairs[MAX_BEARERS];
+	struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
 	struct tipc_node_map remains;
 	struct tipc_node_map remains_new;
 };
 
 /**
- * struct tipc_bclink - link used for broadcast messages
+ * struct bclink - link used for broadcast messages
  * @link: (non-standard) broadcast link structure
  * @node: (non-standard) node structure representing b'cast link's peer node
- * @bcast_nodes: map of broadcast-capable nodes
  * @retransmit_to: node that most recently requested a retransmit
  *
  * Handles sequence numbering, fragmentation, bundling, etc.
 */
-struct tipc_bclink {
-	struct tipc_link link;
+
+struct bclink {
+	struct link link;
 	struct tipc_node node;
-	struct tipc_node_map bcast_nodes;
 	struct tipc_node *retransmit_to;
 };
 
-static struct tipc_bcbearer bcast_bearer;
-static struct tipc_bclink bcast_link;
-
-static struct tipc_bcbearer *bcbearer = &bcast_bearer;
-static struct tipc_bclink *bclink = &bcast_link;
-static struct tipc_link *bcl = &bcast_link.link;
 
+static struct bcbearer *bcbearer;
+static struct bclink *bclink;
+static struct link *bcl;
 static DEFINE_SPINLOCK(bc_lock);
 
+/* broadcast-capable node map */
+struct tipc_node_map tipc_bcast_nmap;
+
 const char tipc_bclink_name[] = "broadcast-link";
 
 static void tipc_nmap_diff(struct tipc_node_map *nm_a,
 			   struct tipc_node_map *nm_b,
 			   struct tipc_node_map *nm_diff);
 
+static u32 buf_seqno(struct sk_buff *buf)
+{
+	return msg_seqno(buf_msg(buf));
+}
+
 static u32 bcbuf_acks(struct sk_buff *buf)
 {
 	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
@@ -128,19 +132,6 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
 	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
 }
 
-void tipc_bclink_add_node(u32 addr)
-{
-	spin_lock_bh(&bc_lock);
-	tipc_nmap_add(&bclink->bcast_nodes, addr);
-	spin_unlock_bh(&bc_lock);
-}
-
-void tipc_bclink_remove_node(u32 addr)
-{
-	spin_lock_bh(&bc_lock);
-	tipc_nmap_remove(&bclink->bcast_nodes, addr);
-	spin_unlock_bh(&bc_lock);
-}
-
 
 static void bclink_set_last_sent(void)
 {
@@ -155,10 +146,35 @@ u32 tipc_bclink_get_last_sent(void)
 	return bcl->fsm_msg_cnt;
 }
 
-static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
+/**
+ * bclink_set_gap - set gap according to contents of current deferred pkt queue
+ *
+ * Called with 'node' locked, bc_lock unlocked
+ */
+
+static void bclink_set_gap(struct tipc_node *n_ptr)
 {
-	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
-		seqno : node->bclink.last_sent;
+	struct sk_buff *buf = n_ptr->bclink.deferred_head;
+
+	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
+		mod(n_ptr->bclink.last_in);
+	if (unlikely(buf != NULL))
+		n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
+}
+
+/**
+ * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
+ *
+ * This mechanism endeavours to prevent all nodes in network from trying
+ * to ACK or NACK at the same time.
+ *
+ * Note: TIPC uses a different trigger to distribute ACKs than it does to
+ * distribute NACKs, but tries to use the same spacing (divide by 16).
+ */
+
+static int bclink_ack_allowed(u32 n)
+{
+	return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
 }
 
 
@@ -167,6 +183,7 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
  *
  * Called with bc_lock locked
  */
+
 struct tipc_node *tipc_bclink_retransmit_to(void)
 {
 	return bclink->retransmit_to;
@@ -179,6 +196,7 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
  *
  * Called with bc_lock locked
  */
+
 static void bclink_retransmit_pkt(u32 after, u32 to)
 {
 	struct sk_buff *buf;
@@ -196,42 +214,21 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
  *
  * Node is locked, bc_lock unlocked.
  */
+
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 {
 	struct sk_buff *crs;
 	struct sk_buff *next;
 	unsigned int released = 0;
 
-	spin_lock_bh(&bc_lock);
-
-	/* Bail out if tx queue is empty (no clean up is required) */
-	crs = bcl->first_out;
-	if (!crs)
-		goto exit;
+	if (less_eq(acked, n_ptr->bclink.acked))
+		return;
 
-	/* Determine which messages need to be acknowledged */
-	if (acked == INVALID_LINK_SEQ) {
-		/*
-		 * Contact with specified node has been lost, so need to
-		 * acknowledge sent messages only (if other nodes still exist)
-		 * or both sent and unsent messages (otherwise)
-		 */
-		if (bclink->bcast_nodes.count)
-			acked = bcl->fsm_msg_cnt;
-		else
-			acked = bcl->next_out_no;
-	} else {
-		/*
-		 * Bail out if specified sequence number does not correspond
-		 * to a message that has been sent and not yet acknowledged
-		 */
-		if (less(acked, buf_seqno(crs)) ||
-		    less(bcl->fsm_msg_cnt, acked) ||
-		    less_eq(acked, n_ptr->bclink.acked))
-			goto exit;
-	}
+	spin_lock_bh(&bc_lock);
 
 	/* Skip over packets that node has previously acknowledged */
+
+	crs = bcl->first_out;
 	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
 		crs = crs->next;
 
@@ -239,19 +236,11 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 
 	while (crs && less_eq(buf_seqno(crs), acked)) {
 		next = crs->next;
-
-		if (crs != bcl->next_out)
-			bcbuf_decr_acks(crs);
-		else {
-			bcbuf_set_acks(crs, 0);
-			bcl->next_out = next;
-			bclink_set_last_sent();
-		}
-
+		bcbuf_decr_acks(crs);
 		if (bcbuf_acks(crs) == 0) {
 			bcl->first_out = next;
 			bcl->out_queue_size--;
-			kfree_skb(crs);
+			buf_discard(crs);
 			released = 1;
 		}
 		crs = next;
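Both versions of the loop above rely on per-packet ACK counting: every queued broadcast buffer carries the number of peers that still owe an ACK, and it is released only once that count reaches zero. A minimal userspace sketch of the same bookkeeping (hypothetical bcpkt type; the kernel operates on sk_buffs under bc_lock):

/*
 * Sketch of the ACK bookkeeping in tipc_bclink_acknowledge():
 * each packet's 'acks' mirrors bcbuf_acks(); fully-acked packets
 * are freed from the front of the FIFO send queue.
 */
#include <stdio.h>
#include <stdlib.h>

struct bcpkt {
	unsigned int seqno;
	unsigned int acks;	/* peers that have not acked yet */
	struct bcpkt *next;
};

/* one peer acknowledges everything up to and including 'acked' */
static struct bcpkt *acknowledge(struct bcpkt *head, unsigned int acked)
{
	struct bcpkt *pkt;

	/* every packet up to 'acked' loses one outstanding ACK */
	for (pkt = head; pkt && pkt->seqno <= acked; pkt = pkt->next)
		pkt->acks--;

	/* fully-acked packets at the front of the queue can be freed */
	while (head && head->acks == 0) {
		struct bcpkt *next = head->next;

		printf("releasing seq %u\n", head->seqno);
		free(head);
		head = next;
	}
	return head;
}

int main(void)
{
	struct bcpkt *q = NULL, **tail = &q;
	unsigned int s;

	for (s = 1; s <= 3; s++) {	/* three packets, two peers each */
		struct bcpkt *p = malloc(sizeof(*p));

		p->seqno = s;
		p->acks = 2;
		p->next = NULL;
		*tail = p;
		tail = &p->next;
	}
	q = acknowledge(q, 3);	/* first peer acks: nothing freed yet */
	q = acknowledge(q, 3);	/* second peer acks: all three freed */
	return 0;
}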
@@ -266,144 +255,166 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 	}
 	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
 		tipc_link_wakeup_ports(bcl, 0);
-exit:
 	spin_unlock_bh(&bc_lock);
 }
 
 /**
- * tipc_bclink_update_link_state - update broadcast link state
+ * bclink_send_ack - unicast an ACK msg
  *
  * tipc_net_lock and node lock set
 */
-void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
-{
-	struct sk_buff *buf;
-
-	/* Ignore "stale" link state info */
-
-	if (less_eq(last_sent, n_ptr->bclink.last_in))
-		return;
-
-	/* Update link synchronization state; quit if in sync */
 
-	bclink_update_last_sent(n_ptr, last_sent);
-
-	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
-		return;
+static void bclink_send_ack(struct tipc_node *n_ptr)
+{
+	struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
 
-	/* Update out-of-sync state; quit if loss is still unconfirmed */
+	if (l_ptr != NULL)
+		tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+}
 
-	if ((++n_ptr->bclink.oos_state) == 1) {
-		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
-			return;
-		n_ptr->bclink.oos_state++;
-	}
+/**
+ * bclink_send_nack- broadcast a NACK msg
+ *
+ * tipc_net_lock and node lock set
+ */
 
-	/* Don't NACK if one has been recently sent (or seen) */
+static void bclink_send_nack(struct tipc_node *n_ptr)
+{
+	struct sk_buff *buf;
+	struct tipc_msg *msg;
 
-	if (n_ptr->bclink.oos_state & 0x1)
+	if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
 		return;
 
-	/* Send NACK */
-
 	buf = tipc_buf_acquire(INT_H_SIZE);
 	if (buf) {
-		struct tipc_msg *msg = buf_msg(buf);
-
+		msg = buf_msg(buf);
 		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
 			      INT_H_SIZE, n_ptr->addr);
 		msg_set_non_seq(msg, 1);
 		msg_set_mc_netid(msg, tipc_net_id);
-		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
-		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
-		msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
-				 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
-				 : n_ptr->bclink.last_sent);
-
-		spin_lock_bh(&bc_lock);
-		tipc_bearer_send(&bcbearer->bearer, buf, NULL);
-		bcl->stats.sent_nacks++;
-		spin_unlock_bh(&bc_lock);
-		kfree_skb(buf);
-
-		n_ptr->bclink.oos_state++;
+		msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
+		msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
+		msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
+		msg_set_bcast_tag(msg, tipc_own_tag);
+
+		if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
+			bcl->stats.sent_nacks++;
+			buf_discard(buf);
+		} else {
+			tipc_bearer_schedule(bcl->b_ptr, bcl);
+			bcl->proto_msg_queue = buf;
+			bcl->stats.bearer_congs++;
+		}
+
+		/*
+		 * Ensure we doesn't send another NACK msg to the node
+		 * until 16 more deferred messages arrive from it
+		 * (i.e. helps prevent all nodes from NACK'ing at same time)
+		 */
+
+		n_ptr->bclink.nack_sync = tipc_own_tag;
 	}
 }
 
 /**
- * bclink_peek_nack - monitor retransmission requests sent by other nodes
+ * tipc_bclink_check_gap - send a NACK if a sequence gap exists
+ *
+ * tipc_net_lock and node lock set
+ */
+
+void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
+{
+	if (!n_ptr->bclink.supported ||
+	    less_eq(last_sent, mod(n_ptr->bclink.last_in)))
+		return;
+
+	bclink_set_gap(n_ptr);
+	if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
+		n_ptr->bclink.gap_to = last_sent;
+	bclink_send_nack(n_ptr);
+}
+
+/**
+ * tipc_bclink_peek_nack - process a NACK msg meant for another node
  *
- * Delay any upcoming NACK by this node if another node has already
- * requested the first message this node is going to ask for.
- *
 * Only tipc_net_lock set.
 */
-static void bclink_peek_nack(struct tipc_msg *msg)
+
+static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
 {
-	struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
+	struct tipc_node *n_ptr = tipc_node_find(dest);
+	u32 my_after, my_to;
 
-	if (unlikely(!n_ptr))
+	if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
 		return;
-
 	tipc_node_lock(n_ptr);
+	/*
+	 * Modify gap to suppress unnecessary NACKs from this node
+	 */
+	my_after = n_ptr->bclink.gap_after;
+	my_to = n_ptr->bclink.gap_to;
+
+	if (less_eq(gap_after, my_after)) {
+		if (less(my_after, gap_to) && less(gap_to, my_to))
+			n_ptr->bclink.gap_after = gap_to;
+		else if (less_eq(my_to, gap_to))
+			n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
+	} else if (less_eq(gap_after, my_to)) {
+		if (less_eq(my_to, gap_to))
+			n_ptr->bclink.gap_to = gap_after;
+	} else {
+		/*
+		 * Expand gap if missing bufs not in deferred queue:
+		 */
+		struct sk_buff *buf = n_ptr->bclink.deferred_head;
+		u32 prev = n_ptr->bclink.gap_to;
 
-	if (n_ptr->bclink.recv_permitted &&
-	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
-	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
-		n_ptr->bclink.oos_state = 2;
+		for (; buf; buf = buf->next) {
+			u32 seqno = buf_seqno(buf);
 
+			if (mod(seqno - prev) != 1) {
+				buf = NULL;
+				break;
+			}
+			if (seqno == gap_after)
+				break;
+			prev = seqno;
+		}
+		if (buf == NULL)
+			n_ptr->bclink.gap_to = gap_after;
+	}
+	/*
+	 * Some nodes may send a complementary NACK now:
+	 */
+	if (bclink_ack_allowed(sender_tag + 1)) {
+		if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
+			bclink_send_nack(n_ptr);
+			bclink_set_gap(n_ptr);
+		}
+	}
 	tipc_node_unlock(n_ptr);
 }
 
-/*
+/**
  * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
  */
+
 int tipc_bclink_send_msg(struct sk_buff *buf)
 {
 	int res;
 
 	spin_lock_bh(&bc_lock);
 
-	if (!bclink->bcast_nodes.count) {
-		res = msg_data_sz(buf_msg(buf));
-		kfree_skb(buf);
-		goto exit;
-	}
-
 	res = tipc_link_send_buf(bcl, buf);
-	if (likely(res >= 0)) {
+	if (likely(res > 0))
 		bclink_set_last_sent();
-		bcl->stats.queue_sz_counts++;
-		bcl->stats.accu_queue_sz += bcl->out_queue_size;
-	}
-exit:
-	spin_unlock_bh(&bc_lock);
-	return res;
-}
-
-/**
- * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
- *
- * Called with both sending node's lock and bc_lock taken.
- */
-static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
-{
-	bclink_update_last_sent(node, seqno);
-	node->bclink.last_in = seqno;
-	node->bclink.oos_state = 0;
-	bcl->stats.recv_info++;
 
-	/*
-	 * Unicast an ACK periodically, ensuring that
-	 * all nodes in the cluster don't ACK at the same time
-	 */
+	bcl->stats.queue_sz_counts++;
+	bcl->stats.accu_queue_sz += bcl->out_queue_size;
 
-	if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
-		tipc_link_send_proto_msg(
-			node->active_links[node->addr & 1],
-			STATE_MSG, 0, 0, 0, 0, 0);
-		bcl->stats.sent_acks++;
-	}
+	spin_unlock_bh(&bc_lock);
+	return res;
 }
 
 /**
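The gap machinery reinstated in this hunk boils down to one range per peer: (gap_after, gap_to] names the packets known to be missing, and bclink_send_nack() fires only while that range is non-empty. A toy model under those assumptions (illustrative struct; the kernel keeps the two fields in the per-node bclink state):

/*
 * Toy model of the (gap_after, gap_to] range set by bclink_set_gap():
 * everything after the last in-sequence packet and before the head of
 * the deferred queue is missing, and is what a NACK asks for.
 */
#include <stdio.h>

struct gap {
	unsigned int after;	/* last packet received in sequence */
	unsigned int to;	/* last packet known to be missing */
};

static struct gap set_gap(unsigned int last_in, unsigned int deferred_head)
{
	struct gap g = { last_in, last_in };	/* empty: nothing missing */

	if (deferred_head)	/* 0 means the deferred queue is empty */
		g.to = deferred_head - 1;
	return g;
}

int main(void)
{
	/* in sequence up to 100, first out-of-order packet is 105 */
	struct gap g = set_gap(100, 105);

	if (g.after != g.to)	/* the bclink_send_nack() precondition */
		printf("NACK requests packets %u..%u\n", g.after + 1, g.to);
	return 0;
}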
@@ -411,33 +422,24 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
  *
  * tipc_net_lock is read_locked, no other locks set
  */
+
 void tipc_bclink_recv_pkt(struct sk_buff *buf)
 {
 	struct tipc_msg *msg = buf_msg(buf);
-	struct tipc_node *node;
+	struct tipc_node *node = tipc_node_find(msg_prevnode(msg));
 	u32 next_in;
 	u32 seqno;
-	int deferred;
-
-	/* Screen out unwanted broadcast messages */
+	struct sk_buff *deferred;
 
-	if (msg_mc_netid(msg) != tipc_net_id)
-		goto exit;
-
-	node = tipc_node_find(msg_prevnode(msg));
-	if (unlikely(!node))
-		goto exit;
-
-	tipc_node_lock(node);
-	if (unlikely(!node->bclink.recv_permitted))
-		goto unlock;
-
-	/* Handle broadcast protocol message */
+	if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
+		     (msg_mc_netid(msg) != tipc_net_id))) {
+		buf_discard(buf);
+		return;
+	}
 
 	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
-		if (msg_type(msg) != STATE_MSG)
-			goto unlock;
 		if (msg_destnode(msg) == tipc_own_addr) {
+			tipc_node_lock(node);
 			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
 			tipc_node_unlock(node);
 			spin_lock_bh(&bc_lock);
@@ -447,124 +449,85 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
 					  msg_bcgap_to(msg));
 			spin_unlock_bh(&bc_lock);
 		} else {
-			tipc_node_unlock(node);
-			bclink_peek_nack(msg);
+			tipc_bclink_peek_nack(msg_destnode(msg),
+					      msg_bcast_tag(msg),
+					      msg_bcgap_after(msg),
+					      msg_bcgap_to(msg));
 		}
-		goto exit;
+		buf_discard(buf);
+		return;
 	}
 
-	/* Handle in-sequence broadcast message */
-
-	seqno = msg_seqno(msg);
+	tipc_node_lock(node);
+receive:
+	deferred = node->bclink.deferred_head;
 	next_in = mod(node->bclink.last_in + 1);
+	seqno = msg_seqno(msg);
 
 	if (likely(seqno == next_in)) {
-receive:
-		/* Deliver message to destination */
-
+		bcl->stats.recv_info++;
+		node->bclink.last_in++;
+		bclink_set_gap(node);
+		if (unlikely(bclink_ack_allowed(seqno))) {
+			bclink_send_ack(node);
+			bcl->stats.sent_acks++;
+		}
 		if (likely(msg_isdata(msg))) {
-			spin_lock_bh(&bc_lock);
-			bclink_accept_pkt(node, seqno);
-			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
-			if (likely(msg_mcast(msg)))
-				tipc_port_recv_mcast(buf, NULL);
-			else
-				kfree_skb(buf);
+			tipc_port_recv_mcast(buf, NULL);
 		} else if (msg_user(msg) == MSG_BUNDLER) {
-			spin_lock_bh(&bc_lock);
-			bclink_accept_pkt(node, seqno);
 			bcl->stats.recv_bundles++;
 			bcl->stats.recv_bundled += msg_msgcnt(msg);
-			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
 			tipc_link_recv_bundle(buf);
 		} else if (msg_user(msg) == MSG_FRAGMENTER) {
-			int ret = tipc_link_recv_fragment(&node->bclink.defragm,
-							  &buf, &msg);
-			if (ret < 0)
-				goto unlock;
-			spin_lock_bh(&bc_lock);
-			bclink_accept_pkt(node, seqno);
 			bcl->stats.recv_fragments++;
-			if (ret > 0)
+			if (tipc_link_recv_fragment(&node->bclink.defragm,
+						    &buf, &msg))
 				bcl->stats.recv_fragmented++;
-			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
 			tipc_net_route_msg(buf);
-		} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
-			spin_lock_bh(&bc_lock);
-			bclink_accept_pkt(node, seqno);
-			spin_unlock_bh(&bc_lock);
-			tipc_node_unlock(node);
-			tipc_named_recv(buf);
 		} else {
-			spin_lock_bh(&bc_lock);
-			bclink_accept_pkt(node, seqno);
-			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
-			kfree_skb(buf);
+			tipc_net_route_msg(buf);
 		}
-		buf = NULL;
-
-		/* Determine new synchronization state */
-
-		tipc_node_lock(node);
-		if (unlikely(!tipc_node_is_up(node)))
-			goto unlock;
-
-		if (node->bclink.last_in == node->bclink.last_sent)
-			goto unlock;
-
-		if (!node->bclink.deferred_head) {
-			node->bclink.oos_state = 1;
-			goto unlock;
+		if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
+			tipc_node_lock(node);
+			buf = deferred;
+			msg = buf_msg(buf);
+			node->bclink.deferred_head = deferred->next;
+			goto receive;
 		}
-
-		msg = buf_msg(node->bclink.deferred_head);
-		seqno = msg_seqno(msg);
-		next_in = mod(next_in + 1);
-		if (seqno != next_in)
-			goto unlock;
-
-		/* Take in-sequence message from deferred queue & deliver it */
-
-		buf = node->bclink.deferred_head;
-		node->bclink.deferred_head = buf->next;
-		node->bclink.deferred_size--;
-		goto receive;
-	}
-
-	/* Handle out-of-sequence broadcast message */
-
-	if (less(next_in, seqno)) {
-		deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
-					       &node->bclink.deferred_tail,
-					       buf);
-		node->bclink.deferred_size += deferred;
-		bclink_update_last_sent(node, seqno);
-		buf = NULL;
-	} else
-		deferred = 0;
-
-	spin_lock_bh(&bc_lock);
-
-	if (deferred)
-		bcl->stats.deferred_recv++;
-	else
+		return;
+	} else if (less(next_in, seqno)) {
+		u32 gap_after = node->bclink.gap_after;
+		u32 gap_to = node->bclink.gap_to;
+
+		if (tipc_link_defer_pkt(&node->bclink.deferred_head,
+					&node->bclink.deferred_tail,
+					buf)) {
+			node->bclink.nack_sync++;
+			bcl->stats.deferred_recv++;
+			if (seqno == mod(gap_after + 1))
+				node->bclink.gap_after = seqno;
+			else if (less(gap_after, seqno) && less(seqno, gap_to))
+				node->bclink.gap_to = seqno;
+		}
+		if (bclink_ack_allowed(node->bclink.nack_sync)) {
+			if (gap_to != gap_after)
+				bclink_send_nack(node);
+			bclink_set_gap(node);
+		}
+	} else {
 		bcl->stats.duplicates++;
-
-	spin_unlock_bh(&bc_lock);
-
-unlock:
+		buf_discard(buf);
+	}
 	tipc_node_unlock(node);
-exit:
-	kfree_skb(buf);
 }
 
 u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
 {
-	return (n_ptr->bclink.recv_permitted &&
+	return (n_ptr->bclink.supported &&
 		(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
 }
 
@@ -572,41 +535,38 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
 /**
  * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
  *
- * Send packet over as many bearers as necessary to reach all nodes
- * that have joined the broadcast link.
+ * Send through as many bearers as necessary to reach all nodes
+ * that support TIPC multicasting.
  *
- * Returns 0 (packet sent successfully) under all circumstances,
- * since the broadcast link's pseudo-bearer never blocks
+ * Returns 0 if packet sent successfully, non-zero if not
 */
+
 static int tipc_bcbearer_send(struct sk_buff *buf,
 			      struct tipc_bearer *unused1,
 			      struct tipc_media_addr *unused2)
 {
 	int bp_index;
 
-	/*
-	 * Prepare broadcast link message for reliable transmission,
-	 * if first time trying to send it;
-	 * preparation is skipped for broadcast link protocol messages
-	 * since they are sent in an unreliable manner and don't need it
-	 */
+	/* Prepare buffer for broadcasting (if first time trying to send it) */
+
 	if (likely(!msg_non_seq(buf_msg(buf)))) {
 		struct tipc_msg *msg;
 
-		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
+		bcbuf_set_acks(buf, tipc_bcast_nmap.count);
 		msg = buf_msg(buf);
 		msg_set_non_seq(msg, 1);
 		msg_set_mc_netid(msg, tipc_net_id);
 		bcl->stats.sent_info++;
 
-		if (WARN_ON(!bclink->bcast_nodes.count)) {
+		if (WARN_ON(!tipc_bcast_nmap.count)) {
 			dump_stack();
 			return 0;
 		}
 	}
 
 	/* Send buffer over bearers until all targets reached */
-	bcbearer->remains = bclink->bcast_nodes;
+
+	bcbearer->remains = tipc_bcast_nmap;
 
 	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
 		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
@@ -619,14 +579,16 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 		if (bcbearer->remains_new.count == bcbearer->remains.count)
 			continue;	/* bearer pair doesn't add anything */
 
-		if (!tipc_bearer_blocked(p))
-			tipc_bearer_send(p, buf, &p->media->bcast_addr);
-		else if (s && !tipc_bearer_blocked(s))
+		if (p->blocked ||
+		    p->media->send_msg(buf, p, &p->media->bcast_addr)) {
 			/* unable to send on primary bearer */
-			tipc_bearer_send(s, buf, &s->media->bcast_addr);
-		else
-			/* unable to send on either bearer */
-			continue;
+			if (!s || s->blocked ||
+			    s->media->send_msg(buf, s,
+						&s->media->bcast_addr)) {
+				/* unable to send on either bearer */
+				continue;
+			}
+		}
 
 		if (s) {
 			bcbearer->bpairs[bp_index].primary = s;
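Both sides of this hunk implement the same per-priority fallback: the primary bearer is tried first, the secondary only if the primary is blocked or its send fails, and the loop moves to the next priority level only when both fail. A compact sketch of that control flow (hypothetical bearer type and bearer_send() helper; the kernel additionally tracks which nodes remain unreached):

/*
 * Sketch of the bearer-pair fallback in tipc_bcbearer_send().
 * bearer_send() returns 0 on success, mirroring media->send_msg().
 */
#include <stdio.h>

struct bearer {
	const char *name;
	int blocked;
};

static int bearer_send(struct bearer *b)
{
	if (!b || b->blocked)
		return -1;
	printf("sent on %s\n", b->name);
	return 0;
}

static void send_pair(struct bearer *p, struct bearer *s)
{
	/* secondary is attempted only when the primary fails */
	if (bearer_send(p) != 0 && bearer_send(s) != 0)
		printf("both bearers failed, trying next priority\n");
}

int main(void)
{
	struct bearer eth0 = { "eth0", 1 };	/* primary congested */
	struct bearer eth1 = { "eth1", 0 };

	send_pair(&eth0, &eth1);	/* falls back to eth1 */
	return 0;
}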
@@ -634,27 +596,35 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 		}
 
 		if (bcbearer->remains_new.count == 0)
-			break;	/* all targets reached */
+			return 0;
 
 		bcbearer->remains = bcbearer->remains_new;
 	}
 
-	return 0;
+	/*
+	 * Unable to reach all targets (indicate success, since currently
+	 * there isn't code in place to properly block & unblock the
+	 * pseudo-bearer used by the broadcast link)
+	 */
+
+	return TIPC_OK;
 }
 
 /**
  * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
+
 void tipc_bcbearer_sort(void)
 {
-	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
-	struct tipc_bcbearer_pair *bp_curr;
+	struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
+	struct bcbearer_pair *bp_curr;
 	int b_index;
 	int pri;
 
 	spin_lock_bh(&bc_lock);
 
 	/* Group bearers by priority (can assume max of two per priority) */
+
 	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
 
 	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
@@ -670,6 +640,7 @@ void tipc_bcbearer_sort(void)
 	}
 
 	/* Create array of bearer pairs for broadcasting */
+
 	bp_curr = bcbearer->bpairs;
 	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
 
@@ -696,46 +667,72 @@ void tipc_bcbearer_sort(void)
 	spin_unlock_bh(&bc_lock);
 }
 
+/**
+ * tipc_bcbearer_push - resolve bearer congestion
+ *
+ * Forces bclink to push out any unsent packets, until all packets are gone
+ * or congestion reoccurs.
+ * No locks set when function called
+ */
+
+void tipc_bcbearer_push(void)
+{
+	struct tipc_bearer *b_ptr;
+
+	spin_lock_bh(&bc_lock);
+	b_ptr = &bcbearer->bearer;
+	if (b_ptr->blocked) {
+		b_ptr->blocked = 0;
+		tipc_bearer_lock_push(b_ptr);
+	}
+	spin_unlock_bh(&bc_lock);
+}
+
 
 int tipc_bclink_stats(char *buf, const u32 buf_size)
 {
-	int ret;
-	struct tipc_stats *s;
+	struct print_buf pb;
 
 	if (!bcl)
 		return 0;
 
+	tipc_printbuf_init(&pb, buf, buf_size);
+
 	spin_lock_bh(&bc_lock);
 
-	s = &bcl->stats;
-
-	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
-			    " Window:%u packets\n",
-			    bcl->name, bcl->queue_limit[0]);
-	ret += tipc_snprintf(buf + ret, buf_size - ret,
-			     " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
-			     s->recv_info, s->recv_fragments,
-			     s->recv_fragmented, s->recv_bundles,
-			     s->recv_bundled);
-	ret += tipc_snprintf(buf + ret, buf_size - ret,
-			     " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
-			     s->sent_info, s->sent_fragments,
-			     s->sent_fragmented, s->sent_bundles,
-			     s->sent_bundled);
-	ret += tipc_snprintf(buf + ret, buf_size - ret,
-			     " RX naks:%u defs:%u dups:%u\n",
-			     s->recv_nacks, s->deferred_recv, s->duplicates);
-	ret += tipc_snprintf(buf + ret, buf_size - ret,
-			     " TX naks:%u acks:%u dups:%u\n",
-			     s->sent_nacks, s->sent_acks, s->retransmitted);
-	ret += tipc_snprintf(buf + ret, buf_size - ret,
-			     " Congestion link:%u Send queue max:%u avg:%u\n",
-			     s->link_congs, s->max_queue_sz,
-			     s->queue_sz_counts ?
-			     (s->accu_queue_sz / s->queue_sz_counts) : 0);
+	tipc_printf(&pb, "Link <%s>\n"
+		    " Window:%u packets\n",
+		    bcl->name, bcl->queue_limit[0]);
+	tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+		    bcl->stats.recv_info,
+		    bcl->stats.recv_fragments,
+		    bcl->stats.recv_fragmented,
+		    bcl->stats.recv_bundles,
+		    bcl->stats.recv_bundled);
+	tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+		    bcl->stats.sent_info,
+		    bcl->stats.sent_fragments,
+		    bcl->stats.sent_fragmented,
+		    bcl->stats.sent_bundles,
+		    bcl->stats.sent_bundled);
+	tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
+		    bcl->stats.recv_nacks,
+		    bcl->stats.deferred_recv,
+		    bcl->stats.duplicates);
+	tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
+		    bcl->stats.sent_nacks,
+		    bcl->stats.sent_acks,
+		    bcl->stats.retransmitted);
+	tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
+		    bcl->stats.bearer_congs,
+		    bcl->stats.link_congs,
+		    bcl->stats.max_queue_sz,
+		    bcl->stats.queue_sz_counts
+		    ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
+		    : 0);
 
 	spin_unlock_bh(&bc_lock);
-	return ret;
+	return tipc_printbuf_validate(&pb);
 }
 
 int tipc_bclink_reset_stats(void)
@@ -762,12 +759,25 @@ int tipc_bclink_set_queue_limits(u32 limit)
 	return 0;
 }
 
-void tipc_bclink_init(void)
+int tipc_bclink_init(void)
 {
+	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
+	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
+	if (!bcbearer || !bclink) {
+		warn("Multicast link creation failed, no memory\n");
+		kfree(bcbearer);
+		bcbearer = NULL;
+		kfree(bclink);
+		bclink = NULL;
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
 	bcbearer->bearer.media = &bcbearer->media;
 	bcbearer->media.send_msg = tipc_bcbearer_send;
-	sprintf(bcbearer->media.name, "tipc-broadcast");
+	sprintf(bcbearer->media.name, "tipc-multicast");
 
+	bcl = &bclink->link;
 	INIT_LIST_HEAD(&bcl->waiting_ports);
 	bcl->next_out_no = 1;
 	spin_lock_init(&bclink->node.lock);
@@ -777,22 +787,29 @@ void tipc_bclink_init(void)
 	bcl->b_ptr = &bcbearer->bearer;
 	bcl->state = WORKING_WORKING;
 	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
+
+	return 0;
 }
 
 void tipc_bclink_stop(void)
 {
 	spin_lock_bh(&bc_lock);
-	tipc_link_stop(bcl);
+	if (bcbearer) {
+		tipc_link_stop(bcl);
+		bcl = NULL;
+		kfree(bclink);
+		bclink = NULL;
+		kfree(bcbearer);
+		bcbearer = NULL;
+	}
 	spin_unlock_bh(&bc_lock);
-
-	memset(bclink, 0, sizeof(*bclink));
-	memset(bcbearer, 0, sizeof(*bcbearer));
 }
 
 
 /**
  * tipc_nmap_add - add a node to a node map
 */
+
 void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
 {
 	int n = tipc_node(node);
@@ -808,6 +825,7 @@ void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
 /**
  * tipc_nmap_remove - remove a node from a node map
 */
+
 void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
 {
 	int n = tipc_node(node);
@@ -826,6 +844,7 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
  * @nm_b: input node map B
  * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
+
 static void tipc_nmap_diff(struct tipc_node_map *nm_a,
 			   struct tipc_node_map *nm_b,
 			   struct tipc_node_map *nm_diff)
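tipc_nmap_diff() computes the set of nodes in A that are not in B. Since a node map is a fixed-size bitmap, the difference is a word-by-word a & ~b with a running bit count. A small sketch under simplified types (not the kernel structure):

/*
 * Sketch of the node-map difference used by the broadcast send loop:
 * diff = A & ~B per word, keeping a population count in 'count'.
 * __builtin_popcount() assumes a GCC/Clang-style compiler.
 */
#include <stdio.h>

#define MAP_WORDS 8	/* enough bits for the sketch */

struct node_map {
	unsigned int count;		/* number of set bits */
	unsigned int map[MAP_WORDS];
};

static void nmap_diff(const struct node_map *a, const struct node_map *b,
		      struct node_map *diff)
{
	int w;

	diff->count = 0;
	for (w = 0; w < MAP_WORDS; w++) {
		diff->map[w] = a->map[w] & ~b->map[w];
		diff->count += __builtin_popcount(diff->map[w]);
	}
}

int main(void)
{
	struct node_map a = { 3, { 0x7 } };	/* nodes 0, 1, 2 */
	struct node_map b = { 1, { 0x2 } };	/* node 1 */
	struct node_map d;

	nmap_diff(&a, &b, &d);
	printf("A-B has %u nodes, word0=0x%x\n", d.count, d.map[0]);
	return 0;
}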
@@ -851,9 +870,10 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
 /**
  * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */
-void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
+
+void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
 {
-	struct tipc_port_list *item = pl_ptr;
+	struct port_list *item = pl_ptr;
 	int i;
 	int item_sz = PLSIZE;
 	int cnt = pl_ptr->count;
@@ -872,7 +892,7 @@ void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
 		if (!item->next) {
 			item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
 			if (!item->next) {
-				pr_warn("Incomplete multicast delivery, no memory\n");
+				warn("Incomplete multicast delivery, no memory\n");
 				return;
 			}
 			item->next->next = NULL;
@@ -884,13 +904,15 @@ void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
  * tipc_port_list_free - free dynamically created entries in port_list chain
  *
 */
-void tipc_port_list_free(struct tipc_port_list *pl_ptr)
+
+void tipc_port_list_free(struct port_list *pl_ptr)
 {
-	struct tipc_port_list *item;
-	struct tipc_port_list *next;
+	struct port_list *item;
+	struct port_list *next;
 
 	for (item = pl_ptr->next; item; item = next) {
 		next = item->next;
 		kfree(item);
 	}
 }
+
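tipc_port_list_add() and tipc_port_list_free() manage a chain of fixed-size arrays: the head item holds the running count plus up to PLSIZE ports inline, and further items are allocated only when the tail fills up, so short multicast destination lists need no allocation at all. A userspace sketch of that layout (duplicate filtering omitted for brevity; illustrative only):

/*
 * Sketch of the port_list chained-array layout. The head lives on the
 * caller's stack in the kernel; only overflow items are heap-allocated.
 */
#include <stdio.h>
#include <stdlib.h>

#define PLSIZE 32

struct port_list {
	int count;			/* total ports, kept in the head */
	struct port_list *next;
	unsigned int ports[PLSIZE];
};

static void port_list_add(struct port_list *pl, unsigned int port)
{
	struct port_list *item = pl;
	int cnt = pl->count;

	/* walk to the item that owns slot 'cnt', chaining as needed */
	while (cnt >= PLSIZE) {
		if (!item->next) {
			item->next = calloc(1, sizeof(*item));
			if (!item->next)
				return;	/* delivery stays incomplete */
		}
		item = item->next;
		cnt -= PLSIZE;
	}
	item->ports[cnt] = port;
	pl->count++;
}

static void port_list_free(struct port_list *pl)
{
	struct port_list *item, *next;

	for (item = pl->next; item; item = next) {
		next = item->next;
		free(item);
	}
}

int main(void)
{
	struct port_list head = { 0, NULL, { 0 } };
	unsigned int p;

	for (p = 0; p < 40; p++)	/* forces one chained item */
		port_list_add(&head, 1000 + p);
	printf("stored %d ports\n", head.count);
	port_list_free(&head);
	return 0;
}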