path: root/net
Diffstat (limited to 'net')
-rw-r--r--  net/atm/mpc.c                         |   3
-rw-r--r--  net/core/dev.c                        |   5
-rw-r--r--  net/core/netpoll.c                    |  36
-rw-r--r--  net/core/skbuff.c                     |   5
-rw-r--r--  net/ipv4/tcp.c                        |  22
-rw-r--r--  net/ipv6/route.c                      |   2
-rw-r--r--  net/netrom/nr_route.c                 |  12
-rw-r--r--  net/rxrpc/call.c                      |   3
-rw-r--r--  net/rxrpc/connection.c                |   3
-rw-r--r--  net/rxrpc/krxsecd.c                   |   3
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c   |   2
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seal.c   |   2
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_mech.c  |   2
-rw-r--r--  net/tipc/bcast.c                      |  83
-rw-r--r--  net/tipc/bcast.h                      |   2
-rw-r--r--  net/tipc/bearer.c                     |  72
-rw-r--r--  net/tipc/cluster.c                    |  22
-rw-r--r--  net/tipc/config.c                     |  87
-rw-r--r--  net/tipc/core.c                       |   7
-rw-r--r--  net/tipc/core.h                       |  21
-rw-r--r--  net/tipc/dbg.c                        |   2
-rw-r--r--  net/tipc/discover.c                   |  13
-rw-r--r--  net/tipc/eth_media.c                  |  29
-rw-r--r--  net/tipc/handler.c                    |   2
-rw-r--r--  net/tipc/link.c                       | 217
-rw-r--r--  net/tipc/name_distr.c                 |  30
-rw-r--r--  net/tipc/name_table.c                 | 207
-rw-r--r--  net/tipc/net.c                        |   2
-rw-r--r--  net/tipc/node.c                       |  78
-rw-r--r--  net/tipc/node.h                       |   2
-rw-r--r--  net/tipc/node_subscr.c                |  15
-rw-r--r--  net/tipc/port.c                       |  45
-rw-r--r--  net/tipc/ref.c                        |  35
-rw-r--r--  net/tipc/socket.c                     | 100
-rw-r--r--  net/tipc/subscr.c                     |  20
-rw-r--r--  net/tipc/user_reg.c                   |   2
-rw-r--r--  net/tipc/zone.c                       |  19
37 files changed, 728 insertions(+), 484 deletions(-)
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index a48a5d580408..5fe77df00186 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1113,10 +1113,9 @@ static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_clien
 
 static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 {
-        unsigned char *ip;
-
         uint32_t dst_ip = msg->content.in_info.in_dst_ip;
         in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc);
+
         dprintk("mpoa: (%s) MPOA_res_reply_rcvd: ip %u.%u.%u.%u\n", mpc->dev->name, NIPQUAD(dst_ip));
         ddprintk("mpoa: (%s) MPOA_res_reply_rcvd() entry = %p", mpc->dev->name, entry);
         if(entry == NULL){
diff --git a/net/core/dev.c b/net/core/dev.c
index ea2469398bd5..f1c52cbd6ef7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -230,7 +230,7 @@ extern void netdev_unregister_sysfs(struct net_device *);
  * For efficiency
  */
 
-int netdev_nit;
+static int netdev_nit;
 
 /*
  * Add a protocol ID to the list. Now that the input handler is
@@ -1325,9 +1325,12 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 nskb->next = NULL;
                 rc = dev->hard_start_xmit(nskb, dev);
                 if (unlikely(rc)) {
+                        nskb->next = skb->next;
                         skb->next = nskb;
                         return rc;
                 }
+                if (unlikely(netif_queue_stopped(dev) && skb->next))
+                        return NETDEV_TX_BUSY;
         } while (skb->next);
 
         skb->destructor = DEV_GSO_CB(skb)->destructor;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9cb781830380..471da451cd48 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -54,6 +54,7 @@ static atomic_t trapped;
                  sizeof(struct iphdr) + sizeof(struct ethhdr))
 
 static void zap_completion_queue(void);
+static void arp_reply(struct sk_buff *skb);
 
 static void queue_process(void *p)
 {
@@ -153,6 +154,22 @@ static void poll_napi(struct netpoll *np)
         }
 }
 
+static void service_arp_queue(struct netpoll_info *npi)
+{
+        struct sk_buff *skb;
+
+        if (unlikely(!npi))
+                return;
+
+        skb = skb_dequeue(&npi->arp_tx);
+
+        while (skb != NULL) {
+                arp_reply(skb);
+                skb = skb_dequeue(&npi->arp_tx);
+        }
+        return;
+}
+
 void netpoll_poll(struct netpoll *np)
 {
         if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
@@ -163,6 +180,8 @@ void netpoll_poll(struct netpoll *np)
         if (np->dev->poll)
                 poll_napi(np);
 
+        service_arp_queue(np->dev->npinfo);
+
         zap_completion_queue();
 }
 
@@ -279,14 +298,10 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                  * network drivers do not expect to be called if the queue is
                  * stopped.
                  */
-                if (netif_queue_stopped(np->dev)) {
-                        netif_tx_unlock(np->dev);
-                        netpoll_poll(np);
-                        udelay(50);
-                        continue;
-                }
+                status = NETDEV_TX_BUSY;
+                if (!netif_queue_stopped(np->dev))
+                        status = np->dev->hard_start_xmit(skb, np->dev);
 
-                status = np->dev->hard_start_xmit(skb, np->dev);
                 netif_tx_unlock(np->dev);
 
                 /* success */
@@ -446,7 +461,9 @@ int __netpoll_rx(struct sk_buff *skb)
         int proto, len, ulen;
         struct iphdr *iph;
         struct udphdr *uh;
-        struct netpoll *np = skb->dev->npinfo->rx_np;
+        struct netpoll_info *npi = skb->dev->npinfo;
+        struct netpoll *np = npi->rx_np;
+
 
         if (!np)
                 goto out;
@@ -456,7 +473,7 @@ int __netpoll_rx(struct sk_buff *skb)
         /* check if netpoll clients need ARP */
         if (skb->protocol == __constant_htons(ETH_P_ARP) &&
             atomic_read(&trapped)) {
-                arp_reply(skb);
+                skb_queue_tail(&npi->arp_tx, skb);
                 return 1;
         }
 
@@ -651,6 +668,7 @@ int netpoll_setup(struct netpoll *np)
                 npinfo->poll_owner = -1;
                 npinfo->tries = MAX_RETRIES;
                 spin_lock_init(&npinfo->rx_lock);
+                skb_queue_head_init(&npinfo->arp_tx);
         } else
                 npinfo = ndev->npinfo;
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e5044ba3ab6..6edbb90cbcec 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1739,12 +1739,15 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
                        unsigned int to, struct ts_config *config,
                        struct ts_state *state)
 {
+        unsigned int ret;
+
         config->get_next_block = skb_ts_get_next_block;
         config->finish = skb_ts_finish;
 
         skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
 
-        return textsearch_find(config, state);
+        ret = textsearch_find(config, state);
+        return (ret <= to - from ? ret : UINT_MAX);
 }
 
 /**
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0e029c4e2903..c04176be7ed1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2166,7 +2166,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
         if (!pskb_may_pull(skb, thlen))
                 goto out;
 
-        oldlen = ~htonl(skb->len);
+        oldlen = (u16)~skb->len;
         __skb_pull(skb, thlen);
 
         segs = skb_segment(skb, sg);
@@ -2174,7 +2174,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
                 goto out;
 
         len = skb_shinfo(skb)->gso_size;
-        delta = csum_add(oldlen, htonl(thlen + len));
+        delta = htonl(oldlen + (thlen + len));
 
         skb = segs;
         th = skb->h.th;
@@ -2183,10 +2183,10 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
         do {
                 th->fin = th->psh = 0;
 
-                if (skb->ip_summed == CHECKSUM_NONE) {
-                        th->check = csum_fold(csum_partial(
-                                skb->h.raw, thlen, csum_add(skb->csum, delta)));
-                }
+                th->check = ~csum_fold(th->check + delta);
+                if (skb->ip_summed != CHECKSUM_HW)
+                        th->check = csum_fold(csum_partial(skb->h.raw, thlen,
+                                                           skb->csum));
 
                 seq += len;
                 skb = skb->next;
@@ -2196,11 +2196,11 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
                 th->cwr = 0;
         } while (skb->next);
 
-        if (skb->ip_summed == CHECKSUM_NONE) {
-                delta = csum_add(oldlen, htonl(skb->tail - skb->h.raw));
-                th->check = csum_fold(csum_partial(
-                        skb->h.raw, thlen, csum_add(skb->csum, delta)));
-        }
+        delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
+        th->check = ~csum_fold(th->check + delta);
+        if (skb->ip_summed != CHECKSUM_HW)
+                th->check = csum_fold(csum_partial(skb->h.raw, thlen,
+                                                   skb->csum));
 
 out:
         return segs;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8a777932786d..e728980160d2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -349,7 +349,7 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
             (strict & RT6_SELECT_F_REACHABLE) &&
             last && last != rt0) {
                 /* no entries matched; do round-robin */
-                static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+                static DEFINE_SPINLOCK(lock);
                 spin_lock(&lock);
                 *head = rt0->u.next;
                 rt0->u.next = last->u.next;
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index b3b9097c87c7..c11737f472d6 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -725,15 +725,17 @@ void nr_link_failed(ax25_cb *ax25, int reason)
         struct nr_node *nr_node = NULL;
 
         spin_lock_bh(&nr_neigh_list_lock);
-        nr_neigh_for_each(s, node, &nr_neigh_list)
+        nr_neigh_for_each(s, node, &nr_neigh_list) {
                 if (s->ax25 == ax25) {
                         nr_neigh_hold(s);
                         nr_neigh = s;
                         break;
                 }
+        }
         spin_unlock_bh(&nr_neigh_list_lock);
 
-        if (nr_neigh == NULL) return;
+        if (nr_neigh == NULL)
+                return;
 
         nr_neigh->ax25 = NULL;
         ax25_cb_put(ax25);
@@ -743,11 +745,13 @@ void nr_link_failed(ax25_cb *ax25, int reason)
                 return;
         }
         spin_lock_bh(&nr_node_list_lock);
-        nr_node_for_each(nr_node, node, &nr_node_list)
+        nr_node_for_each(nr_node, node, &nr_node_list) {
                 nr_node_lock(nr_node);
-                if (nr_node->which < nr_node->count && nr_node->routes[nr_node->which].neighbour == nr_neigh)
+                if (nr_node->which < nr_node->count &&
+                    nr_node->routes[nr_node->which].neighbour == nr_neigh)
                         nr_node->which++;
                 nr_node_unlock(nr_node);
+        }
         spin_unlock_bh(&nr_node_list_lock);
         nr_neigh_put(nr_neigh);
 }
diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c
index c4aeb7d40266..d07122b57e0d 100644
--- a/net/rxrpc/call.c
+++ b/net/rxrpc/call.c
@@ -1098,8 +1098,7 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call,
 
                 call->app_ready_seq = pmsg->seq;
                 call->app_ready_qty += pmsg->dsize;
-                list_del_init(&pmsg->link);
-                list_add_tail(&pmsg->link, &call->app_readyq);
+                list_move_tail(&pmsg->link, &call->app_readyq);
         }
 
         /* see if we've got the last packet yet */
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
index 0e0a4553499f..573b572f8f91 100644
--- a/net/rxrpc/connection.c
+++ b/net/rxrpc/connection.c
@@ -402,8 +402,7 @@ void rxrpc_put_connection(struct rxrpc_connection *conn)
 
         /* move to graveyard queue */
         _debug("burying connection: {%08x}", ntohl(conn->conn_id));
-        list_del(&conn->link);
-        list_add_tail(&conn->link, &peer->conn_graveyard);
+        list_move_tail(&conn->link, &peer->conn_graveyard);
 
         rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ);
 
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c
index 1aadd026d354..cea4eb5e2497 100644
--- a/net/rxrpc/krxsecd.c
+++ b/net/rxrpc/krxsecd.c
@@ -160,8 +160,7 @@ void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans)
         list_for_each_safe(_p, _n, &rxrpc_krxsecd_initmsgq) {
                 msg = list_entry(_p, struct rxrpc_message, link);
                 if (msg->trans == trans) {
-                        list_del(&msg->link);
-                        list_add_tail(&msg->link, &tmp);
+                        list_move_tail(&msg->link, &tmp);
                         atomic_dec(&rxrpc_krxsecd_qcount);
                 }
         }
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 129e2bd36aff..b8714a87b34c 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -169,7 +169,7 @@ gss_import_sec_context_kerberos(const void *p,
         }
 
         ctx_id->internal_ctx_id = ctx;
-        dprintk("RPC: Succesfully imported new context.\n");
+        dprintk("RPC: Successfully imported new context.\n");
         return 0;
 
 out_err_free_key2:
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index f43311221a72..2f312164d6d5 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -70,7 +70,7 @@
 # define RPCDBG_FACILITY RPCDBG_AUTH
 #endif
 
-spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(krb5_seq_lock);
 
 u32
 gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 5bf11ccba7cd..3d0432aa45c1 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -201,7 +201,7 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
 
         ctx_id->internal_ctx_id = ctx;
 
-        dprintk("Succesfully imported new spkm context.\n");
+        dprintk("Successfully imported new spkm context.\n");
         return 0;
 
 out_err_free_key2:
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 2c4ecbe50082..1bb75703f384 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -49,13 +49,19 @@
 #include "name_table.h"
 #include "bcast.h"
 
-
 #define MAX_PKT_DEFAULT_MCAST 1500      /* bcast link max packet size (fixed) */
 
 #define BCLINK_WIN_DEFAULT 20           /* bcast link window size (default) */
 
 #define BCLINK_LOG_BUF_SIZE 0
 
+/*
+ * Loss rate for incoming broadcast frames; used to test retransmission code.
+ * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
+ */
+
+#define TIPC_BCAST_LOSS_RATE 0
+
 /**
  * struct bcbearer_pair - a pair of bearers used by broadcast link
  * @primary: pointer to primary bearer
@@ -75,7 +81,14 @@ struct bcbearer_pair {
  * @bearer: (non-standard) broadcast bearer structure
  * @media: (non-standard) broadcast media structure
  * @bpairs: array of bearer pairs
- * @bpairs_temp: array of bearer pairs used during creation of "bpairs"
+ * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
+ * @remains: temporary node map used by tipc_bcbearer_send()
+ * @remains_new: temporary node map used tipc_bcbearer_send()
+ *
+ * Note: The fields labelled "temporary" are incorporated into the bearer
+ * to avoid consuming potentially limited stack space through the use of
+ * large local variables within multicast routines. Concurrent access is
+ * prevented through use of the spinlock "bc_lock".
  */
 
 struct bcbearer {
@@ -83,6 +96,8 @@ struct bcbearer {
         struct media media;
         struct bcbearer_pair bpairs[MAX_BEARERS];
         struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
+        struct node_map remains;
+        struct node_map remains_new;
 };
 
 /**
@@ -102,7 +117,7 @@ struct bclink {
 static struct bcbearer *bcbearer = NULL;
 static struct bclink *bclink = NULL;
 static struct link *bcl = NULL;
-static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(bc_lock);
 
 char tipc_bclink_name[] = "multicast-link";
 
@@ -165,21 +180,18 @@ static int bclink_ack_allowed(u32 n)
  * @after: sequence number of last packet to *not* retransmit
  * @to: sequence number of last packet to retransmit
  *
- * Called with 'node' locked, bc_lock unlocked
+ * Called with bc_lock locked
  */
 
 static void bclink_retransmit_pkt(u32 after, u32 to)
 {
         struct sk_buff *buf;
 
-        spin_lock_bh(&bc_lock);
         buf = bcl->first_out;
         while (buf && less_eq(buf_seqno(buf), after)) {
                 buf = buf->next;
         }
-        if (buf != NULL)
-                tipc_link_retransmit(bcl, buf, mod(to - after));
-        spin_unlock_bh(&bc_lock);
+        tipc_link_retransmit(bcl, buf, mod(to - after));
 }
 
 /**
@@ -346,8 +358,10 @@ static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 g
                 for (; buf; buf = buf->next) {
                         u32 seqno = buf_seqno(buf);
 
-                        if (mod(seqno - prev) != 1)
+                        if (mod(seqno - prev) != 1) {
                                 buf = NULL;
+                                break;
+                        }
                         if (seqno == gap_after)
                                 break;
                         prev = seqno;
@@ -399,7 +413,10 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
  */
 
 void tipc_bclink_recv_pkt(struct sk_buff *buf)
 {
+#if (TIPC_BCAST_LOSS_RATE)
+        static int rx_count = 0;
+#endif
         struct tipc_msg *msg = buf_msg(buf);
         struct node* node = tipc_node_find(msg_prevnode(msg));
         u32 next_in;
@@ -420,9 +437,13 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
                         tipc_node_lock(node);
                         tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                         tipc_node_unlock(node);
+                        spin_lock_bh(&bc_lock);
                         bcl->stats.recv_nacks++;
+                        bcl->owner->next = node; /* remember requestor */
                         bclink_retransmit_pkt(msg_bcgap_after(msg),
                                               msg_bcgap_to(msg));
+                        bcl->owner->next = NULL;
+                        spin_unlock_bh(&bc_lock);
                 } else {
                         tipc_bclink_peek_nack(msg_destnode(msg),
                                               msg_bcast_tag(msg),
@@ -433,6 +454,14 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
                 return;
         }
 
+#if (TIPC_BCAST_LOSS_RATE)
+        if (++rx_count == TIPC_BCAST_LOSS_RATE) {
+                rx_count = 0;
+                buf_discard(buf);
+                return;
+        }
+#endif
+
         tipc_node_lock(node);
 receive:
         deferred = node->bclink.deferred_head;
@@ -531,12 +560,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 {
         static int send_count = 0;
 
-        struct node_map *remains;
-        struct node_map *remains_new;
-        struct node_map *remains_tmp;
         int bp_index;
         int swap_time;
-        int err;
 
         /* Prepare buffer for broadcasting (if first time trying to send it) */
 
@@ -557,9 +582,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 
         /* Send buffer over bearers until all targets reached */
 
-        remains = kmalloc(sizeof(struct node_map), GFP_ATOMIC);
-        remains_new = kmalloc(sizeof(struct node_map), GFP_ATOMIC);
-        *remains = tipc_cltr_bcast_nodes;
+        bcbearer->remains = tipc_cltr_bcast_nodes;
 
         for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
                 struct bearer *p = bcbearer->bpairs[bp_index].primary;
@@ -568,8 +591,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
                 if (!p)
                         break; /* no more bearers to try */
 
-                tipc_nmap_diff(remains, &p->nodes, remains_new);
-                if (remains_new->count == remains->count)
+                tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
+                if (bcbearer->remains_new.count == bcbearer->remains.count)
                         continue; /* bearer pair doesn't add anything */
 
                 if (!p->publ.blocked &&
@@ -587,27 +610,17 @@ swap:
                 bcbearer->bpairs[bp_index].primary = s;
                 bcbearer->bpairs[bp_index].secondary = p;
 update:
-                if (remains_new->count == 0) {
-                        err = TIPC_OK;
-                        goto out;
-                }
+                if (bcbearer->remains_new.count == 0)
+                        return TIPC_OK;
 
-                /* swap map */
-                remains_tmp = remains;
-                remains = remains_new;
-                remains_new = remains_tmp;
+                bcbearer->remains = bcbearer->remains_new;
         }
 
         /* Unable to reach all targets */
 
         bcbearer->bearer.publ.blocked = 1;
         bcl->stats.bearer_congs++;
-        err = ~TIPC_OK;
-
- out:
-        kfree(remains_new);
-        kfree(remains);
-        return err;
+        return ~TIPC_OK;
 }
 
 /**
@@ -765,7 +778,7 @@ int tipc_bclink_init(void)
         bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
         if (!bcbearer || !bclink) {
  nomem:
-                warn("Memory squeeze; Failed to create multicast link\n");
+                warn("Multicast link creation failed, no memory\n");
                 kfree(bcbearer);
                 bcbearer = NULL;
                 kfree(bclink);
@@ -783,7 +796,7 @@ int tipc_bclink_init(void)
         memset(bclink, 0, sizeof(struct bclink));
         INIT_LIST_HEAD(&bcl->waiting_ports);
         bcl->next_out_no = 1;
-        bclink->node.lock = SPIN_LOCK_UNLOCKED;
+        spin_lock_init(&bclink->node.lock);
         bcl->owner = &bclink->node;
         bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
         tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 0e3be2ab3307..b243d9d495f0 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -180,7 +180,7 @@ static inline void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
         if (!item->next) {
                 item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
                 if (!item->next) {
-                        warn("Memory squeeze: multicast destination port list is incomplete\n");
+                        warn("Incomplete multicast delivery, no memory\n");
                         return;
                 }
                 item->next->next = NULL;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index e213a8e54855..7ef17a449cfd 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -112,39 +112,42 @@ int tipc_register_media(u32 media_type,
                 goto exit;
 
         if (!media_name_valid(name)) {
-                warn("Media registration error: illegal name <%s>\n", name);
+                warn("Media <%s> rejected, illegal name\n", name);
                 goto exit;
         }
         if (!bcast_addr) {
-                warn("Media registration error: no broadcast address supplied\n");
+                warn("Media <%s> rejected, no broadcast address\n", name);
                 goto exit;
         }
         if ((bearer_priority < TIPC_MIN_LINK_PRI) &&
             (bearer_priority > TIPC_MAX_LINK_PRI)) {
-                warn("Media registration error: priority %u\n", bearer_priority);
+                warn("Media <%s> rejected, illegal priority (%u)\n", name,
+                     bearer_priority);
                 goto exit;
         }
         if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
             (link_tolerance > TIPC_MAX_LINK_TOL)) {
-                warn("Media registration error: tolerance %u\n", link_tolerance);
+                warn("Media <%s> rejected, illegal tolerance (%u)\n", name,
+                     link_tolerance);
                 goto exit;
         }
 
         media_id = media_count++;
         if (media_id >= MAX_MEDIA) {
-                warn("Attempt to register more than %u media\n", MAX_MEDIA);
+                warn("Media <%s> rejected, media limit reached (%u)\n", name,
+                     MAX_MEDIA);
                 media_count--;
                 goto exit;
         }
         for (i = 0; i < media_id; i++) {
                 if (media_list[i].type_id == media_type) {
-                        warn("Attempt to register second media with type %u\n",
-                             media_type);
+                        warn("Media <%s> rejected, duplicate type (%u)\n", name,
+                             media_type);
                         media_count--;
                         goto exit;
                 }
                 if (!strcmp(name, media_list[i].name)) {
-                        warn("Attempt to re-register media name <%s>\n", name);
+                        warn("Media <%s> rejected, duplicate name\n", name);
                         media_count--;
                         goto exit;
                 }
@@ -283,6 +286,9 @@ static struct bearer *bearer_find(const char *name)
         struct bearer *b_ptr;
         u32 i;
 
+        if (tipc_mode != TIPC_NET_MODE)
+                return NULL;
+
         for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
                 if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
                         return b_ptr;
@@ -475,26 +481,33 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
         u32 i;
         int res = -EINVAL;
 
-        if (tipc_mode != TIPC_NET_MODE)
+        if (tipc_mode != TIPC_NET_MODE) {
+                warn("Bearer <%s> rejected, not supported in standalone mode\n",
+                     name);
                 return -ENOPROTOOPT;
-
-        if (!bearer_name_validate(name, &b_name) ||
-            !tipc_addr_domain_valid(bcast_scope) ||
-            !in_scope(bcast_scope, tipc_own_addr))
+        }
+        if (!bearer_name_validate(name, &b_name)) {
+                warn("Bearer <%s> rejected, illegal name\n", name);
                 return -EINVAL;
-
+        }
+        if (!tipc_addr_domain_valid(bcast_scope) ||
+            !in_scope(bcast_scope, tipc_own_addr)) {
+                warn("Bearer <%s> rejected, illegal broadcast scope\n", name);
+                return -EINVAL;
+        }
         if ((priority < TIPC_MIN_LINK_PRI ||
             priority > TIPC_MAX_LINK_PRI) &&
-            (priority != TIPC_MEDIA_LINK_PRI))
+            (priority != TIPC_MEDIA_LINK_PRI)) {
+                warn("Bearer <%s> rejected, illegal priority\n", name);
                 return -EINVAL;
+        }
 
         write_lock_bh(&tipc_net_lock);
-        if (!tipc_bearers)
-                goto failed;
 
         m_ptr = media_find(b_name.media_name);
         if (!m_ptr) {
-                warn("No media <%s>\n", b_name.media_name);
+                warn("Bearer <%s> rejected, media <%s> not registered\n", name,
+                     b_name.media_name);
                 goto failed;
         }
 
@@ -510,23 +523,24 @@ restart:
                         continue;
                 }
                 if (!strcmp(name, tipc_bearers[i].publ.name)) {
-                        warn("Bearer <%s> already enabled\n", name);
+                        warn("Bearer <%s> rejected, already enabled\n", name);
                         goto failed;
                 }
                 if ((tipc_bearers[i].priority == priority) &&
                     (++with_this_prio > 2)) {
                         if (priority-- == 0) {
-                                warn("Third bearer <%s> with priority %u, unable to lower to %u\n",
-                                     name, priority + 1, priority);
+                                warn("Bearer <%s> rejected, duplicate priority\n",
+                                     name);
                                 goto failed;
                         }
-                        warn("Third bearer <%s> with priority %u, lowering to %u\n",
+                        warn("Bearer <%s> priority adjustment required %u->%u\n",
                              name, priority + 1, priority);
                         goto restart;
                 }
         }
         if (bearer_id >= MAX_BEARERS) {
-                warn("Attempt to enable more than %d bearers\n", MAX_BEARERS);
+                warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
+                     name, MAX_BEARERS);
                 goto failed;
         }
 
@@ -536,7 +550,7 @@ restart:
         strcpy(b_ptr->publ.name, name);
         res = m_ptr->enable_bearer(&b_ptr->publ);
         if (res) {
-                warn("Failed to enable bearer <%s>\n", name);
+                warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res);
                 goto failed;
         }
 
@@ -552,7 +566,7 @@ restart:
                 b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
                                                           bcast_scope, 2);
         }
-        b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
+        spin_lock_init(&b_ptr->publ.lock);
         write_unlock_bh(&tipc_net_lock);
         info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
              name, addr_string_fill(addr_string, bcast_scope), priority);
@@ -573,9 +587,6 @@ int tipc_block_bearer(const char *name)
         struct link *l_ptr;
         struct link *temp_l_ptr;
 
-        if (tipc_mode != TIPC_NET_MODE)
-                return -ENOPROTOOPT;
-
         read_lock_bh(&tipc_net_lock);
         b_ptr = bearer_find(name);
         if (!b_ptr) {
@@ -584,6 +595,7 @@ int tipc_block_bearer(const char *name)
                 return -EINVAL;
         }
 
+        info("Blocking bearer <%s>\n", name);
         spin_lock_bh(&b_ptr->publ.lock);
         b_ptr->publ.blocked = 1;
         list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
@@ -595,7 +607,6 @@ int tipc_block_bearer(const char *name)
         }
         spin_unlock_bh(&b_ptr->publ.lock);
         read_unlock_bh(&tipc_net_lock);
-        info("Blocked bearer <%s>\n", name);
         return TIPC_OK;
 }
 
@@ -611,15 +622,13 @@ static int bearer_disable(const char *name)
         struct link *l_ptr;
         struct link *temp_l_ptr;
 
-        if (tipc_mode != TIPC_NET_MODE)
-                return -ENOPROTOOPT;
-
         b_ptr = bearer_find(name);
         if (!b_ptr) {
                 warn("Attempt to disable unknown bearer <%s>\n", name);
                 return -EINVAL;
         }
 
+        info("Disabling bearer <%s>\n", name);
         tipc_disc_stop_link_req(b_ptr->link_req);
         spin_lock_bh(&b_ptr->publ.lock);
         b_ptr->link_req = NULL;
@@ -635,7 +644,6 @@ static int bearer_disable(const char *name)
                 tipc_link_delete(l_ptr);
         }
         spin_unlock_bh(&b_ptr->publ.lock);
-        info("Disabled bearer <%s>\n", name);
         memset(b_ptr, 0, sizeof(struct bearer));
         return TIPC_OK;
 }
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 1aed81584e96..1dcb6940e338 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -60,8 +60,10 @@ struct cluster *tipc_cltr_create(u32 addr)
         int alloc;
 
         c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
-        if (c_ptr == NULL)
+        if (c_ptr == NULL) {
+                warn("Cluster creation failure, no memory\n");
                 return NULL;
+        }
         memset(c_ptr, 0, sizeof(*c_ptr));
 
         c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
@@ -70,30 +72,32 @@ struct cluster *tipc_cltr_create(u32 addr)
         else
                 max_nodes = tipc_max_nodes + 1;
         alloc = sizeof(void *) * (max_nodes + 1);
+
         c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
         if (c_ptr->nodes == NULL) {
+                warn("Cluster creation failure, no memory for node area\n");
                 kfree(c_ptr);
                 return NULL;
         }
         memset(c_ptr->nodes, 0, alloc);
+
         if (in_own_cluster(addr))
                 tipc_local_nodes = c_ptr->nodes;
         c_ptr->highest_slave = LOWEST_SLAVE - 1;
         c_ptr->highest_node = 0;
 
         z_ptr = tipc_zone_find(tipc_zone(addr));
-        if (z_ptr == NULL) {
+        if (!z_ptr) {
                 z_ptr = tipc_zone_create(addr);
         }
-        if (z_ptr != NULL) {
-                tipc_zone_attach_cluster(z_ptr, c_ptr);
-                c_ptr->owner = z_ptr;
-        }
-        else {
+        if (!z_ptr) {
+                kfree(c_ptr->nodes);
                 kfree(c_ptr);
-                c_ptr = NULL;
+                return NULL;
         }
 
+        tipc_zone_attach_cluster(z_ptr, c_ptr);
+        c_ptr->owner = z_ptr;
         return c_ptr;
 }
 
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 48b5de2dbe60..285e1bc2d880 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -63,7 +63,7 @@ struct manager {
 
 static struct manager mng = { 0};
 
-static spinlock_t config_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(config_lock);
 
 static const void *req_tlv_area; /* request message TLV area */
 static int req_tlv_space; /* request message TLV area size */
@@ -291,13 +291,22 @@ static struct sk_buff *cfg_set_own_addr(void)
         if (!tipc_addr_node_valid(addr))
                 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                    " (node address)");
-        if (tipc_own_addr)
+        if (tipc_mode == TIPC_NET_MODE)
                 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                    " (cannot change node address once assigned)");
+        tipc_own_addr = addr;
+
+        /*
+         * Must release all spinlocks before calling start_net() because
+         * Linux version of TIPC calls eth_media_start() which calls
+         * register_netdevice_notifier() which may block!
+         *
+         * Temporarily releasing the lock should be harmless for non-Linux TIPC,
+         * but Linux version of eth_media_start() should really be reworked
+         * so that it can be called with spinlocks held.
+         */
 
         spin_unlock_bh(&config_lock);
-        tipc_core_stop_net();
-        tipc_own_addr = addr;
         tipc_core_start_net();
         spin_lock_bh(&config_lock);
         return tipc_cfg_reply_none();
@@ -350,50 +359,21 @@ static struct sk_buff *cfg_set_max_subscriptions(void)
 
 static struct sk_buff *cfg_set_max_ports(void)
 {
-        int orig_mode;
         u32 value;
 
         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
         value = *(u32 *)TLV_DATA(req_tlv_area);
         value = ntohl(value);
+        if (value == tipc_max_ports)
+                return tipc_cfg_reply_none();
         if (value != delimit(value, 127, 65535))
                 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                    " (max ports must be 127-65535)");
-
-        if (value == tipc_max_ports)
-                return tipc_cfg_reply_none();
-
-        if (atomic_read(&tipc_user_count) > 2)
+        if (tipc_mode != TIPC_NOT_RUNNING)
                 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
-                        " (cannot change max ports while TIPC users exist)");
-
-        spin_unlock_bh(&config_lock);
-        orig_mode = tipc_get_mode();
-        if (orig_mode == TIPC_NET_MODE)
-                tipc_core_stop_net();
-        tipc_core_stop();
+                        " (cannot change max ports while TIPC is active)");
         tipc_max_ports = value;
-        tipc_core_start();
-        if (orig_mode == TIPC_NET_MODE)
-                tipc_core_start_net();
-        spin_lock_bh(&config_lock);
-        return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *set_net_max(int value, int *parameter)
-{
-        int orig_mode;
-
-        if (value != *parameter) {
-                orig_mode = tipc_get_mode();
-                if (orig_mode == TIPC_NET_MODE)
-                        tipc_core_stop_net();
-                *parameter = value;
-                if (orig_mode == TIPC_NET_MODE)
-                        tipc_core_start_net();
-        }
-
         return tipc_cfg_reply_none();
 }
 
@@ -405,10 +385,16 @@ static struct sk_buff *cfg_set_max_zones(void)
                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
         value = *(u32 *)TLV_DATA(req_tlv_area);
         value = ntohl(value);
+        if (value == tipc_max_zones)
+                return tipc_cfg_reply_none();
         if (value != delimit(value, 1, 255))
                 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                    " (max zones must be 1-255)");
-        return set_net_max(value, &tipc_max_zones);
+        if (tipc_mode == TIPC_NET_MODE)
+                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                        " (cannot change max zones once TIPC has joined a network)");
+        tipc_max_zones = value;
+        return tipc_cfg_reply_none();
 }
 
 static struct sk_buff *cfg_set_max_clusters(void)
@@ -419,8 +405,8 @@ static struct sk_buff *cfg_set_max_clusters(void)
                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
         value = *(u32 *)TLV_DATA(req_tlv_area);
         value = ntohl(value);
-        if (value != 1)
-                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+        if (value != delimit(value, 1, 1))
+                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                    " (max clusters fixed at 1)");
         return tipc_cfg_reply_none();
 }
@@ -433,10 +419,16 @@ static struct sk_buff *cfg_set_max_nodes(void)
                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
         value = *(u32 *)TLV_DATA(req_tlv_area);
         value = ntohl(value);
+        if (value == tipc_max_nodes)
+                return tipc_cfg_reply_none();
         if (value != delimit(value, 8, 2047))
                 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                    " (max nodes must be 8-2047)");
-        return set_net_max(value, &tipc_max_nodes);
+        if (tipc_mode == TIPC_NET_MODE)
+                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                        " (cannot change max nodes once TIPC has joined a network)");
+        tipc_max_nodes = value;
+        return tipc_cfg_reply_none();
 }
 
 static struct sk_buff *cfg_set_max_slaves(void)
@@ -461,15 +453,16 @@ static struct sk_buff *cfg_set_netid(void)
                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
         value = *(u32 *)TLV_DATA(req_tlv_area);
         value = ntohl(value);
+        if (value == tipc_net_id)
+                return tipc_cfg_reply_none();
         if (value != delimit(value, 1, 9999))
                 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                    " (network id must be 1-9999)");
-
-        if (tipc_own_addr)
+        if (tipc_mode == TIPC_NET_MODE)
                 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
-                        " (cannot change network id once part of network)");
-
-        return set_net_max(value, &tipc_net_id);
+                        " (cannot change network id once TIPC has joined a network)");
+        tipc_net_id = value;
+        return tipc_cfg_reply_none();
 }
 
 struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
@@ -649,7 +642,7 @@ static void cfg_named_msg_event(void *userdata,
         if ((size < sizeof(*req_hdr)) ||
             (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
             (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
-                warn("discarded invalid configuration message\n");
+                warn("Invalid configuration message discarded\n");
                 return;
         }
 
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 3d0a8ee4e1d3..5003acb15919 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -2,7 +2,7 @@
  * net/tipc/core.c: TIPC module code
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -57,7 +57,7 @@ void tipc_socket_stop(void);
 int tipc_netlink_start(void);
 void tipc_netlink_stop(void);
 
-#define MOD_NAME "tipc_start: "
+#define TIPC_MOD_VER "1.6.1"
 
 #ifndef CONFIG_TIPC_ZONES
 #define CONFIG_TIPC_ZONES 3
@@ -198,7 +198,7 @@ static int __init tipc_init(void)
         tipc_max_publications = 10000;
         tipc_max_subscriptions = 2000;
         tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536);
-        tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 511);
+        tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 255);
         tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1);
         tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047);
         tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
@@ -224,6 +224,7 @@ module_exit(tipc_exit);
 
 MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(TIPC_MOD_VER);
 
 /* Native TIPC API for kernel-space applications (see tipc.h) */
 
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 1f2e8b27a13f..86f54f3512f1 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -2,7 +2,7 @@
  * net/tipc/core.h: Include file for TIPC global declarations
  *
  * Copyright (c) 2005-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -111,10 +111,6 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
 
 #else
 
-#ifndef DBG_OUTPUT
-#define DBG_OUTPUT NULL
-#endif
-
 /*
  * TIPC debug support not included:
  * - system messages are printed to system console
@@ -129,6 +125,19 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
 #define msg_dbg(msg,txt) do {} while (0)
 #define dump(fmt,arg...) do {} while (0)
 
+
+/*
+ * TIPC_OUTPUT is defined to be the system console, while DBG_OUTPUT is
+ * the null print buffer. Thes ensures that any system or debug messages
+ * that are generated without using the above macros are handled correctly.
+ */
+
+#undef TIPC_OUTPUT
+#define TIPC_OUTPUT TIPC_CONS
+
+#undef DBG_OUTPUT
+#define DBG_OUTPUT NULL
+
 #endif
 
 
@@ -309,7 +318,7 @@ static inline struct sk_buff *buf_acquire(u32 size)
  * buf_discard - frees a TIPC message buffer
  * @skb: message buffer
  *
- * Frees a new buffer. If passed NULL, just returns.
+ * Frees a message buffer. If passed NULL, just returns.
  */
 
 static inline void buf_discard(struct sk_buff *skb)
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index 26ef95d5fe38..55130655e1ed 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -41,7 +41,7 @@
41#define MAX_STRING 512 41#define MAX_STRING 512
42 42
43static char print_string[MAX_STRING]; 43static char print_string[MAX_STRING];
44static spinlock_t print_lock = SPIN_LOCK_UNLOCKED; 44static DEFINE_SPINLOCK(print_lock);
45 45
46static struct print_buf cons_buf = { NULL, 0, NULL, NULL }; 46static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
47struct print_buf *TIPC_CONS = &cons_buf; 47struct print_buf *TIPC_CONS = &cons_buf;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 92601385e5f5..2b8441203120 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -2,7 +2,7 @@
2 * net/tipc/discover.c 2 * net/tipc/discover.c
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -176,7 +176,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
176 n_ptr = tipc_node_create(orig); 176 n_ptr = tipc_node_create(orig);
177 } 177 }
178 if (n_ptr == NULL) { 178 if (n_ptr == NULL) {
179 warn("Memory squeeze; Failed to create node\n");
180 return; 179 return;
181 } 180 }
182 spin_lock_bh(&n_ptr->lock); 181 spin_lock_bh(&n_ptr->lock);
@@ -191,10 +190,8 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
191 } 190 }
192 addr = &link->media_addr; 191 addr = &link->media_addr;
193 if (memcmp(addr, &media_addr, sizeof(*addr))) { 192 if (memcmp(addr, &media_addr, sizeof(*addr))) {
194 char addr_string[16]; 193 warn("Resetting link <%s>, peer interface address changed\n",
195 194 link->name);
196 warn("New bearer address for %s\n",
197 addr_string_fill(addr_string, orig));
198 memcpy(addr, &media_addr, sizeof(*addr)); 195 memcpy(addr, &media_addr, sizeof(*addr));
199 tipc_link_reset(link); 196 tipc_link_reset(link);
200 } 197 }
@@ -270,8 +267,8 @@ static void disc_timeout(struct link_req *req)
270 /* leave timer interval "as is" if already at a "normal" rate */ 267 /* leave timer interval "as is" if already at a "normal" rate */
271 } else { 268 } else {
272 req->timer_intv *= 2; 269 req->timer_intv *= 2;
273 if (req->timer_intv > TIPC_LINK_REQ_SLOW) 270 if (req->timer_intv > TIPC_LINK_REQ_FAST)
274 req->timer_intv = TIPC_LINK_REQ_SLOW; 271 req->timer_intv = TIPC_LINK_REQ_FAST;
275 if ((req->timer_intv == TIPC_LINK_REQ_FAST) && 272 if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
276 (req->bearer->nodes.count)) 273 (req->bearer->nodes.count))
277 req->timer_intv = TIPC_LINK_REQ_SLOW; 274 req->timer_intv = TIPC_LINK_REQ_SLOW;
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 7a252785f727..682da4a28041 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -2,7 +2,7 @@
2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC 2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC
3 * 3 *
4 * Copyright (c) 2001-2006, Ericsson AB 4 * Copyright (c) 2001-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -98,17 +98,19 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
98 u32 size; 98 u32 size;
99 99
100 if (likely(eb_ptr->bearer)) { 100 if (likely(eb_ptr->bearer)) {
101 size = msg_size((struct tipc_msg *)buf->data); 101 if (likely(!dev->promiscuity) ||
102 skb_trim(buf, size); 102 !memcmp(buf->mac.raw,dev->dev_addr,ETH_ALEN) ||
103 if (likely(buf->len == size)) { 103 !memcmp(buf->mac.raw,dev->broadcast,ETH_ALEN)) {
104 buf->next = NULL; 104 size = msg_size((struct tipc_msg *)buf->data);
105 tipc_recv_msg(buf, eb_ptr->bearer); 105 skb_trim(buf, size);
106 } else { 106 if (likely(buf->len == size)) {
107 kfree_skb(buf); 107 buf->next = NULL;
108 tipc_recv_msg(buf, eb_ptr->bearer);
109 return TIPC_OK;
110 }
108 } 111 }
109 } else {
110 kfree_skb(buf);
111 } 112 }
113 kfree_skb(buf);
112 return TIPC_OK; 114 return TIPC_OK;
113} 115}
114 116
@@ -125,8 +127,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
125 127
126 /* Find device with specified name */ 128 /* Find device with specified name */
127 129
128 while (dev && dev->name && 130 while (dev && dev->name && strncmp(dev->name, driver_name, IFNAMSIZ)) {
129 (memcmp(dev->name, driver_name, strlen(dev->name)))) {
130 dev = dev->next; 131 dev = dev->next;
131 } 132 }
132 if (!dev) 133 if (!dev)
@@ -252,7 +253,9 @@ int tipc_eth_media_start(void)
252 if (eth_started) 253 if (eth_started)
253 return -EINVAL; 254 return -EINVAL;
254 255
255 memset(&bcast_addr, 0xff, sizeof(bcast_addr)); 256 bcast_addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
257 memset(&bcast_addr.dev_addr, 0xff, ETH_ALEN);
258
256 memset(eth_bearers, 0, sizeof(eth_bearers)); 259 memset(eth_bearers, 0, sizeof(eth_bearers));
257 260
258 res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth", 261 res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 966f70a1b608..ae6ddf00a1aa 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -44,7 +44,7 @@ struct queue_item {
44 44
45static kmem_cache_t *tipc_queue_item_cache; 45static kmem_cache_t *tipc_queue_item_cache;
46static struct list_head signal_queue_head; 46static struct list_head signal_queue_head;
47static spinlock_t qitem_lock = SPIN_LOCK_UNLOCKED; 47static DEFINE_SPINLOCK(qitem_lock);
48static int handler_enabled = 0; 48static int handler_enabled = 0;
49 49
50static void process_signal_queue(unsigned long dummy); 50static void process_signal_queue(unsigned long dummy);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 784b24b6d102..d64658053746 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -419,7 +419,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
419 419
420 l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC); 420 l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
421 if (!l_ptr) { 421 if (!l_ptr) {
422 warn("Memory squeeze; Failed to create link\n"); 422 warn("Link creation failed, no memory\n");
423 return NULL; 423 return NULL;
424 } 424 }
425 memset(l_ptr, 0, sizeof(*l_ptr)); 425 memset(l_ptr, 0, sizeof(*l_ptr));
@@ -469,7 +469,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
469 469
470 if (!pb) { 470 if (!pb) {
471 kfree(l_ptr); 471 kfree(l_ptr);
472 warn("Memory squeeze; Failed to create link\n"); 472 warn("Link creation failed, no memory for print buffer\n");
473 return NULL; 473 return NULL;
474 } 474 }
475 tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE); 475 tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
@@ -574,7 +574,6 @@ void tipc_link_wakeup_ports(struct link *l_ptr, int all)
574 break; 574 break;
575 list_del_init(&p_ptr->wait_list); 575 list_del_init(&p_ptr->wait_list);
576 p_ptr->congested_link = NULL; 576 p_ptr->congested_link = NULL;
577 assert(p_ptr->wakeup);
578 spin_lock_bh(p_ptr->publ.lock); 577 spin_lock_bh(p_ptr->publ.lock);
579 p_ptr->publ.congested = 0; 578 p_ptr->publ.congested = 0;
580 p_ptr->wakeup(&p_ptr->publ); 579 p_ptr->wakeup(&p_ptr->publ);
@@ -691,6 +690,7 @@ void tipc_link_reset(struct link *l_ptr)
691 struct sk_buff *buf; 690 struct sk_buff *buf;
692 u32 prev_state = l_ptr->state; 691 u32 prev_state = l_ptr->state;
693 u32 checkpoint = l_ptr->next_in_no; 692 u32 checkpoint = l_ptr->next_in_no;
693 int was_active_link = tipc_link_is_active(l_ptr);
694 694
695 msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1); 695 msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
696 696
@@ -712,7 +712,7 @@ void tipc_link_reset(struct link *l_ptr)
712 tipc_printf(TIPC_CONS, "\nReset link <%s>\n", l_ptr->name); 712 tipc_printf(TIPC_CONS, "\nReset link <%s>\n", l_ptr->name);
713 dbg_link_dump(); 713 dbg_link_dump();
714#endif 714#endif
715 if (tipc_node_has_active_links(l_ptr->owner) && 715 if (was_active_link && tipc_node_has_active_links(l_ptr->owner) &&
716 l_ptr->owner->permit_changeover) { 716 l_ptr->owner->permit_changeover) {
717 l_ptr->reset_checkpoint = checkpoint; 717 l_ptr->reset_checkpoint = checkpoint;
718 l_ptr->exp_msg_count = START_CHANGEOVER; 718 l_ptr->exp_msg_count = START_CHANGEOVER;
@@ -755,7 +755,7 @@ void tipc_link_reset(struct link *l_ptr)
755 755
756static void link_activate(struct link *l_ptr) 756static void link_activate(struct link *l_ptr)
757{ 757{
758 l_ptr->next_in_no = 1; 758 l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
759 tipc_node_link_up(l_ptr->owner, l_ptr); 759 tipc_node_link_up(l_ptr->owner, l_ptr);
760 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr); 760 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
761 link_send_event(tipc_cfg_link_event, l_ptr, 1); 761 link_send_event(tipc_cfg_link_event, l_ptr, 1);
@@ -820,6 +820,8 @@ static void link_state_event(struct link *l_ptr, unsigned event)
820 break; 820 break;
821 case RESET_MSG: 821 case RESET_MSG:
822 dbg_link("RES -> RR\n"); 822 dbg_link("RES -> RR\n");
823 info("Resetting link <%s>, requested by peer\n",
824 l_ptr->name);
823 tipc_link_reset(l_ptr); 825 tipc_link_reset(l_ptr);
824 l_ptr->state = RESET_RESET; 826 l_ptr->state = RESET_RESET;
825 l_ptr->fsm_msg_cnt = 0; 827 l_ptr->fsm_msg_cnt = 0;
@@ -844,6 +846,8 @@ static void link_state_event(struct link *l_ptr, unsigned event)
844 break; 846 break;
845 case RESET_MSG: 847 case RESET_MSG:
846 dbg_link("RES -> RR\n"); 848 dbg_link("RES -> RR\n");
849 info("Resetting link <%s>, requested by peer "
850 "while probing\n", l_ptr->name);
847 tipc_link_reset(l_ptr); 851 tipc_link_reset(l_ptr);
848 l_ptr->state = RESET_RESET; 852 l_ptr->state = RESET_RESET;
849 l_ptr->fsm_msg_cnt = 0; 853 l_ptr->fsm_msg_cnt = 0;
@@ -875,6 +879,8 @@ static void link_state_event(struct link *l_ptr, unsigned event)
875 } else { /* Link has failed */ 879 } else { /* Link has failed */
876 dbg_link("-> RU (%u probes unanswered)\n", 880 dbg_link("-> RU (%u probes unanswered)\n",
877 l_ptr->fsm_msg_cnt); 881 l_ptr->fsm_msg_cnt);
882 warn("Resetting link <%s>, peer not responding\n",
883 l_ptr->name);
878 tipc_link_reset(l_ptr); 884 tipc_link_reset(l_ptr);
879 l_ptr->state = RESET_UNKNOWN; 885 l_ptr->state = RESET_UNKNOWN;
880 l_ptr->fsm_msg_cnt = 0; 886 l_ptr->fsm_msg_cnt = 0;
@@ -1050,7 +1056,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1050 msg_dbg(msg, "TIPC: Congestion, throwing away\n"); 1056 msg_dbg(msg, "TIPC: Congestion, throwing away\n");
1051 buf_discard(buf); 1057 buf_discard(buf);
1052 if (imp > CONN_MANAGER) { 1058 if (imp > CONN_MANAGER) {
1053 warn("Resetting <%s>, send queue full", l_ptr->name); 1059 warn("Resetting link <%s>, send queue full", l_ptr->name);
1054 tipc_link_reset(l_ptr); 1060 tipc_link_reset(l_ptr);
1055 } 1061 }
1056 return dsz; 1062 return dsz;
@@ -1135,9 +1141,13 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
1135 if (n_ptr) { 1141 if (n_ptr) {
1136 tipc_node_lock(n_ptr); 1142 tipc_node_lock(n_ptr);
1137 l_ptr = n_ptr->active_links[selector & 1]; 1143 l_ptr = n_ptr->active_links[selector & 1];
1138 dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
1139 if (l_ptr) { 1144 if (l_ptr) {
1145 dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
1140 res = tipc_link_send_buf(l_ptr, buf); 1146 res = tipc_link_send_buf(l_ptr, buf);
1147 } else {
1148 dbg("Attempt to send msg to unreachable node:\n");
1149 msg_dbg(buf_msg(buf),">>>");
1150 buf_discard(buf);
1141 } 1151 }
1142 tipc_node_unlock(n_ptr); 1152 tipc_node_unlock(n_ptr);
1143 } else { 1153 } else {
@@ -1242,8 +1252,6 @@ int tipc_link_send_sections_fast(struct port *sender,
1242 int res; 1252 int res;
1243 u32 selector = msg_origport(hdr) & 1; 1253 u32 selector = msg_origport(hdr) & 1;
1244 1254
1245 assert(destaddr != tipc_own_addr);
1246
1247again: 1255again:
1248 /* 1256 /*
1249 * Try building message using port's max_pkt hint. 1257 * Try building message using port's max_pkt hint.
@@ -1604,40 +1612,121 @@ void tipc_link_push_queue(struct link *l_ptr)
1604 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); 1612 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1605} 1613}
1606 1614
1615static void link_reset_all(unsigned long addr)
1616{
1617 struct node *n_ptr;
1618 char addr_string[16];
1619 u32 i;
1620
1621 read_lock_bh(&tipc_net_lock);
1622 n_ptr = tipc_node_find((u32)addr);
1623 if (!n_ptr) {
1624 read_unlock_bh(&tipc_net_lock);
1625 return; /* node no longer exists */
1626 }
1627
1628 tipc_node_lock(n_ptr);
1629
1630 warn("Resetting all links to %s\n",
1631 addr_string_fill(addr_string, n_ptr->addr));
1632
1633 for (i = 0; i < MAX_BEARERS; i++) {
1634 if (n_ptr->links[i]) {
1635 link_print(n_ptr->links[i], TIPC_OUTPUT,
1636 "Resetting link\n");
1637 tipc_link_reset(n_ptr->links[i]);
1638 }
1639 }
1640
1641 tipc_node_unlock(n_ptr);
1642 read_unlock_bh(&tipc_net_lock);
1643}
1644
1645static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
1646{
1647 struct tipc_msg *msg = buf_msg(buf);
1648
1649 warn("Retransmission failure on link <%s>\n", l_ptr->name);
1650 tipc_msg_print(TIPC_OUTPUT, msg, ">RETR-FAIL>");
1651
1652 if (l_ptr->addr) {
1653
1654 /* Handle failure on standard link */
1655
1656 link_print(l_ptr, TIPC_OUTPUT, "Resetting link\n");
1657 tipc_link_reset(l_ptr);
1658
1659 } else {
1660
1661 /* Handle failure on broadcast link */
1662
1663 struct node *n_ptr;
1664 char addr_string[16];
1665
1666 tipc_printf(TIPC_OUTPUT, "Msg seq number: %u, ", msg_seqno(msg));
1667 tipc_printf(TIPC_OUTPUT, "Outstanding acks: %u\n", (u32)TIPC_SKB_CB(buf)->handle);
1668
1669 n_ptr = l_ptr->owner->next;
1670 tipc_node_lock(n_ptr);
1671
1672 addr_string_fill(addr_string, n_ptr->addr);
1673 tipc_printf(TIPC_OUTPUT, "Multicast link info for %s\n", addr_string);
1674 tipc_printf(TIPC_OUTPUT, "Supported: %d, ", n_ptr->bclink.supported);
1675 tipc_printf(TIPC_OUTPUT, "Acked: %u\n", n_ptr->bclink.acked);
1676 tipc_printf(TIPC_OUTPUT, "Last in: %u, ", n_ptr->bclink.last_in);
1677 tipc_printf(TIPC_OUTPUT, "Gap after: %u, ", n_ptr->bclink.gap_after);
1678 tipc_printf(TIPC_OUTPUT, "Gap to: %u\n", n_ptr->bclink.gap_to);
1679 tipc_printf(TIPC_OUTPUT, "Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
1680
1681 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1682
1683 tipc_node_unlock(n_ptr);
1684
1685 l_ptr->stale_count = 0;
1686 }
1687}
1688
1607void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, 1689void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1608 u32 retransmits) 1690 u32 retransmits)
1609{ 1691{
1610 struct tipc_msg *msg; 1692 struct tipc_msg *msg;
1611 1693
1694 if (!buf)
1695 return;
1696
1697 msg = buf_msg(buf);
1698
1612 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); 1699 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
1613 1700
1614 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) { 1701 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1615 msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>"); 1702 if (!skb_cloned(buf)) {
1616 dbg_print_link(l_ptr, " "); 1703 msg_dbg(msg, ">NO_RETR->BCONG>");
1617 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf)); 1704 dbg_print_link(l_ptr, " ");
1618 l_ptr->retransm_queue_size = retransmits; 1705 l_ptr->retransm_queue_head = msg_seqno(msg);
1619 return; 1706 l_ptr->retransm_queue_size = retransmits;
1707 return;
1708 } else {
1709 /* Don't retransmit if driver already has the buffer */
1710 }
1711 } else {
1712 /* Detect repeated retransmit failures on uncongested bearer */
1713
1714 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1715 if (++l_ptr->stale_count > 100) {
1716 link_retransmit_failure(l_ptr, buf);
1717 return;
1718 }
1719 } else {
1720 l_ptr->last_retransmitted = msg_seqno(msg);
1721 l_ptr->stale_count = 1;
1722 }
1620 } 1723 }
1724
1621 while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) { 1725 while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
1622 msg = buf_msg(buf); 1726 msg = buf_msg(buf);
1623 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1727 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1624 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1728 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1625 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1729 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1626 /* Catch if retransmissions fail repeatedly: */
1627 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1628 if (++l_ptr->stale_count > 100) {
1629 tipc_msg_print(TIPC_CONS, buf_msg(buf), ">RETR>");
1630 info("...Retransmitted %u times\n",
1631 l_ptr->stale_count);
1632 link_print(l_ptr, TIPC_CONS, "Resetting Link\n");
1633 tipc_link_reset(l_ptr);
1634 break;
1635 }
1636 } else {
1637 l_ptr->stale_count = 0;
1638 }
1639 l_ptr->last_retransmitted = msg_seqno(msg);
1640
1641 msg_dbg(buf_msg(buf), ">RETR>"); 1730 msg_dbg(buf_msg(buf), ">RETR>");
1642 buf = buf->next; 1731 buf = buf->next;
1643 retransmits--; 1732 retransmits--;
@@ -1650,6 +1739,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1650 return; 1739 return;
1651 } 1740 }
1652 } 1741 }
1742
1653 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; 1743 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1654} 1744}
1655 1745
@@ -1720,6 +1810,11 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1720 link_recv_non_seq(buf); 1810 link_recv_non_seq(buf);
1721 continue; 1811 continue;
1722 } 1812 }
1813
1814 if (unlikely(!msg_short(msg) &&
1815 (msg_destnode(msg) != tipc_own_addr)))
1816 goto cont;
1817
1723 n_ptr = tipc_node_find(msg_prevnode(msg)); 1818 n_ptr = tipc_node_find(msg_prevnode(msg));
1724 if (unlikely(!n_ptr)) 1819 if (unlikely(!n_ptr))
1725 goto cont; 1820 goto cont;
@@ -2140,7 +2235,7 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2140 2235
2141 if (msg_linkprio(msg) && 2236 if (msg_linkprio(msg) &&
2142 (msg_linkprio(msg) != l_ptr->priority)) { 2237 (msg_linkprio(msg) != l_ptr->priority)) {
2143 warn("Changing prio <%s>: %u->%u\n", 2238 warn("Resetting link <%s>, priority change %u->%u\n",
2144 l_ptr->name, l_ptr->priority, msg_linkprio(msg)); 2239 l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2145 l_ptr->priority = msg_linkprio(msg); 2240 l_ptr->priority = msg_linkprio(msg);
2146 tipc_link_reset(l_ptr); /* Enforce change to take effect */ 2241 tipc_link_reset(l_ptr); /* Enforce change to take effect */
@@ -2209,17 +2304,22 @@ void tipc_link_tunnel(struct link *l_ptr,
2209 u32 length = msg_size(msg); 2304 u32 length = msg_size(msg);
2210 2305
2211 tunnel = l_ptr->owner->active_links[selector & 1]; 2306 tunnel = l_ptr->owner->active_links[selector & 1];
2212 if (!tipc_link_is_up(tunnel)) 2307 if (!tipc_link_is_up(tunnel)) {
2308 warn("Link changeover error, "
2309 "tunnel link no longer available\n");
2213 return; 2310 return;
2311 }
2214 msg_set_size(tunnel_hdr, length + INT_H_SIZE); 2312 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2215 buf = buf_acquire(length + INT_H_SIZE); 2313 buf = buf_acquire(length + INT_H_SIZE);
2216 if (!buf) 2314 if (!buf) {
2315 warn("Link changeover error, "
2316 "unable to send tunnel msg\n");
2217 return; 2317 return;
2318 }
2218 memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE); 2319 memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE);
2219 memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length); 2320 memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length);
2220 dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane); 2321 dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
2221 msg_dbg(buf_msg(buf), ">SEND>"); 2322 msg_dbg(buf_msg(buf), ">SEND>");
2222 assert(tunnel);
2223 tipc_link_send_buf(tunnel, buf); 2323 tipc_link_send_buf(tunnel, buf);
2224} 2324}
2225 2325
@@ -2235,23 +2335,27 @@ void tipc_link_changeover(struct link *l_ptr)
2235 u32 msgcount = l_ptr->out_queue_size; 2335 u32 msgcount = l_ptr->out_queue_size;
2236 struct sk_buff *crs = l_ptr->first_out; 2336 struct sk_buff *crs = l_ptr->first_out;
2237 struct link *tunnel = l_ptr->owner->active_links[0]; 2337 struct link *tunnel = l_ptr->owner->active_links[0];
2238 int split_bundles = tipc_node_has_redundant_links(l_ptr->owner);
2239 struct tipc_msg tunnel_hdr; 2338 struct tipc_msg tunnel_hdr;
2339 int split_bundles;
2240 2340
2241 if (!tunnel) 2341 if (!tunnel)
2242 return; 2342 return;
2243 2343
2244 if (!l_ptr->owner->permit_changeover) 2344 if (!l_ptr->owner->permit_changeover) {
2345 warn("Link changeover error, "
2346 "peer did not permit changeover\n");
2245 return; 2347 return;
2348 }
2246 2349
2247 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2350 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2248 ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr); 2351 ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2249 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2352 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2250 msg_set_msgcnt(&tunnel_hdr, msgcount); 2353 msg_set_msgcnt(&tunnel_hdr, msgcount);
2354 dbg("Link changeover requires %u tunnel messages\n", msgcount);
2355
2251 if (!l_ptr->first_out) { 2356 if (!l_ptr->first_out) {
2252 struct sk_buff *buf; 2357 struct sk_buff *buf;
2253 2358
2254 assert(!msgcount);
2255 buf = buf_acquire(INT_H_SIZE); 2359 buf = buf_acquire(INT_H_SIZE);
2256 if (buf) { 2360 if (buf) {
2257 memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE); 2361 memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
@@ -2261,10 +2365,15 @@ void tipc_link_changeover(struct link *l_ptr)
2261 msg_dbg(&tunnel_hdr, "EMPTY>SEND>"); 2365 msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
2262 tipc_link_send_buf(tunnel, buf); 2366 tipc_link_send_buf(tunnel, buf);
2263 } else { 2367 } else {
2264 warn("Memory squeeze; link changeover failed\n"); 2368 warn("Link changeover error, "
2369 "unable to send changeover msg\n");
2265 } 2370 }
2266 return; 2371 return;
2267 } 2372 }
2373
2374 split_bundles = (l_ptr->owner->active_links[0] !=
2375 l_ptr->owner->active_links[1]);
2376
2268 while (crs) { 2377 while (crs) {
2269 struct tipc_msg *msg = buf_msg(crs); 2378 struct tipc_msg *msg = buf_msg(crs);
2270 2379
@@ -2310,7 +2419,8 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2310 msg_set_size(&tunnel_hdr, length + INT_H_SIZE); 2419 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2311 outbuf = buf_acquire(length + INT_H_SIZE); 2420 outbuf = buf_acquire(length + INT_H_SIZE);
2312 if (outbuf == NULL) { 2421 if (outbuf == NULL) {
2313 warn("Memory squeeze; buffer duplication failed\n"); 2422 warn("Link changeover error, "
2423 "unable to send duplicate msg\n");
2314 return; 2424 return;
2315 } 2425 }
2316 memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE); 2426 memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
@@ -2364,11 +2474,15 @@ static int link_recv_changeover_msg(struct link **l_ptr,
2364 u32 msg_count = msg_msgcnt(tunnel_msg); 2474 u32 msg_count = msg_msgcnt(tunnel_msg);
2365 2475
2366 dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)]; 2476 dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2367 assert(dest_link != *l_ptr);
2368 if (!dest_link) { 2477 if (!dest_link) {
2369 msg_dbg(tunnel_msg, "NOLINK/<REC<"); 2478 msg_dbg(tunnel_msg, "NOLINK/<REC<");
2370 goto exit; 2479 goto exit;
2371 } 2480 }
2481 if (dest_link == *l_ptr) {
2482 err("Unexpected changeover message on link <%s>\n",
2483 (*l_ptr)->name);
2484 goto exit;
2485 }
2372 dbg("%c<-%c:", dest_link->b_ptr->net_plane, 2486 dbg("%c<-%c:", dest_link->b_ptr->net_plane,
2373 (*l_ptr)->b_ptr->net_plane); 2487 (*l_ptr)->b_ptr->net_plane);
2374 *l_ptr = dest_link; 2488 *l_ptr = dest_link;
@@ -2381,7 +2495,7 @@ static int link_recv_changeover_msg(struct link **l_ptr,
2381 } 2495 }
2382 *buf = buf_extract(tunnel_buf,INT_H_SIZE); 2496 *buf = buf_extract(tunnel_buf,INT_H_SIZE);
2383 if (*buf == NULL) { 2497 if (*buf == NULL) {
2384 warn("Memory squeeze; failed to extract msg\n"); 2498 warn("Link changeover error, duplicate msg dropped\n");
2385 goto exit; 2499 goto exit;
2386 } 2500 }
2387 msg_dbg(tunnel_msg, "TNL<REC<"); 2501 msg_dbg(tunnel_msg, "TNL<REC<");
@@ -2393,13 +2507,17 @@ static int link_recv_changeover_msg(struct link **l_ptr,
2393 2507
2394 if (tipc_link_is_up(dest_link)) { 2508 if (tipc_link_is_up(dest_link)) {
2395 msg_dbg(tunnel_msg, "UP/FIRST/<REC<"); 2509 msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
2510 info("Resetting link <%s>, changeover initiated by peer\n",
2511 dest_link->name);
2396 tipc_link_reset(dest_link); 2512 tipc_link_reset(dest_link);
2397 dest_link->exp_msg_count = msg_count; 2513 dest_link->exp_msg_count = msg_count;
2514 dbg("Expecting %u tunnelled messages\n", msg_count);
2398 if (!msg_count) 2515 if (!msg_count)
2399 goto exit; 2516 goto exit;
2400 } else if (dest_link->exp_msg_count == START_CHANGEOVER) { 2517 } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2401 msg_dbg(tunnel_msg, "BLK/FIRST/<REC<"); 2518 msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
2402 dest_link->exp_msg_count = msg_count; 2519 dest_link->exp_msg_count = msg_count;
2520 dbg("Expecting %u tunnelled messages\n", msg_count);
2403 if (!msg_count) 2521 if (!msg_count)
2404 goto exit; 2522 goto exit;
2405 } 2523 }
@@ -2407,6 +2525,8 @@ static int link_recv_changeover_msg(struct link **l_ptr,
2407 /* Receive original message */ 2525 /* Receive original message */
2408 2526
2409 if (dest_link->exp_msg_count == 0) { 2527 if (dest_link->exp_msg_count == 0) {
2528 warn("Link switchover error, "
2529 "got too many tunnelled messages\n");
2410 msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<"); 2530 msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
2411 dbg_print_link(dest_link, "LINK:"); 2531 dbg_print_link(dest_link, "LINK:");
2412 goto exit; 2532 goto exit;
@@ -2422,7 +2542,7 @@ static int link_recv_changeover_msg(struct link **l_ptr,
2422 buf_discard(tunnel_buf); 2542 buf_discard(tunnel_buf);
2423 return 1; 2543 return 1;
2424 } else { 2544 } else {
2425 warn("Memory squeeze; dropped incoming msg\n"); 2545 warn("Link changeover error, original msg dropped\n");
2426 } 2546 }
2427 } 2547 }
2428exit: 2548exit:
@@ -2444,13 +2564,8 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
2444 while (msgcount--) { 2564 while (msgcount--) {
2445 obuf = buf_extract(buf, pos); 2565 obuf = buf_extract(buf, pos);
2446 if (obuf == NULL) { 2566 if (obuf == NULL) {
2447 char addr_string[16]; 2567 warn("Link unable to unbundle message(s)\n");
2448 2568 break;
2449 warn("Buffer allocation failure;\n");
2450 warn(" incoming message(s) from %s lost\n",
2451 addr_string_fill(addr_string,
2452 msg_orignode(buf_msg(buf))));
2453 return;
2454 }; 2569 };
2455 pos += align(msg_size(buf_msg(obuf))); 2570 pos += align(msg_size(buf_msg(obuf)));
2456 msg_dbg(buf_msg(obuf), " /"); 2571 msg_dbg(buf_msg(obuf), " /");
@@ -2508,7 +2623,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2508 } 2623 }
2509 fragm = buf_acquire(fragm_sz + INT_H_SIZE); 2624 fragm = buf_acquire(fragm_sz + INT_H_SIZE);
2510 if (fragm == NULL) { 2625 if (fragm == NULL) {
2511 warn("Memory squeeze; failed to fragment msg\n"); 2626 warn("Link unable to fragment message\n");
2512 dsz = -ENOMEM; 2627 dsz = -ENOMEM;
2513 goto exit; 2628 goto exit;
2514 } 2629 }
@@ -2623,7 +2738,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2623 set_fragm_size(pbuf,fragm_sz); 2738 set_fragm_size(pbuf,fragm_sz);
2624 set_expected_frags(pbuf,exp_fragm_cnt - 1); 2739 set_expected_frags(pbuf,exp_fragm_cnt - 1);
2625 } else { 2740 } else {
2626 warn("Memory squeeze; got no defragmenting buffer\n"); 2741 warn("Link unable to reassemble fragmented message\n");
2627 } 2742 }
2628 buf_discard(fbuf); 2743 buf_discard(fbuf);
2629 return 0; 2744 return 0;
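The tipc_link_retransmit() rework above moves stale-retransmission detection ahead of the send loop: when the same sequence number keeps being retransmitted on an uncongested bearer, the link is declared failed after a fixed number of attempts. A self-contained userspace sketch of that counter logic follows; the threshold and field names are illustrative only.

#include <stdio.h>

#define STALE_LIMIT 100

struct link_state {
        unsigned int last_retransmitted;  /* seqno of last retransmitted msg */
        unsigned int stale_count;         /* consecutive retransmits of it   */
};

/* Returns 1 when the caller should give up and treat the link as failed. */
static int note_retransmit(struct link_state *l, unsigned int seqno)
{
        if (l->last_retransmitted == seqno) {
                if (++l->stale_count > STALE_LIMIT)
                        return 1;       /* same message over and over */
        } else {
                l->last_retransmitted = seqno;
                l->stale_count = 1;     /* new message, restart counting */
        }
        return 0;
}

int main(void)
{
        struct link_state l = { 0, 0 };
        unsigned int i;

        for (i = 0; i < 200; i++) {
                if (note_retransmit(&l, 42)) {
                        printf("retransmit failure after %u attempts\n", i + 1);
                        break;
                }
        }
        return 0;
}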
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index a3bbc891f959..f0b063bcc2a9 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -127,7 +127,7 @@ void tipc_named_publish(struct publication *publ)
127 127
128 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); 128 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
129 if (!buf) { 129 if (!buf) {
130 warn("Memory squeeze; failed to distribute publication\n"); 130 warn("Publication distribution failure\n");
131 return; 131 return;
132 } 132 }
133 133
@@ -151,7 +151,7 @@ void tipc_named_withdraw(struct publication *publ)
151 151
152 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); 152 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
153 if (!buf) { 153 if (!buf) {
154 warn("Memory squeeze; failed to distribute withdrawal\n"); 154 warn("Withdrawl distribution failure\n");
155 return; 155 return;
156 } 156 }
157 157
@@ -174,7 +174,6 @@ void tipc_named_node_up(unsigned long node)
174 u32 rest; 174 u32 rest;
175 u32 max_item_buf; 175 u32 max_item_buf;
176 176
177 assert(in_own_cluster(node));
178 read_lock_bh(&tipc_nametbl_lock); 177 read_lock_bh(&tipc_nametbl_lock);
179 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE; 178 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
180 max_item_buf *= ITEM_SIZE; 179 max_item_buf *= ITEM_SIZE;
@@ -185,8 +184,8 @@ void tipc_named_node_up(unsigned long node)
185 left = (rest <= max_item_buf) ? rest : max_item_buf; 184 left = (rest <= max_item_buf) ? rest : max_item_buf;
186 rest -= left; 185 rest -= left;
187 buf = named_prepare_buf(PUBLICATION, left, node); 186 buf = named_prepare_buf(PUBLICATION, left, node);
188 if (buf == NULL) { 187 if (!buf) {
189 warn("Memory Squeeze; could not send publication\n"); 188 warn("Bulk publication distribution failure\n");
190 goto exit; 189 goto exit;
191 } 190 }
192 item = (struct distr_item *)msg_data(buf_msg(buf)); 191 item = (struct distr_item *)msg_data(buf_msg(buf));
@@ -221,15 +220,24 @@ exit:
221static void node_is_down(struct publication *publ) 220static void node_is_down(struct publication *publ)
222{ 221{
223 struct publication *p; 222 struct publication *p;
223
224 write_lock_bh(&tipc_nametbl_lock); 224 write_lock_bh(&tipc_nametbl_lock);
225 dbg("node_is_down: withdrawing %u, %u, %u\n", 225 dbg("node_is_down: withdrawing %u, %u, %u\n",
226 publ->type, publ->lower, publ->upper); 226 publ->type, publ->lower, publ->upper);
227 publ->key += 1222345; 227 publ->key += 1222345;
228 p = tipc_nametbl_remove_publ(publ->type, publ->lower, 228 p = tipc_nametbl_remove_publ(publ->type, publ->lower,
229 publ->node, publ->ref, publ->key); 229 publ->node, publ->ref, publ->key);
230 assert(p == publ);
231 write_unlock_bh(&tipc_nametbl_lock); 230 write_unlock_bh(&tipc_nametbl_lock);
232 kfree(publ); 231
232 if (p != publ) {
233 err("Unable to remove publication from failed node\n"
234 "(type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
235 publ->type, publ->lower, publ->node, publ->ref, publ->key);
236 }
237
238 if (p) {
239 kfree(p);
240 }
233} 241}
234 242
235/** 243/**
@@ -275,9 +283,15 @@ void tipc_named_recv(struct sk_buff *buf)
275 if (publ) { 283 if (publ) {
276 tipc_nodesub_unsubscribe(&publ->subscr); 284 tipc_nodesub_unsubscribe(&publ->subscr);
277 kfree(publ); 285 kfree(publ);
286 } else {
287 err("Unable to remove publication by node 0x%x\n"
288 "(type=%u, lower=%u, ref=%u, key=%u)\n",
289 msg_orignode(msg),
290 ntohl(item->type), ntohl(item->lower),
291 ntohl(item->ref), ntohl(item->key));
278 } 292 }
279 } else { 293 } else {
280 warn("tipc_named_recv: unknown msg\n"); 294 warn("Unrecognized name table message received\n");
281 } 295 }
282 item++; 296 item++;
283 } 297 }
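The node_is_down() and tipc_named_recv() changes above check the result of the table removal instead of asserting on it, and free only what was actually removed. A tiny userspace sketch of that checked-removal pattern (every name below is invented for the example):

#include <stdio.h>
#include <stdlib.h>

struct publication { unsigned int key; };

/* Stand-in for the name-table removal: returns the entry if it was found. */
static struct publication *table_remove(struct publication *want, int present)
{
        return present ? want : NULL;
}

/* Report a missing entry instead of asserting, and free only a real result. */
static void drop_publication(struct publication *publ, int present)
{
        struct publication *p = table_remove(publ, present);

        if (p != publ)
                fprintf(stderr, "unable to remove publication key=%u\n",
                        publ->key);
        if (p)
                free(p);
}

int main(void)
{
        struct publication *publ = malloc(sizeof(*publ));
        struct publication missing = { 9 };

        if (!publ)
                return 1;
        publ->key = 7;
        drop_publication(publ, 1);      /* normal case: removed and freed */
        drop_publication(&missing, 0);  /* tolerated: reported, not freed */
        return 0;
}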
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index d129422fc5c2..a6926ff07bcc 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -71,7 +71,7 @@ struct sub_seq {
71 * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type'; 71 * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type';
72 * sub-sequences are sorted in ascending order 72 * sub-sequences are sorted in ascending order
73 * @alloc: number of sub-sequences currently in array 73 * @alloc: number of sub-sequences currently in array
74 * @first_free: upper bound of highest sub-sequence + 1 74 * @first_free: array index of first unused sub-sequence entry
75 * @ns_list: links to adjacent name sequences in hash chain 75 * @ns_list: links to adjacent name sequences in hash chain
76 * @subscriptions: list of subscriptions for this 'type' 76 * @subscriptions: list of subscriptions for this 'type'
77 * @lock: spinlock controlling access to name sequence structure 77 * @lock: spinlock controlling access to name sequence structure
@@ -101,7 +101,7 @@ struct name_table {
101 101
102static struct name_table table = { NULL } ; 102static struct name_table table = { NULL } ;
103static atomic_t rsv_publ_ok = ATOMIC_INIT(0); 103static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
104rwlock_t tipc_nametbl_lock = RW_LOCK_UNLOCKED; 104DEFINE_RWLOCK(tipc_nametbl_lock);
105 105
106 106
107static int hash(int x) 107static int hash(int x)
@@ -120,7 +120,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
120 struct publication *publ = 120 struct publication *publ =
121 (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC); 121 (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
122 if (publ == NULL) { 122 if (publ == NULL) {
123 warn("Memory squeeze; failed to create publication\n"); 123 warn("Publication creation failure, no memory\n");
124 return NULL; 124 return NULL;
125 } 125 }
126 126
@@ -165,17 +165,17 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
165 struct sub_seq *sseq = tipc_subseq_alloc(1); 165 struct sub_seq *sseq = tipc_subseq_alloc(1);
166 166
167 if (!nseq || !sseq) { 167 if (!nseq || !sseq) {
168 warn("Memory squeeze; failed to create name sequence\n"); 168 warn("Name sequence creation failed, no memory\n");
169 kfree(nseq); 169 kfree(nseq);
170 kfree(sseq); 170 kfree(sseq);
171 return NULL; 171 return NULL;
172 } 172 }
173 173
174 memset(nseq, 0, sizeof(*nseq)); 174 memset(nseq, 0, sizeof(*nseq));
175 nseq->lock = SPIN_LOCK_UNLOCKED; 175 spin_lock_init(&nseq->lock);
176 nseq->type = type; 176 nseq->type = type;
177 nseq->sseqs = sseq; 177 nseq->sseqs = sseq;
178 dbg("tipc_nameseq_create() nseq = %x type %u, ssseqs %x, ff: %u\n", 178 dbg("tipc_nameseq_create(): nseq = %p, type %u, ssseqs %p, ff: %u\n",
179 nseq, type, nseq->sseqs, nseq->first_free); 179 nseq, type, nseq->sseqs, nseq->first_free);
180 nseq->alloc = 1; 180 nseq->alloc = 1;
181 INIT_HLIST_NODE(&nseq->ns_list); 181 INIT_HLIST_NODE(&nseq->ns_list);
@@ -253,16 +253,16 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
253 struct sub_seq *sseq; 253 struct sub_seq *sseq;
254 int created_subseq = 0; 254 int created_subseq = 0;
255 255
256 assert(nseq->first_free <= nseq->alloc);
257 sseq = nameseq_find_subseq(nseq, lower); 256 sseq = nameseq_find_subseq(nseq, lower);
258 dbg("nameseq_ins: for seq %x,<%u,%u>, found sseq %x\n", 257 dbg("nameseq_ins: for seq %p, {%u,%u}, found sseq %p\n",
259 nseq, type, lower, sseq); 258 nseq, type, lower, sseq);
260 if (sseq) { 259 if (sseq) {
261 260
262 /* Lower end overlaps existing entry => need an exact match */ 261 /* Lower end overlaps existing entry => need an exact match */
263 262
264 if ((sseq->lower != lower) || (sseq->upper != upper)) { 263 if ((sseq->lower != lower) || (sseq->upper != upper)) {
265 warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper); 264 warn("Cannot publish {%u,%u,%u}, overlap error\n",
265 type, lower, upper);
266 return NULL; 266 return NULL;
267 } 267 }
268 } else { 268 } else {
@@ -277,25 +277,27 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
277 277
278 if ((inspos < nseq->first_free) && 278 if ((inspos < nseq->first_free) &&
279 (upper >= nseq->sseqs[inspos].lower)) { 279 (upper >= nseq->sseqs[inspos].lower)) {
280 warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper); 280 warn("Cannot publish {%u,%u,%u}, overlap error\n",
281 type, lower, upper);
281 return NULL; 282 return NULL;
282 } 283 }
283 284
284 /* Ensure there is space for new sub-sequence */ 285 /* Ensure there is space for new sub-sequence */
285 286
286 if (nseq->first_free == nseq->alloc) { 287 if (nseq->first_free == nseq->alloc) {
287 struct sub_seq *sseqs = nseq->sseqs; 288 struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);
288 nseq->sseqs = tipc_subseq_alloc(nseq->alloc * 2); 289
289 if (nseq->sseqs != NULL) { 290 if (!sseqs) {
290 memcpy(nseq->sseqs, sseqs, 291 warn("Cannot publish {%u,%u,%u}, no memory\n",
291 nseq->alloc * sizeof (struct sub_seq)); 292 type, lower, upper);
292 kfree(sseqs);
293 dbg("Allocated %u sseqs\n", nseq->alloc);
294 nseq->alloc *= 2;
295 } else {
296 warn("Memory squeeze; failed to create sub-sequence\n");
297 return NULL; 293 return NULL;
298 } 294 }
295 dbg("Allocated %u more sseqs\n", nseq->alloc);
296 memcpy(sseqs, nseq->sseqs,
297 nseq->alloc * sizeof(struct sub_seq));
298 kfree(nseq->sseqs);
299 nseq->sseqs = sseqs;
300 nseq->alloc *= 2;
299 } 301 }
300 dbg("Have %u sseqs for type %u\n", nseq->alloc, type); 302 dbg("Have %u sseqs for type %u\n", nseq->alloc, type);
301 303
@@ -311,7 +313,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
311 sseq->upper = upper; 313 sseq->upper = upper;
312 created_subseq = 1; 314 created_subseq = 1;
313 } 315 }
314 dbg("inserting (%u %u %u) from %x:%u into sseq %x(%u,%u) of seq %x\n", 316 dbg("inserting {%u,%u,%u} from <0x%x:%u> into sseq %p(%u,%u) of seq %p\n",
315 type, lower, upper, node, port, sseq, 317 type, lower, upper, node, port, sseq,
316 sseq->lower, sseq->upper, nseq); 318 sseq->lower, sseq->upper, nseq);
317 319
@@ -320,7 +322,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
320 publ = publ_create(type, lower, upper, scope, node, port, key); 322 publ = publ_create(type, lower, upper, scope, node, port, key);
321 if (!publ) 323 if (!publ)
322 return NULL; 324 return NULL;
323 dbg("inserting publ %x, node=%x publ->node=%x, subscr->node=%x\n", 325 dbg("inserting publ %p, node=0x%x publ->node=0x%x, subscr->node=%p\n",
324 publ, node, publ->node, publ->subscr.node); 326 publ, node, publ->node, publ->subscr.node);
325 327
326 if (!sseq->zone_list) 328 if (!sseq->zone_list)
@@ -367,45 +369,47 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
367 369
368/** 370/**
369 * tipc_nameseq_remove_publ - 371 * tipc_nameseq_remove_publ -
372 *
373 * NOTE: There may be cases where TIPC is asked to remove a publication
374 * that is not in the name table. For example, if another node issues a
375 * publication for a name sequence that overlaps an existing name sequence
376 * the publication will not be recorded, which means the publication won't
377 * be found when the name sequence is later withdrawn by that node.
378 * A failed withdraw request simply returns a failure indication and lets the
379 * caller issue any error or warning messages associated with such a problem.
370 */ 380 */
371 381
372static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst, 382static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
373 u32 node, u32 ref, u32 key) 383 u32 node, u32 ref, u32 key)
374{ 384{
375 struct publication *publ; 385 struct publication *publ;
386 struct publication *curr;
376 struct publication *prev; 387 struct publication *prev;
377 struct sub_seq *sseq = nameseq_find_subseq(nseq, inst); 388 struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
378 struct sub_seq *free; 389 struct sub_seq *free;
379 struct subscription *s, *st; 390 struct subscription *s, *st;
380 int removed_subseq = 0; 391 int removed_subseq = 0;
381 392
382 assert(nseq); 393 if (!sseq)
383
384 if (!sseq) {
385 int i;
386
387 warn("Withdraw unknown <%u,%u>?\n", nseq->type, inst);
388 assert(nseq->sseqs);
389 dbg("Dumping subseqs %x for %x, alloc = %u,ff=%u\n",
390 nseq->sseqs, nseq, nseq->alloc,
391 nseq->first_free);
392 for (i = 0; i < nseq->first_free; i++) {
393 dbg("Subseq %u(%x): lower = %u,upper = %u\n",
394 i, &nseq->sseqs[i], nseq->sseqs[i].lower,
395 nseq->sseqs[i].upper);
396 }
397 return NULL; 394 return NULL;
398 } 395
399 dbg("nameseq_remove: seq: %x, sseq %x, <%u,%u> key %u\n", 396 dbg("tipc_nameseq_remove_publ: seq: %p, sseq %p, {%u,%u}, key %u\n",
400 nseq, sseq, nseq->type, inst, key); 397 nseq, sseq, nseq->type, inst, key);
401 398
399 /* Remove publication from zone scope list */
400
402 prev = sseq->zone_list; 401 prev = sseq->zone_list;
403 publ = sseq->zone_list->zone_list_next; 402 publ = sseq->zone_list->zone_list_next;
404 while ((publ->key != key) || (publ->ref != ref) || 403 while ((publ->key != key) || (publ->ref != ref) ||
405 (publ->node && (publ->node != node))) { 404 (publ->node && (publ->node != node))) {
406 prev = publ; 405 prev = publ;
407 publ = publ->zone_list_next; 406 publ = publ->zone_list_next;
408 assert(prev != sseq->zone_list); 407 if (prev == sseq->zone_list) {
408
409 /* Prevent endless loop if publication not found */
410
411 return NULL;
412 }
409 } 413 }
410 if (publ != sseq->zone_list) 414 if (publ != sseq->zone_list)
411 prev->zone_list_next = publ->zone_list_next; 415 prev->zone_list_next = publ->zone_list_next;
@@ -416,14 +420,24 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
416 sseq->zone_list = NULL; 420 sseq->zone_list = NULL;
417 } 421 }
418 422
423 /* Remove publication from cluster scope list, if present */
424
419 if (in_own_cluster(node)) { 425 if (in_own_cluster(node)) {
420 prev = sseq->cluster_list; 426 prev = sseq->cluster_list;
421 publ = sseq->cluster_list->cluster_list_next; 427 curr = sseq->cluster_list->cluster_list_next;
422 while ((publ->key != key) || (publ->ref != ref) || 428 while (curr != publ) {
423 (publ->node && (publ->node != node))) { 429 prev = curr;
424 prev = publ; 430 curr = curr->cluster_list_next;
425 publ = publ->cluster_list_next; 431 if (prev == sseq->cluster_list) {
426 assert(prev != sseq->cluster_list); 432
433 /* Prevent endless loop for malformed list */
434
435 err("Unable to de-list cluster publication\n"
436 "{%u%u}, node=0x%x, ref=%u, key=%u)\n",
437 publ->type, publ->lower, publ->node,
438 publ->ref, publ->key);
439 goto end_cluster;
440 }
427 } 441 }
428 if (publ != sseq->cluster_list) 442 if (publ != sseq->cluster_list)
429 prev->cluster_list_next = publ->cluster_list_next; 443 prev->cluster_list_next = publ->cluster_list_next;
@@ -434,15 +448,26 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
434 sseq->cluster_list = NULL; 448 sseq->cluster_list = NULL;
435 } 449 }
436 } 450 }
451end_cluster:
452
453 /* Remove publication from node scope list, if present */
437 454
438 if (node == tipc_own_addr) { 455 if (node == tipc_own_addr) {
439 prev = sseq->node_list; 456 prev = sseq->node_list;
440 publ = sseq->node_list->node_list_next; 457 curr = sseq->node_list->node_list_next;
441 while ((publ->key != key) || (publ->ref != ref) || 458 while (curr != publ) {
442 (publ->node && (publ->node != node))) { 459 prev = curr;
443 prev = publ; 460 curr = curr->node_list_next;
444 publ = publ->node_list_next; 461 if (prev == sseq->node_list) {
445 assert(prev != sseq->node_list); 462
463 /* Prevent endless loop for malformed list */
464
465 err("Unable to de-list node publication\n"
466 "{%u%u}, node=0x%x, ref=%u, key=%u)\n",
467 publ->type, publ->lower, publ->node,
468 publ->ref, publ->key);
469 goto end_node;
470 }
446 } 471 }
447 if (publ != sseq->node_list) 472 if (publ != sseq->node_list)
448 prev->node_list_next = publ->node_list_next; 473 prev->node_list_next = publ->node_list_next;
@@ -453,22 +478,18 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
453 sseq->node_list = NULL; 478 sseq->node_list = NULL;
454 } 479 }
455 } 480 }
456 assert(!publ->node || (publ->node == node)); 481end_node:
457 assert(publ->ref == ref);
458 assert(publ->key == key);
459 482
460 /* 483 /* Contract subseq list if no more publications for that subseq */
461 * Contract subseq list if no more publications: 484
462 */ 485 if (!sseq->zone_list) {
463 if (!sseq->node_list && !sseq->cluster_list && !sseq->zone_list) {
464 free = &nseq->sseqs[nseq->first_free--]; 486 free = &nseq->sseqs[nseq->first_free--];
465 memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof (*sseq)); 487 memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof (*sseq));
466 removed_subseq = 1; 488 removed_subseq = 1;
467 } 489 }
468 490
469 /* 491 /* Notify any waiting subscriptions */
470 * Any subscriptions waiting ? 492
471 */
472 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { 493 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
473 tipc_subscr_report_overlap(s, 494 tipc_subscr_report_overlap(s,
474 publ->lower, 495 publ->lower,
@@ -478,6 +499,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
478 publ->node, 499 publ->node,
479 removed_subseq); 500 removed_subseq);
480 } 501 }
502
481 return publ; 503 return publ;
482} 504}
483 505
@@ -530,7 +552,7 @@ static struct name_seq *nametbl_find_seq(u32 type)
530 seq_head = &table.types[hash(type)]; 552 seq_head = &table.types[hash(type)];
531 hlist_for_each_entry(ns, seq_node, seq_head, ns_list) { 553 hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
532 if (ns->type == type) { 554 if (ns->type == type) {
533 dbg("found %x\n", ns); 555 dbg("found %p\n", ns);
534 return ns; 556 return ns;
535 } 557 }
536 } 558 }
@@ -543,22 +565,21 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
543{ 565{
544 struct name_seq *seq = nametbl_find_seq(type); 566 struct name_seq *seq = nametbl_find_seq(type);
545 567
546 dbg("ins_publ: <%u,%x,%x> found %x\n", type, lower, upper, seq); 568 dbg("tipc_nametbl_insert_publ: {%u,%u,%u} found %p\n", type, lower, upper, seq);
547 if (lower > upper) { 569 if (lower > upper) {
548 warn("Failed to publish illegal <%u,%u,%u>\n", 570 warn("Failed to publish illegal {%u,%u,%u}\n",
549 type, lower, upper); 571 type, lower, upper);
550 return NULL; 572 return NULL;
551 } 573 }
552 574
553 dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node); 575 dbg("Publishing {%u,%u,%u} from 0x%x\n", type, lower, upper, node);
554 if (!seq) { 576 if (!seq) {
555 seq = tipc_nameseq_create(type, &table.types[hash(type)]); 577 seq = tipc_nameseq_create(type, &table.types[hash(type)]);
556 dbg("tipc_nametbl_insert_publ: created %x\n", seq); 578 dbg("tipc_nametbl_insert_publ: created %p\n", seq);
557 } 579 }
558 if (!seq) 580 if (!seq)
559 return NULL; 581 return NULL;
560 582
561 assert(seq->type == type);
562 return tipc_nameseq_insert_publ(seq, type, lower, upper, 583 return tipc_nameseq_insert_publ(seq, type, lower, upper,
563 scope, node, port, key); 584 scope, node, port, key);
564} 585}
@@ -572,7 +593,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
572 if (!seq) 593 if (!seq)
573 return NULL; 594 return NULL;
574 595
575 dbg("Withdrawing <%u,%u> from %x\n", type, lower, node); 596 dbg("Withdrawing {%u,%u} from 0x%x\n", type, lower, node);
576 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key); 597 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
577 598
578 if (!seq->first_free && list_empty(&seq->subscriptions)) { 599 if (!seq->first_free && list_empty(&seq->subscriptions)) {
@@ -738,12 +759,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
738 struct publication *publ; 759 struct publication *publ;
739 760
740 if (table.local_publ_count >= tipc_max_publications) { 761 if (table.local_publ_count >= tipc_max_publications) {
741 warn("Failed publish: max %u local publication\n", 762 warn("Publication failed, local publication limit reached (%u)\n",
742 tipc_max_publications); 763 tipc_max_publications);
743 return NULL; 764 return NULL;
744 } 765 }
745 if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) { 766 if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) {
746 warn("Failed to publish reserved name <%u,%u,%u>\n", 767 warn("Publication failed, reserved name {%u,%u,%u}\n",
747 type, lower, upper); 768 type, lower, upper);
748 return NULL; 769 return NULL;
749 } 770 }
@@ -767,10 +788,10 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
767{ 788{
768 struct publication *publ; 789 struct publication *publ;
769 790
770 dbg("tipc_nametbl_withdraw:<%d,%d,%d>\n", type, lower, key); 791 dbg("tipc_nametbl_withdraw: {%u,%u}, key=%u\n", type, lower, key);
771 write_lock_bh(&tipc_nametbl_lock); 792 write_lock_bh(&tipc_nametbl_lock);
772 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); 793 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
773 if (publ) { 794 if (likely(publ)) {
774 table.local_publ_count--; 795 table.local_publ_count--;
775 if (publ->scope != TIPC_NODE_SCOPE) 796 if (publ->scope != TIPC_NODE_SCOPE)
776 tipc_named_withdraw(publ); 797 tipc_named_withdraw(publ);
@@ -780,6 +801,9 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
780 return 1; 801 return 1;
781 } 802 }
782 write_unlock_bh(&tipc_nametbl_lock); 803 write_unlock_bh(&tipc_nametbl_lock);
804 err("Unable to remove local publication\n"
805 "(type=%u, lower=%u, ref=%u, key=%u)\n",
806 type, lower, ref, key);
783 return 0; 807 return 0;
784} 808}
785 809
@@ -787,8 +811,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
787 * tipc_nametbl_subscribe - add a subscription object to the name table 811 * tipc_nametbl_subscribe - add a subscription object to the name table
788 */ 812 */
789 813
790void 814void tipc_nametbl_subscribe(struct subscription *s)
791tipc_nametbl_subscribe(struct subscription *s)
792{ 815{
793 u32 type = s->seq.type; 816 u32 type = s->seq.type;
794 struct name_seq *seq; 817 struct name_seq *seq;
@@ -800,11 +823,13 @@ tipc_nametbl_subscribe(struct subscription *s)
800 } 823 }
801 if (seq){ 824 if (seq){
802 spin_lock_bh(&seq->lock); 825 spin_lock_bh(&seq->lock);
803 dbg("tipc_nametbl_subscribe:found %x for <%u,%u,%u>\n", 826 dbg("tipc_nametbl_subscribe:found %p for {%u,%u,%u}\n",
804 seq, type, s->seq.lower, s->seq.upper); 827 seq, type, s->seq.lower, s->seq.upper);
805 assert(seq->type == type);
806 tipc_nameseq_subscribe(seq, s); 828 tipc_nameseq_subscribe(seq, s);
807 spin_unlock_bh(&seq->lock); 829 spin_unlock_bh(&seq->lock);
830 } else {
831 warn("Failed to create subscription for {%u,%u,%u}\n",
832 s->seq.type, s->seq.lower, s->seq.upper);
808 } 833 }
809 write_unlock_bh(&tipc_nametbl_lock); 834 write_unlock_bh(&tipc_nametbl_lock);
810} 835}
@@ -813,8 +838,7 @@ tipc_nametbl_subscribe(struct subscription *s)
813 * tipc_nametbl_unsubscribe - remove a subscription object from name table 838 * tipc_nametbl_unsubscribe - remove a subscription object from name table
814 */ 839 */
815 840
816void 841void tipc_nametbl_unsubscribe(struct subscription *s)
817tipc_nametbl_unsubscribe(struct subscription *s)
818{ 842{
819 struct name_seq *seq; 843 struct name_seq *seq;
820 844
@@ -1049,35 +1073,20 @@ int tipc_nametbl_init(void)
1049 1073
1050void tipc_nametbl_stop(void) 1074void tipc_nametbl_stop(void)
1051{ 1075{
1052 struct hlist_head *seq_head;
1053 struct hlist_node *seq_node;
1054 struct hlist_node *tmp;
1055 struct name_seq *seq;
1056 u32 i; 1076 u32 i;
1057 1077
1058 if (!table.types) 1078 if (!table.types)
1059 return; 1079 return;
1060 1080
1081 /* Verify name table is empty, then release it */
1082
1061 write_lock_bh(&tipc_nametbl_lock); 1083 write_lock_bh(&tipc_nametbl_lock);
1062 for (i = 0; i < tipc_nametbl_size; i++) { 1084 for (i = 0; i < tipc_nametbl_size; i++) {
1063 seq_head = &table.types[i]; 1085 if (!hlist_empty(&table.types[i]))
1064 hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) { 1086 err("tipc_nametbl_stop(): hash chain %u is not empty\n", i);
1065 struct sub_seq *sseq = seq->sseqs;
1066
1067 for (; sseq != &seq->sseqs[seq->first_free]; sseq++) {
1068 struct publication *publ = sseq->zone_list;
1069 assert(publ);
1070 do {
1071 struct publication *next =
1072 publ->zone_list_next;
1073 kfree(publ);
1074 publ = next;
1075 }
1076 while (publ != sseq->zone_list);
1077 }
1078 }
1079 } 1087 }
1080 kfree(table.types); 1088 kfree(table.types);
1081 table.types = NULL; 1089 table.types = NULL;
1082 write_unlock_bh(&tipc_nametbl_lock); 1090 write_unlock_bh(&tipc_nametbl_lock);
1083} 1091}
1092
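tipc_nameseq_insert_publ() above now grows the sub-sequence array by allocating and verifying the larger copy before replacing the old pointer, so an allocation failure leaves the existing entries usable. An illustrative userspace sketch of the same grow-then-swap pattern (the structure and function names are made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sub_seq { unsigned int lower, upper; };

struct name_seq {
        struct sub_seq *sseqs;     /* dynamically sized array   */
        unsigned int alloc;        /* slots currently allocated */
        unsigned int first_free;   /* index of first unused slot */
};

/* Double the array; the live pointer is only touched once the new copy
 * is known to exist, so failure is harmless. */
static int nameseq_grow(struct name_seq *nseq)
{
        struct sub_seq *bigger = calloc(nseq->alloc * 2, sizeof(*bigger));

        if (!bigger)
                return -1;                      /* old array still intact */
        memcpy(bigger, nseq->sseqs, nseq->alloc * sizeof(*bigger));
        free(nseq->sseqs);
        nseq->sseqs = bigger;
        nseq->alloc *= 2;
        return 0;
}

int main(void)
{
        struct name_seq seq = { calloc(1, sizeof(struct sub_seq)), 1, 1 };

        if (!seq.sseqs)
                return 1;
        if (nameseq_grow(&seq) == 0)
                printf("now room for %u sub-sequences\n", seq.alloc);
        free(seq.sseqs);
        return 0;
}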
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f7c8223ddf7d..e5a359ab4930 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -115,7 +115,7 @@
115 * - A local spin_lock protecting the queue of subscriber events. 115 * - A local spin_lock protecting the queue of subscriber events.
116*/ 116*/
117 117
118rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED; 118DEFINE_RWLOCK(tipc_net_lock);
119struct network tipc_net = { NULL }; 119struct network tipc_net = { NULL };
120 120
121struct node *tipc_net_select_remote_node(u32 addr, u32 ref) 121struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 0d5db06e203f..861322b935da 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -61,34 +61,37 @@ struct node *tipc_node_create(u32 addr)
61 struct node **curr_node; 61 struct node **curr_node;
62 62
63 n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC); 63 n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC);
64 if (n_ptr != NULL) { 64 if (!n_ptr) {
65 memset(n_ptr, 0, sizeof(*n_ptr)); 65 warn("Node creation failed, no memory\n");
66 n_ptr->addr = addr; 66 return NULL;
67 n_ptr->lock = SPIN_LOCK_UNLOCKED; 67 }
68 INIT_LIST_HEAD(&n_ptr->nsub); 68
69 69 c_ptr = tipc_cltr_find(addr);
70 c_ptr = tipc_cltr_find(addr); 70 if (!c_ptr) {
71 if (c_ptr == NULL) 71 c_ptr = tipc_cltr_create(addr);
72 c_ptr = tipc_cltr_create(addr); 72 }
73 if (c_ptr != NULL) { 73 if (!c_ptr) {
74 n_ptr->owner = c_ptr; 74 kfree(n_ptr);
75 tipc_cltr_attach_node(c_ptr, n_ptr); 75 return NULL;
76 n_ptr->last_router = -1; 76 }
77 77
78 /* Insert node into ordered list */ 78 memset(n_ptr, 0, sizeof(*n_ptr));
79 for (curr_node = &tipc_nodes; *curr_node; 79 n_ptr->addr = addr;
80 curr_node = &(*curr_node)->next) { 80 spin_lock_init(&n_ptr->lock);
81 if (addr < (*curr_node)->addr) { 81 INIT_LIST_HEAD(&n_ptr->nsub);
82 n_ptr->next = *curr_node; 82 n_ptr->owner = c_ptr;
83 break; 83 tipc_cltr_attach_node(c_ptr, n_ptr);
84 } 84 n_ptr->last_router = -1;
85 } 85
86 (*curr_node) = n_ptr; 86 /* Insert node into ordered list */
87 } else { 87 for (curr_node = &tipc_nodes; *curr_node;
88 kfree(n_ptr); 88 curr_node = &(*curr_node)->next) {
89 n_ptr = NULL; 89 if (addr < (*curr_node)->addr) {
90 } 90 n_ptr->next = *curr_node;
91 } 91 break;
92 }
93 }
94 (*curr_node) = n_ptr;
92 return n_ptr; 95 return n_ptr;
93} 96}
94 97
@@ -122,6 +125,8 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
122{ 125{
123 struct link **active = &n_ptr->active_links[0]; 126 struct link **active = &n_ptr->active_links[0];
124 127
128 n_ptr->working_links++;
129
125 info("Established link <%s> on network plane %c\n", 130 info("Established link <%s> on network plane %c\n",
126 l_ptr->name, l_ptr->b_ptr->net_plane); 131 l_ptr->name, l_ptr->b_ptr->net_plane);
127 132
@@ -132,7 +137,7 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
132 return; 137 return;
133 } 138 }
134 if (l_ptr->priority < active[0]->priority) { 139 if (l_ptr->priority < active[0]->priority) {
135 info("Link is standby\n"); 140 info("New link <%s> becomes standby\n", l_ptr->name);
136 return; 141 return;
137 } 142 }
138 tipc_link_send_duplicate(active[0], l_ptr); 143 tipc_link_send_duplicate(active[0], l_ptr);
@@ -140,8 +145,9 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
140 active[0] = l_ptr; 145 active[0] = l_ptr;
141 return; 146 return;
142 } 147 }
143 info("Link <%s> on network plane %c becomes standby\n", 148 info("Old link <%s> becomes standby\n", active[0]->name);
144 active[0]->name, active[0]->b_ptr->net_plane); 149 if (active[1] != active[0])
150 info("Old link <%s> becomes standby\n", active[1]->name);
145 active[0] = active[1] = l_ptr; 151 active[0] = active[1] = l_ptr;
146} 152}
147 153
@@ -181,6 +187,8 @@ void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr)
181{ 187{
182 struct link **active; 188 struct link **active;
183 189
190 n_ptr->working_links--;
191
184 if (!tipc_link_is_active(l_ptr)) { 192 if (!tipc_link_is_active(l_ptr)) {
185 info("Lost standby link <%s> on network plane %c\n", 193 info("Lost standby link <%s> on network plane %c\n",
186 l_ptr->name, l_ptr->b_ptr->net_plane); 194 l_ptr->name, l_ptr->b_ptr->net_plane);
@@ -210,8 +218,7 @@ int tipc_node_has_active_links(struct node *n_ptr)
210 218
211int tipc_node_has_redundant_links(struct node *n_ptr) 219int tipc_node_has_redundant_links(struct node *n_ptr)
212{ 220{
213 return (tipc_node_has_active_links(n_ptr) && 221 return (n_ptr->working_links > 1);
214 (n_ptr->active_links[0] != n_ptr->active_links[1]));
215} 222}
216 223
217static int tipc_node_has_active_routes(struct node *n_ptr) 224static int tipc_node_has_active_routes(struct node *n_ptr)
@@ -234,7 +241,6 @@ struct node *tipc_node_attach_link(struct link *l_ptr)
234 u32 bearer_id = l_ptr->b_ptr->identity; 241 u32 bearer_id = l_ptr->b_ptr->identity;
235 char addr_string[16]; 242 char addr_string[16];
236 243
237 assert(bearer_id < MAX_BEARERS);
238 if (n_ptr->link_cnt >= 2) { 244 if (n_ptr->link_cnt >= 2) {
239 char addr_string[16]; 245 char addr_string[16];
240 246
@@ -249,7 +255,7 @@ struct node *tipc_node_attach_link(struct link *l_ptr)
249 n_ptr->link_cnt++; 255 n_ptr->link_cnt++;
250 return n_ptr; 256 return n_ptr;
251 } 257 }
252 err("Attempt to establish second link on <%s> to <%s> \n", 258 err("Attempt to establish second link on <%s> to %s \n",
253 l_ptr->b_ptr->publ.name, 259 l_ptr->b_ptr->publ.name,
254 addr_string_fill(addr_string, l_ptr->addr)); 260 addr_string_fill(addr_string, l_ptr->addr));
255 } 261 }
@@ -314,7 +320,7 @@ static void node_established_contact(struct node *n_ptr)
314 struct cluster *c_ptr; 320 struct cluster *c_ptr;
315 321
316 dbg("node_established_contact:-> %x\n", n_ptr->addr); 322 dbg("node_established_contact:-> %x\n", n_ptr->addr);
317 if (!tipc_node_has_active_routes(n_ptr)) { 323 if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) {
318 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr); 324 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
319 } 325 }
320 326
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 781126e084ae..a07cc79ea637 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -51,6 +51,7 @@
51 * @nsub: list of "node down" subscriptions monitoring node 51 * @nsub: list of "node down" subscriptions monitoring node
52 * @active_links: pointers to active links to node 52 * @active_links: pointers to active links to node
53 * @links: pointers to all links to node 53 * @links: pointers to all links to node
54 * @working_links: number of working links to node (both active and standby)
54 * @link_cnt: number of links to node 55 * @link_cnt: number of links to node
55 * @permit_changeover: non-zero if node has redundant links to this system 56 * @permit_changeover: non-zero if node has redundant links to this system
56 * @routers: bitmap (used for multicluster communication) 57 * @routers: bitmap (used for multicluster communication)
@@ -76,6 +77,7 @@ struct node {
76 struct link *active_links[2]; 77 struct link *active_links[2];
77 struct link *links[MAX_BEARERS]; 78 struct link *links[MAX_BEARERS];
78 int link_cnt; 79 int link_cnt;
80 int working_links;
79 int permit_changeover; 81 int permit_changeover;
80 u32 routers[512/32]; 82 u32 routers[512/32];
81 int last_router; 83 int last_router;
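The new working_links counter declared above lets tipc_node_has_redundant_links() collapse into a single comparison instead of inspecting the active-link array. A minimal userspace sketch of that bookkeeping (names are illustrative):

#include <stdio.h>

struct node_state {
        int working_links;   /* links that are up: active plus standby */
};

static void link_up(struct node_state *n)   { n->working_links++; }
static void link_down(struct node_state *n) { n->working_links--; }

/* Redundancy now simply means "more than one working link". */
static int has_redundant_links(const struct node_state *n)
{
        return n->working_links > 1;
}

int main(void)
{
        struct node_state n = { 0 };

        link_up(&n);
        printf("redundant: %d\n", has_redundant_links(&n));  /* 0 */
        link_up(&n);
        printf("redundant: %d\n", has_redundant_links(&n));  /* 1 */
        link_down(&n);
        printf("redundant: %d\n", has_redundant_links(&n));  /* 0 */
        return 0;
}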
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index cff4068cc755..cc3fff3dec4f 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -47,18 +47,19 @@
47void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr, 47void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
48 void *usr_handle, net_ev_handler handle_down) 48 void *usr_handle, net_ev_handler handle_down)
49{ 49{
50 node_sub->node = NULL; 50 if (addr == tipc_own_addr) {
51 if (addr == tipc_own_addr) 51 node_sub->node = NULL;
52 return; 52 return;
53 if (!tipc_addr_node_valid(addr)) { 53 }
54 warn("node_subscr with illegal %x\n", addr); 54
55 node_sub->node = tipc_node_find(addr);
56 if (!node_sub->node) {
57 warn("Node subscription rejected, unknown node 0x%x\n", addr);
55 return; 58 return;
56 } 59 }
57
58 node_sub->handle_node_down = handle_down; 60 node_sub->handle_node_down = handle_down;
59 node_sub->usr_handle = usr_handle; 61 node_sub->usr_handle = usr_handle;
60 node_sub->node = tipc_node_find(addr); 62
61 assert(node_sub->node);
62 tipc_node_lock(node_sub->node); 63 tipc_node_lock(node_sub->node);
63 list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub); 64 list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
64 tipc_node_unlock(node_sub->node); 65 tipc_node_unlock(node_sub->node);
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 67e96cb1e825..3251c8d8e53c 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -57,8 +57,8 @@
57static struct sk_buff *msg_queue_head = NULL; 57static struct sk_buff *msg_queue_head = NULL;
58static struct sk_buff *msg_queue_tail = NULL; 58static struct sk_buff *msg_queue_tail = NULL;
59 59
60spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED; 60DEFINE_SPINLOCK(tipc_port_list_lock);
61static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED; 61static DEFINE_SPINLOCK(queue_lock);
62 62
63static LIST_HEAD(ports); 63static LIST_HEAD(ports);
64static void port_handle_node_down(unsigned long ref); 64static void port_handle_node_down(unsigned long ref);
@@ -168,7 +168,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
168 struct port_list *item = dp; 168 struct port_list *item = dp;
169 int cnt = 0; 169 int cnt = 0;
170 170
171 assert(buf);
172 msg = buf_msg(buf); 171 msg = buf_msg(buf);
173 172
174 /* Create destination port list, if one wasn't supplied */ 173 /* Create destination port list, if one wasn't supplied */
@@ -196,7 +195,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
196 struct sk_buff *b = skb_clone(buf, GFP_ATOMIC); 195 struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
197 196
198 if (b == NULL) { 197 if (b == NULL) {
199 warn("Buffer allocation failure\n"); 198 warn("Unable to deliver multicast message(s)\n");
200 msg_dbg(msg, "LOST:"); 199 msg_dbg(msg, "LOST:");
201 goto exit; 200 goto exit;
202 } 201 }
@@ -228,14 +227,14 @@ u32 tipc_createport_raw(void *usr_handle,
228 u32 ref; 227 u32 ref;
229 228
230 p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC); 229 p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC);
231 if (p_ptr == NULL) { 230 if (!p_ptr) {
232 warn("Memory squeeze; failed to create port\n"); 231 warn("Port creation failed, no memory\n");
233 return 0; 232 return 0;
234 } 233 }
235 memset(p_ptr, 0, sizeof(*p_ptr)); 234 memset(p_ptr, 0, sizeof(*p_ptr));
236 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock); 235 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
237 if (!ref) { 236 if (!ref) {
238 warn("Reference Table Exhausted\n"); 237 warn("Port creation failed, reference table exhausted\n");
239 kfree(p_ptr); 238 kfree(p_ptr);
240 return 0; 239 return 0;
241 } 240 }
@@ -810,18 +809,20 @@ static void port_dispatcher_sigh(void *dummy)
810 void *usr_handle; 809 void *usr_handle;
811 int connected; 810 int connected;
812 int published; 811 int published;
812 u32 message_type;
813 813
814 struct sk_buff *next = buf->next; 814 struct sk_buff *next = buf->next;
815 struct tipc_msg *msg = buf_msg(buf); 815 struct tipc_msg *msg = buf_msg(buf);
816 u32 dref = msg_destport(msg); 816 u32 dref = msg_destport(msg);
817 817
818 message_type = msg_type(msg);
819 if (message_type > TIPC_DIRECT_MSG)
820 goto reject; /* Unsupported message type */
821
818 p_ptr = tipc_port_lock(dref); 822 p_ptr = tipc_port_lock(dref);
819 if (!p_ptr) { 823 if (!p_ptr)
820 /* Port deleted while msg in queue */ 824 goto reject; /* Port deleted while msg in queue */
821 tipc_reject_msg(buf, TIPC_ERR_NO_PORT); 825
822 buf = next;
823 continue;
824 }
825 orig.ref = msg_origport(msg); 826 orig.ref = msg_origport(msg);
826 orig.node = msg_orignode(msg); 827 orig.node = msg_orignode(msg);
827 up_ptr = p_ptr->user_port; 828 up_ptr = p_ptr->user_port;
@@ -832,7 +833,7 @@ static void port_dispatcher_sigh(void *dummy)
832 if (unlikely(msg_errcode(msg))) 833 if (unlikely(msg_errcode(msg)))
833 goto err; 834 goto err;
834 835
835 switch (msg_type(msg)) { 836 switch (message_type) {
836 837
837 case TIPC_CONN_MSG:{ 838 case TIPC_CONN_MSG:{
838 tipc_conn_msg_event cb = up_ptr->conn_msg_cb; 839 tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
@@ -874,6 +875,7 @@ static void port_dispatcher_sigh(void *dummy)
874 &orig); 875 &orig);
875 break; 876 break;
876 } 877 }
878 case TIPC_MCAST_MSG:
877 case TIPC_NAMED_MSG:{ 879 case TIPC_NAMED_MSG:{
878 tipc_named_msg_event cb = up_ptr->named_msg_cb; 880 tipc_named_msg_event cb = up_ptr->named_msg_cb;
879 881
@@ -886,7 +888,8 @@ static void port_dispatcher_sigh(void *dummy)
886 goto reject; 888 goto reject;
887 dseq.type = msg_nametype(msg); 889 dseq.type = msg_nametype(msg);
888 dseq.lower = msg_nameinst(msg); 890 dseq.lower = msg_nameinst(msg);
889 dseq.upper = dseq.lower; 891 dseq.upper = (message_type == TIPC_NAMED_MSG)
892 ? dseq.lower : msg_nameupper(msg);
890 skb_pull(buf, msg_hdr_sz(msg)); 893 skb_pull(buf, msg_hdr_sz(msg));
891 cb(usr_handle, dref, &buf, msg_data(msg), 894 cb(usr_handle, dref, &buf, msg_data(msg),
892 msg_data_sz(msg), msg_importance(msg), 895 msg_data_sz(msg), msg_importance(msg),
@@ -899,7 +902,7 @@ static void port_dispatcher_sigh(void *dummy)
899 buf = next; 902 buf = next;
900 continue; 903 continue;
901err: 904err:
902 switch (msg_type(msg)) { 905 switch (message_type) {
903 906
904 case TIPC_CONN_MSG:{ 907 case TIPC_CONN_MSG:{
905 tipc_conn_shutdown_event cb = 908 tipc_conn_shutdown_event cb =
@@ -931,6 +934,7 @@ err:
931 msg_data_sz(msg), msg_errcode(msg), &orig); 934 msg_data_sz(msg), msg_errcode(msg), &orig);
932 break; 935 break;
933 } 936 }
937 case TIPC_MCAST_MSG:
934 case TIPC_NAMED_MSG:{ 938 case TIPC_NAMED_MSG:{
935 tipc_named_msg_err_event cb = 939 tipc_named_msg_err_event cb =
936 up_ptr->named_err_cb; 940 up_ptr->named_err_cb;
@@ -940,7 +944,8 @@ err:
940 break; 944 break;
941 dseq.type = msg_nametype(msg); 945 dseq.type = msg_nametype(msg);
942 dseq.lower = msg_nameinst(msg); 946 dseq.lower = msg_nameinst(msg);
943 dseq.upper = dseq.lower; 947 dseq.upper = (message_type == TIPC_NAMED_MSG)
948 ? dseq.lower : msg_nameupper(msg);
944 skb_pull(buf, msg_hdr_sz(msg)); 949 skb_pull(buf, msg_hdr_sz(msg));
945 cb(usr_handle, dref, &buf, msg_data(msg), 950 cb(usr_handle, dref, &buf, msg_data(msg),
946 msg_data_sz(msg), msg_errcode(msg), &dseq); 951 msg_data_sz(msg), msg_errcode(msg), &dseq);
@@ -1054,7 +1059,8 @@ int tipc_createport(u32 user_ref,
1054 u32 ref; 1059 u32 ref;
1055 1060
1056 up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC); 1061 up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
1057 if (up_ptr == NULL) { 1062 if (!up_ptr) {
1063 warn("Port creation failed, no memory\n");
1058 return -ENOMEM; 1064 return -ENOMEM;
1059 } 1065 }
1060 ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance); 1066 ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance);
@@ -1165,8 +1171,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1165 p_ptr = tipc_port_lock(ref); 1171 p_ptr = tipc_port_lock(ref);
1166 if (!p_ptr) 1172 if (!p_ptr)
1167 return -EINVAL; 1173 return -EINVAL;
1168 if (!p_ptr->publ.published)
1169 goto exit;
1170 if (!seq) { 1174 if (!seq) {
1171 list_for_each_entry_safe(publ, tpubl, 1175 list_for_each_entry_safe(publ, tpubl,
1172 &p_ptr->publications, pport_list) { 1176 &p_ptr->publications, pport_list) {
@@ -1193,7 +1197,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1193 } 1197 }
1194 if (list_empty(&p_ptr->publications)) 1198 if (list_empty(&p_ptr->publications))
1195 p_ptr->publ.published = 0; 1199 p_ptr->publ.published = 0;
1196exit:
1197 tipc_port_unlock(p_ptr); 1200 tipc_port_unlock(p_ptr);
1198 return res; 1201 return res;
1199} 1202}
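Two threads run through the port.c hunks above: port_dispatcher_sigh() now caches msg_type() once, sending anything above TIPC_DIRECT_MSG or any message whose port has vanished down a single reject path, and multicast delivery is folded into the named-message callbacks, with dseq.upper widened only for TIPC_MCAST_MSG. Abridged from the hunks (elided parts are unchanged context):

		message_type = msg_type(msg);
		if (message_type > TIPC_DIRECT_MSG)
			goto reject;	/* Unsupported message type */

		p_ptr = tipc_port_lock(dref);
		if (!p_ptr)
			goto reject;	/* Port deleted while msg in queue */
		...
		case TIPC_MCAST_MSG:
		case TIPC_NAMED_MSG: {
			...
			dseq.type = msg_nametype(msg);
			dseq.lower = msg_nameinst(msg);
			dseq.upper = (message_type == TIPC_NAMED_MSG)
				     ? dseq.lower : msg_nameupper(msg);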
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 33bbf5095094..596d3c8ff750 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -63,7 +63,7 @@
63 63
64struct ref_table tipc_ref_table = { NULL }; 64struct ref_table tipc_ref_table = { NULL };
65 65
66static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED; 66static DEFINE_RWLOCK(ref_table_lock);
67 67
68/** 68/**
69 * tipc_ref_table_init - create reference table for objects 69 * tipc_ref_table_init - create reference table for objects
@@ -87,7 +87,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
87 index_mask = sz - 1; 87 index_mask = sz - 1;
88 for (i = sz - 1; i >= 0; i--) { 88 for (i = sz - 1; i >= 0; i--) {
89 table[i].object = NULL; 89 table[i].object = NULL;
90 table[i].lock = SPIN_LOCK_UNLOCKED; 90 spin_lock_init(&table[i].lock);
91 table[i].data.next_plus_upper = (start & ~index_mask) + i - 1; 91 table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
92 } 92 }
93 tipc_ref_table.entries = table; 93 tipc_ref_table.entries = table;
@@ -127,7 +127,14 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
127 u32 next_plus_upper; 127 u32 next_plus_upper;
128 u32 reference = 0; 128 u32 reference = 0;
129 129
130 assert(tipc_ref_table.entries && object); 130 if (!object) {
131 err("Attempt to acquire reference to non-existent object\n");
132 return 0;
133 }
134 if (!tipc_ref_table.entries) {
135 err("Reference table not found during acquisition attempt\n");
136 return 0;
137 }
131 138
132 write_lock_bh(&ref_table_lock); 139 write_lock_bh(&ref_table_lock);
133 if (tipc_ref_table.first_free) { 140 if (tipc_ref_table.first_free) {
@@ -162,15 +169,28 @@ void tipc_ref_discard(u32 ref)
162 u32 index; 169 u32 index;
163 u32 index_mask; 170 u32 index_mask;
164 171
165 assert(tipc_ref_table.entries); 172 if (!ref) {
166 assert(ref != 0); 173 err("Attempt to discard reference 0\n");
174 return;
175 }
176 if (!tipc_ref_table.entries) {
177 err("Reference table not found during discard attempt\n");
178 return;
179 }
167 180
168 write_lock_bh(&ref_table_lock); 181 write_lock_bh(&ref_table_lock);
169 index_mask = tipc_ref_table.index_mask; 182 index_mask = tipc_ref_table.index_mask;
170 index = ref & index_mask; 183 index = ref & index_mask;
171 entry = &(tipc_ref_table.entries[index]); 184 entry = &(tipc_ref_table.entries[index]);
172 assert(entry->object != 0); 185
173 assert(entry->data.reference == ref); 186 if (!entry->object) {
187 err("Attempt to discard reference to non-existent object\n");
188 goto exit;
189 }
190 if (entry->data.reference != ref) {
191 err("Attempt to discard non-existent reference\n");
192 goto exit;
193 }
174 194
175 /* mark entry as unused */ 195 /* mark entry as unused */
176 entry->object = NULL; 196 entry->object = NULL;
@@ -184,6 +204,7 @@ void tipc_ref_discard(u32 ref)
184 204
185 /* increment upper bits of entry to invalidate subsequent references */ 205 /* increment upper bits of entry to invalidate subsequent references */
186 entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1); 206 entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
207exit:
187 write_unlock_bh(&ref_table_lock); 208 write_unlock_bh(&ref_table_lock);
188} 209}
189 210
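Besides switching ref_table_lock to DEFINE_RWLOCK() and the per-entry locks to spin_lock_init(), the reference-table hunks replace asserts with explicit error reporting. Note the split in tipc_ref_discard(): failures detected before the table lock is taken simply return, while failures detected afterwards jump to the new exit label so write_unlock_bh() always runs. Assembled from the hunk above (abridged):

	if (!ref) {
		err("Attempt to discard reference 0\n");
		return;
	}
	if (!tipc_ref_table.entries) {
		err("Reference table not found during discard attempt\n");
		return;
	}

	write_lock_bh(&ref_table_lock);
	...
	if (!entry->object) {
		err("Attempt to discard reference to non-existent object\n");
		goto exit;
	}
	if (entry->data.reference != ref) {
		err("Attempt to discard non-existent reference\n");
		goto exit;
	}
	...
exit:
	write_unlock_bh(&ref_table_lock);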
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 648a734e6044..32d778448a00 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -169,12 +169,6 @@ static int tipc_create(struct socket *sock, int protocol)
169 struct sock *sk; 169 struct sock *sk;
170 u32 ref; 170 u32 ref;
171 171
172 if ((sock->type != SOCK_STREAM) &&
173 (sock->type != SOCK_SEQPACKET) &&
174 (sock->type != SOCK_DGRAM) &&
175 (sock->type != SOCK_RDM))
176 return -EPROTOTYPE;
177
178 if (unlikely(protocol != 0)) 172 if (unlikely(protocol != 0))
179 return -EPROTONOSUPPORT; 173 return -EPROTONOSUPPORT;
180 174
@@ -199,6 +193,9 @@ static int tipc_create(struct socket *sock, int protocol)
199 sock->ops = &msg_ops; 193 sock->ops = &msg_ops;
200 sock->state = SS_READY; 194 sock->state = SS_READY;
201 break; 195 break;
196 default:
197 tipc_deleteport(ref);
198 return -EPROTOTYPE;
202 } 199 }
203 200
204 sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1); 201 sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1);
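With the up-front sock->type filter removed, an unsupported socket type now reaches a default: arm in the type switch. Because the port reference has already been acquired by that point (earlier in the function, outside this hunk), the error path must release it before failing:

	default:
		tipc_deleteport(ref);
		return -EPROTOTYPE;
	}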
@@ -426,7 +423,7 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
426 423
427 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr))) 424 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
428 return -EFAULT; 425 return -EFAULT;
429 if ((ntohs(hdr.tcm_type) & 0xC000) & (!capable(CAP_NET_ADMIN))) 426 if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
430 return -EACCES; 427 return -EACCES;
431 428
432 return 0; 429 return 0;
@@ -437,7 +434,7 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
437 * @iocb: (unused) 434 * @iocb: (unused)
438 * @sock: socket structure 435 * @sock: socket structure
439 * @m: message to send 436 * @m: message to send
440 * @total_len: (unused) 437 * @total_len: length of message
441 * 438 *
442 * Message must have a destination specified explicitly. 439 * Message must have a destination specified explicitly.
443 * Used for SOCK_RDM and SOCK_DGRAM messages, 440 * Used for SOCK_RDM and SOCK_DGRAM messages,
@@ -458,7 +455,8 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
458 455
459 if (unlikely(!dest)) 456 if (unlikely(!dest))
460 return -EDESTADDRREQ; 457 return -EDESTADDRREQ;
461 if (unlikely(dest->family != AF_TIPC)) 458 if (unlikely((m->msg_namelen < sizeof(*dest)) ||
459 (dest->family != AF_TIPC)))
462 return -EINVAL; 460 return -EINVAL;
463 461
464 needs_conn = (sock->state != SS_READY); 462 needs_conn = (sock->state != SS_READY);
@@ -470,6 +468,10 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
470 if ((tsock->p->published) || 468 if ((tsock->p->published) ||
471 ((sock->type == SOCK_STREAM) && (total_len != 0))) 469 ((sock->type == SOCK_STREAM) && (total_len != 0)))
472 return -EOPNOTSUPP; 470 return -EOPNOTSUPP;
471 if (dest->addrtype == TIPC_ADDR_NAME) {
472 tsock->p->conn_type = dest->addr.name.name.type;
473 tsock->p->conn_instance = dest->addr.name.name.instance;
474 }
473 } 475 }
474 476
475 if (down_interruptible(&tsock->sem)) 477 if (down_interruptible(&tsock->sem))
@@ -538,7 +540,7 @@ exit:
538 * @iocb: (unused) 540 * @iocb: (unused)
539 * @sock: socket structure 541 * @sock: socket structure
540 * @m: message to send 542 * @m: message to send
541 * @total_len: (unused) 543 * @total_len: length of message
542 * 544 *
543 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data. 545 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
544 * 546 *
@@ -561,15 +563,15 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
561 return -ERESTARTSYS; 563 return -ERESTARTSYS;
562 } 564 }
563 565
564 if (unlikely(sock->state != SS_CONNECTED)) {
565 if (sock->state == SS_DISCONNECTING)
566 res = -EPIPE;
567 else
568 res = -ENOTCONN;
569 goto exit;
570 }
571
572 do { 566 do {
567 if (unlikely(sock->state != SS_CONNECTED)) {
568 if (sock->state == SS_DISCONNECTING)
569 res = -EPIPE;
570 else
571 res = -ENOTCONN;
572 goto exit;
573 }
574
573 res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov); 575 res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov);
574 if (likely(res != -ELINKCONG)) { 576 if (likely(res != -ELINKCONG)) {
575exit: 577exit:
@@ -597,7 +599,8 @@ exit:
597 * 599 *
598 * Used for SOCK_STREAM data. 600 * Used for SOCK_STREAM data.
599 * 601 *
600 * Returns the number of bytes sent on success, or errno otherwise 602 * Returns the number of bytes sent on success (or partial success),
603 * or errno if no data sent
601 */ 604 */
602 605
603 606
@@ -611,6 +614,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
611 char __user *curr_start; 614 char __user *curr_start;
612 int curr_left; 615 int curr_left;
613 int bytes_to_send; 616 int bytes_to_send;
617 int bytes_sent;
614 int res; 618 int res;
615 619
616 if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE)) 620 if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE))
@@ -633,11 +637,11 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
633 * of small iovec entries into send_packet(). 637 * of small iovec entries into send_packet().
634 */ 638 */
635 639
636 my_msg = *m; 640 curr_iov = m->msg_iov;
637 curr_iov = my_msg.msg_iov; 641 curr_iovlen = m->msg_iovlen;
638 curr_iovlen = my_msg.msg_iovlen;
639 my_msg.msg_iov = &my_iov; 642 my_msg.msg_iov = &my_iov;
640 my_msg.msg_iovlen = 1; 643 my_msg.msg_iovlen = 1;
644 bytes_sent = 0;
641 645
642 while (curr_iovlen--) { 646 while (curr_iovlen--) {
643 curr_start = curr_iov->iov_base; 647 curr_start = curr_iov->iov_base;
@@ -648,16 +652,18 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
648 ? curr_left : TIPC_MAX_USER_MSG_SIZE; 652 ? curr_left : TIPC_MAX_USER_MSG_SIZE;
649 my_iov.iov_base = curr_start; 653 my_iov.iov_base = curr_start;
650 my_iov.iov_len = bytes_to_send; 654 my_iov.iov_len = bytes_to_send;
651 if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0) 655 if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0) {
652 return res; 656 return bytes_sent ? bytes_sent : res;
657 }
653 curr_left -= bytes_to_send; 658 curr_left -= bytes_to_send;
654 curr_start += bytes_to_send; 659 curr_start += bytes_to_send;
660 bytes_sent += bytes_to_send;
655 } 661 }
656 662
657 curr_iov++; 663 curr_iov++;
658 } 664 }
659 665
660 return total_len; 666 return bytes_sent;
661} 667}
662 668
663/** 669/**
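Stitching the send_stream() hunks together: the loop now accumulates bytes_sent, so an error after some data has been accepted reports the partial write (normal stream-socket behaviour) rather than discarding the count; only a failure on the very first chunk propagates the errno:

			if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0) {
				return bytes_sent ? bytes_sent : res;
			}
			curr_left -= bytes_to_send;
			curr_start += bytes_to_send;
			bytes_sent += bytes_to_send;
		}

		curr_iov++;
	}

	return bytes_sent;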
@@ -727,6 +733,7 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
727 u32 anc_data[3]; 733 u32 anc_data[3];
728 u32 err; 734 u32 err;
729 u32 dest_type; 735 u32 dest_type;
736 int has_name;
730 int res; 737 int res;
731 738
732 if (likely(m->msg_controllen == 0)) 739 if (likely(m->msg_controllen == 0))
@@ -738,10 +745,10 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
738 if (unlikely(err)) { 745 if (unlikely(err)) {
739 anc_data[0] = err; 746 anc_data[0] = err;
740 anc_data[1] = msg_data_sz(msg); 747 anc_data[1] = msg_data_sz(msg);
741 if ((res = put_cmsg(m, SOL_SOCKET, TIPC_ERRINFO, 8, anc_data))) 748 if ((res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data)))
742 return res; 749 return res;
743 if (anc_data[1] && 750 if (anc_data[1] &&
744 (res = put_cmsg(m, SOL_SOCKET, TIPC_RETDATA, anc_data[1], 751 (res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
745 msg_data(msg)))) 752 msg_data(msg))))
746 return res; 753 return res;
747 } 754 }
@@ -751,25 +758,28 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
751 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG; 758 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
752 switch (dest_type) { 759 switch (dest_type) {
753 case TIPC_NAMED_MSG: 760 case TIPC_NAMED_MSG:
761 has_name = 1;
754 anc_data[0] = msg_nametype(msg); 762 anc_data[0] = msg_nametype(msg);
755 anc_data[1] = msg_namelower(msg); 763 anc_data[1] = msg_namelower(msg);
756 anc_data[2] = msg_namelower(msg); 764 anc_data[2] = msg_namelower(msg);
757 break; 765 break;
758 case TIPC_MCAST_MSG: 766 case TIPC_MCAST_MSG:
767 has_name = 1;
759 anc_data[0] = msg_nametype(msg); 768 anc_data[0] = msg_nametype(msg);
760 anc_data[1] = msg_namelower(msg); 769 anc_data[1] = msg_namelower(msg);
761 anc_data[2] = msg_nameupper(msg); 770 anc_data[2] = msg_nameupper(msg);
762 break; 771 break;
763 case TIPC_CONN_MSG: 772 case TIPC_CONN_MSG:
773 has_name = (tport->conn_type != 0);
764 anc_data[0] = tport->conn_type; 774 anc_data[0] = tport->conn_type;
765 anc_data[1] = tport->conn_instance; 775 anc_data[1] = tport->conn_instance;
766 anc_data[2] = tport->conn_instance; 776 anc_data[2] = tport->conn_instance;
767 break; 777 break;
768 default: 778 default:
769 anc_data[0] = 0; 779 has_name = 0;
770 } 780 }
771 if (anc_data[0] && 781 if (has_name &&
772 (res = put_cmsg(m, SOL_SOCKET, TIPC_DESTNAME, 12, anc_data))) 782 (res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data)))
773 return res; 783 return res;
774 784
775 return 0; 785 return 0;
@@ -960,7 +970,7 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
960restart: 970restart:
961 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) && 971 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
962 (flags & MSG_DONTWAIT))) { 972 (flags & MSG_DONTWAIT))) {
963 res = (sz_copied == 0) ? -EWOULDBLOCK : 0; 973 res = -EWOULDBLOCK;
964 goto exit; 974 goto exit;
965 } 975 }
966 976
@@ -1051,7 +1061,7 @@ restart:
1051 1061
1052exit: 1062exit:
1053 up(&tsock->sem); 1063 up(&tsock->sem);
1054 return res ? res : sz_copied; 1064 return sz_copied ? sz_copied : res;
1055} 1065}
1056 1066
1057/** 1067/**
@@ -1236,7 +1246,8 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1236 if (sock->state == SS_READY) 1246 if (sock->state == SS_READY)
1237 return -EOPNOTSUPP; 1247 return -EOPNOTSUPP;
1238 1248
1239 /* MOVE THE REST OF THIS ERROR CHECKING TO send_msg()? */ 1249 /* Issue Posix-compliant error code if socket is in the wrong state */
1250
1240 if (sock->state == SS_LISTENING) 1251 if (sock->state == SS_LISTENING)
1241 return -EOPNOTSUPP; 1252 return -EOPNOTSUPP;
1242 if (sock->state == SS_CONNECTING) 1253 if (sock->state == SS_CONNECTING)
@@ -1244,13 +1255,20 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1244 if (sock->state != SS_UNCONNECTED) 1255 if (sock->state != SS_UNCONNECTED)
1245 return -EISCONN; 1256 return -EISCONN;
1246 1257
1247 if ((dst->family != AF_TIPC) || 1258 /*
1248 ((dst->addrtype != TIPC_ADDR_NAME) && (dst->addrtype != TIPC_ADDR_ID))) 1259 * Reject connection attempt using multicast address
1260 *
1261 * Note: send_msg() validates the rest of the address fields,
1262 * so there's no need to do it here
1263 */
1264
1265 if (dst->addrtype == TIPC_ADDR_MCAST)
1249 return -EINVAL; 1266 return -EINVAL;
1250 1267
1251 /* Send a 'SYN-' to destination */ 1268 /* Send a 'SYN-' to destination */
1252 1269
1253 m.msg_name = dest; 1270 m.msg_name = dest;
1271 m.msg_namelen = destlen;
1254 if ((res = send_msg(NULL, sock, &m, 0)) < 0) { 1272 if ((res = send_msg(NULL, sock, &m, 0)) < 0) {
1255 sock->state = SS_DISCONNECTING; 1273 sock->state = SS_DISCONNECTING;
1256 return res; 1274 return res;
@@ -1269,10 +1287,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1269 msg = buf_msg(buf); 1287 msg = buf_msg(buf);
1270 res = auto_connect(sock, tsock, msg); 1288 res = auto_connect(sock, tsock, msg);
1271 if (!res) { 1289 if (!res) {
1272 if (dst->addrtype == TIPC_ADDR_NAME) {
1273 tsock->p->conn_type = dst->addr.name.name.type;
1274 tsock->p->conn_instance = dst->addr.name.name.instance;
1275 }
1276 if (!msg_data_sz(msg)) 1290 if (!msg_data_sz(msg))
1277 advance_queue(tsock); 1291 advance_queue(tsock);
1278 } 1292 }
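connect() is now limited to state checks plus rejecting multicast destinations; the remaining address validation, and the conn_type/conn_instance bookkeeping it used to do after auto_connect(), both live in send_msg() (earlier hunk in this file), where they apply to any name-addressed implied connect:

	if (dest->addrtype == TIPC_ADDR_NAME) {
		tsock->p->conn_type = dest->addr.name.name.type;
		tsock->p->conn_instance = dest->addr.name.name.instance;
	}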
@@ -1386,7 +1400,7 @@ exit:
1386/** 1400/**
1387 * shutdown - shutdown socket connection 1401 * shutdown - shutdown socket connection
1388 * @sock: socket structure 1402 * @sock: socket structure
1389 * @how: direction to close (always treated as read + write) 1403 * @how: direction to close (unused; always treated as read + write)
1390 * 1404 *
1391 * Terminates connection (if necessary), then purges socket's receive queue. 1405 * Terminates connection (if necessary), then purges socket's receive queue.
1392 * 1406 *
@@ -1469,7 +1483,8 @@ restart:
1469 * Returns 0 on success, errno otherwise 1483 * Returns 0 on success, errno otherwise
1470 */ 1484 */
1471 1485
1472static int setsockopt(struct socket *sock, int lvl, int opt, char *ov, int ol) 1486static int setsockopt(struct socket *sock,
1487 int lvl, int opt, char __user *ov, int ol)
1473{ 1488{
1474 struct tipc_sock *tsock = tipc_sk(sock->sk); 1489 struct tipc_sock *tsock = tipc_sk(sock->sk);
1475 u32 value; 1490 u32 value;
@@ -1525,7 +1540,8 @@ static int setsockopt(struct socket *sock, int lvl, int opt, char *ov, int ol)
1525 * Returns 0 on success, errno otherwise 1540 * Returns 0 on success, errno otherwise
1526 */ 1541 */
1527 1542
1528static int getsockopt(struct socket *sock, int lvl, int opt, char *ov, int *ol) 1543static int getsockopt(struct socket *sock,
1544 int lvl, int opt, char __user *ov, int *ol)
1529{ 1545{
1530 struct tipc_sock *tsock = tipc_sk(sock->sk); 1546 struct tipc_sock *tsock = tipc_sk(sock->sk);
1531 int len; 1547 int len;
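The ancillary-data path now emits its control messages at the SOL_TIPC level instead of SOL_SOCKET, and an explicit has_name flag replaces the old test on anc_data[0], so a named or multicast message whose name type happens to be zero still gets its TIPC_DESTNAME record; connection messages keep the conn_type != 0 condition. Assembled from the hunk above (abridged):

	case TIPC_CONN_MSG:
		has_name = (tport->conn_type != 0);
		anc_data[0] = tport->conn_type;
		anc_data[1] = tport->conn_instance;
		anc_data[2] = tport->conn_instance;
		break;
	default:
		has_name = 0;
	}

	if (has_name &&
	    (res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data)))
		return res;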
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index c5f026c7fd38..e19b4bcd67ec 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -266,7 +266,8 @@ static void subscr_subscribe(struct tipc_subscr *s,
266 /* Refuse subscription if global limit exceeded */ 266 /* Refuse subscription if global limit exceeded */
267 267
268 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) { 268 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
269 warn("Failed: max %u subscriptions\n", tipc_max_subscriptions); 269 warn("Subscription rejected, subscription limit reached (%u)\n",
270 tipc_max_subscriptions);
270 subscr_terminate(subscriber); 271 subscr_terminate(subscriber);
271 return; 272 return;
272 } 273 }
@@ -274,8 +275,8 @@ static void subscr_subscribe(struct tipc_subscr *s,
274 /* Allocate subscription object */ 275 /* Allocate subscription object */
275 276
276 sub = kmalloc(sizeof(*sub), GFP_ATOMIC); 277 sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
277 if (sub == NULL) { 278 if (!sub) {
278 warn("Memory squeeze; ignoring subscription\n"); 279 warn("Subscription rejected, no memory\n");
279 subscr_terminate(subscriber); 280 subscr_terminate(subscriber);
280 return; 281 return;
281 } 282 }
@@ -298,8 +299,7 @@ static void subscr_subscribe(struct tipc_subscr *s,
298 if ((((sub->filter != TIPC_SUB_PORTS) 299 if ((((sub->filter != TIPC_SUB_PORTS)
299 && (sub->filter != TIPC_SUB_SERVICE))) 300 && (sub->filter != TIPC_SUB_SERVICE)))
300 || (sub->seq.lower > sub->seq.upper)) { 301 || (sub->seq.lower > sub->seq.upper)) {
301 warn("Rejecting illegal subscription %u,%u,%u\n", 302 warn("Subscription rejected, illegal request\n");
302 sub->seq.type, sub->seq.lower, sub->seq.upper);
303 kfree(sub); 303 kfree(sub);
304 subscr_terminate(subscriber); 304 subscr_terminate(subscriber);
305 return; 305 return;
@@ -387,7 +387,7 @@ static void subscr_named_msg_event(void *usr_handle,
387 dbg("subscr_named_msg_event: orig = %x own = %x,\n", 387 dbg("subscr_named_msg_event: orig = %x own = %x,\n",
388 orig->node, tipc_own_addr); 388 orig->node, tipc_own_addr);
389 if (size && (size != sizeof(struct tipc_subscr))) { 389 if (size && (size != sizeof(struct tipc_subscr))) {
390 warn("Received tipc_subscr of invalid size\n"); 390 warn("Subscriber rejected, invalid subscription size\n");
391 return; 391 return;
392 } 392 }
393 393
@@ -395,7 +395,7 @@ static void subscr_named_msg_event(void *usr_handle,
395 395
396 subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC); 396 subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC);
397 if (subscriber == NULL) { 397 if (subscriber == NULL) {
398 warn("Memory squeeze; ignoring subscriber setup\n"); 398 warn("Subscriber rejected, no memory\n");
399 return; 399 return;
400 } 400 }
401 memset(subscriber, 0, sizeof(struct subscriber)); 401 memset(subscriber, 0, sizeof(struct subscriber));
@@ -403,7 +403,7 @@ static void subscr_named_msg_event(void *usr_handle,
403 INIT_LIST_HEAD(&subscriber->subscriber_list); 403 INIT_LIST_HEAD(&subscriber->subscriber_list);
404 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock); 404 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
405 if (subscriber->ref == 0) { 405 if (subscriber->ref == 0) {
406 warn("Failed to acquire subscriber reference\n"); 406 warn("Subscriber rejected, reference table exhausted\n");
407 kfree(subscriber); 407 kfree(subscriber);
408 return; 408 return;
409 } 409 }
@@ -422,7 +422,7 @@ static void subscr_named_msg_event(void *usr_handle,
422 NULL, 422 NULL,
423 &subscriber->port_ref); 423 &subscriber->port_ref);
424 if (subscriber->port_ref == 0) { 424 if (subscriber->port_ref == 0) {
425 warn("Memory squeeze; failed to create subscription port\n"); 425 warn("Subscriber rejected, unable to create port\n");
426 tipc_ref_discard(subscriber->ref); 426 tipc_ref_discard(subscriber->ref);
427 kfree(subscriber); 427 kfree(subscriber);
428 return; 428 return;
@@ -457,7 +457,7 @@ int tipc_subscr_start(void)
457 int res = -1; 457 int res = -1;
458 458
459 memset(&topsrv, 0, sizeof (topsrv)); 459 memset(&topsrv, 0, sizeof (topsrv));
460 topsrv.lock = SPIN_LOCK_UNLOCKED; 460 spin_lock_init(&topsrv.lock);
461 INIT_LIST_HEAD(&topsrv.subscriber_list); 461 INIT_LIST_HEAD(&topsrv.subscriber_list);
462 462
463 spin_lock_bh(&topsrv.lock); 463 spin_lock_bh(&topsrv.lock);
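The topology-server lock is embedded in a structure that is cleared with memset() at startup, so it is set up with spin_lock_init() after the clear rather than by assigning the deprecated SPIN_LOCK_UNLOCKED value; this mirrors the DEFINE_SPINLOCK()/DEFINE_RWLOCK() conversions elsewhere in the patch, which are used where a lock can be defined statically:

	memset(&topsrv, 0, sizeof(topsrv));
	spin_lock_init(&topsrv.lock);	/* runtime init for an embedded lock */
	INIT_LIST_HEAD(&topsrv.subscriber_list);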
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 3f3f933976e9..1e3ae57c7228 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -67,7 +67,7 @@ struct tipc_user {
67 67
68static struct tipc_user *users = NULL; 68static struct tipc_user *users = NULL;
69static u32 next_free_user = MAX_USERID + 1; 69static u32 next_free_user = MAX_USERID + 1;
70static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED; 70static DEFINE_SPINLOCK(reg_lock);
71 71
72/** 72/**
73 * reg_init - create TIPC user registry (but don't activate it) 73 * reg_init - create TIPC user registry (but don't activate it)
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index 2803e1b4f170..316c4872ff5b 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -44,19 +44,24 @@
44 44
45struct _zone *tipc_zone_create(u32 addr) 45struct _zone *tipc_zone_create(u32 addr)
46{ 46{
47 struct _zone *z_ptr = NULL; 47 struct _zone *z_ptr;
48 u32 z_num; 48 u32 z_num;
49 49
50 if (!tipc_addr_domain_valid(addr)) 50 if (!tipc_addr_domain_valid(addr)) {
51 err("Zone creation failed, invalid domain 0x%x\n", addr);
51 return NULL; 52 return NULL;
53 }
52 54
53 z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC); 55 z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
54 if (z_ptr != NULL) { 56 if (!z_ptr) {
55 memset(z_ptr, 0, sizeof(*z_ptr)); 57 warn("Zone creation failed, insufficient memory\n");
56 z_num = tipc_zone(addr); 58 return NULL;
57 z_ptr->addr = tipc_addr(z_num, 0, 0);
58 tipc_net.zones[z_num] = z_ptr;
59 } 59 }
60
61 memset(z_ptr, 0, sizeof(*z_ptr));
62 z_num = tipc_zone(addr);
63 z_ptr->addr = tipc_addr(z_num, 0, 0);
64 tipc_net.zones[z_num] = z_ptr;
60 return z_ptr; 65 return z_ptr;
61} 66}
62 67
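For reference, tipc_zone_create() as it reads after this hunk: both failure cases now bail out early with a diagnostic, leaving the success path unindented instead of nested inside the allocation test:

struct _zone *tipc_zone_create(u32 addr)
{
	struct _zone *z_ptr;
	u32 z_num;

	if (!tipc_addr_domain_valid(addr)) {
		err("Zone creation failed, invalid domain 0x%x\n", addr);
		return NULL;
	}

	z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
	if (!z_ptr) {
		warn("Zone creation failed, insufficient memory\n");
		return NULL;
	}

	memset(z_ptr, 0, sizeof(*z_ptr));
	z_num = tipc_zone(addr);
	z_ptr->addr = tipc_addr(z_num, 0, 0);
	tipc_net.zones[z_num] = z_ptr;
	return z_ptr;
}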