aboutsummaryrefslogtreecommitdiffstats
path: root/net/tipc/net.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-06-12 17:27:40 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-12 17:27:40 -0400
commitf9da455b93f6ba076935b4ef4589f61e529ae046 (patch)
tree3c4e69ce1ba1d6bf65915b97a76ca2172105b278 /net/tipc/net.c
parent0e04c641b199435f3779454055f6a7de258ecdfc (diff)
parente5eca6d41f53db48edd8cf88a3f59d2c30227f8e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: 1) Seccomp BPF filters can now be JIT'd, from Alexei Starovoitov. 2) Multiqueue support in xen-netback and xen-netfront, from Andrew J Benniston. 3) Allow tweaking of aggregation settings in cdc_ncm driver, from Bjørn Mork. 4) BPF now has a "random" opcode, from Chema Gonzalez. 5) Add more BPF documentation and improve test framework, from Daniel Borkmann. 6) Support TCP fastopen over ipv6, from Daniel Lee. 7) Add software TSO helper functions and use them to support software TSO in mvneta and mv643xx_eth drivers. From Ezequiel Garcia. 8) Support software TSO in fec driver too, from Nimrod Andy. 9) Add Broadcom SYSTEMPORT driver, from Florian Fainelli. 10) Handle broadcasts more gracefully over macvlan when there are large numbers of interfaces configured, from Herbert Xu. 11) Allow more control over fwmark used for non-socket based responses, from Lorenzo Colitti. 12) Do TCP congestion window limiting based upon measurements, from Neal Cardwell. 13) Support busy polling in SCTP, from Neal Horman. 14) Allow RSS key to be configured via ethtool, from Venkata Duvvuru. 15) Bridge promisc mode handling improvements from Vlad Yasevich. 16) Don't use inetpeer entries to implement ID generation any more, it performs poorly, from Eric Dumazet. 
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1522 commits) rtnetlink: fix userspace API breakage for iproute2 < v3.9.0 tcp: fixing TLP's FIN recovery net: fec: Add software TSO support net: fec: Add Scatter/gather support net: fec: Increase buffer descriptor entry number net: fec: Factorize feature setting net: fec: Enable IP header hardware checksum net: fec: Factorize the .xmit transmit function bridge: fix compile error when compiling without IPv6 support bridge: fix smatch warning / potential null pointer dereference via-rhine: fix full-duplex with autoneg disable bnx2x: Enlarge the dorq threshold for VFs bnx2x: Check for UNDI in uncommon branch bnx2x: Fix 1G-baseT link bnx2x: Fix link for KR with swapped polarity lane sctp: Fix sk_ack_backlog wrap-around problem net/core: Add VF link state control policy net/fsl: xgmac_mdio is dependent on OF_MDIO net/fsl: Make xgmac_mdio read error message useful net_sched: drr: warn when qdisc is not work conserving ...
Diffstat (limited to 'net/tipc/net.c')
-rw-r--r--net/tipc/net.c71
1 files changed, 34 insertions, 37 deletions
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 4c564eb69e1a..f64375e7f99f 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -39,45 +39,41 @@
 #include "name_distr.h"
 #include "subscr.h"
 #include "port.h"
+#include "socket.h"
 #include "node.h"
 #include "config.h"
 
 /*
  * The TIPC locking policy is designed to ensure a very fine locking
  * granularity, permitting complete parallel access to individual
- * port and node/link instances. The code consists of three major
+ * port and node/link instances. The code consists of four major
  * locking domains, each protected with their own disjunct set of locks.
  *
- * 1: The routing hierarchy.
- *    Comprises the structures 'zone', 'cluster', 'node', 'link'
- *    and 'bearer'. The whole hierarchy is protected by a big
- *    read/write lock, tipc_net_lock, to enssure that nothing is added
- *    or removed while code is accessing any of these structures.
- *    This layer must not be called from the two others while they
- *    hold any of their own locks.
- *    Neither must it itself do any upcalls to the other two before
- *    it has released tipc_net_lock and other protective locks.
+ * 1: The bearer level.
+ *    RTNL lock is used to serialize the process of configuring bearer
+ *    on update side, and RCU lock is applied on read side to make
+ *    bearer instance valid on both paths of message transmission and
+ *    reception.
  *
- * Within the tipc_net_lock domain there are two sub-domains;'node' and
- * 'bearer', where local write operations are permitted,
- * provided that those are protected by individual spin_locks
- * per instance. Code holding tipc_net_lock(read) and a node spin_lock
- * is permitted to poke around in both the node itself and its
- * subordinate links. I.e, it can update link counters and queues,
- * change link state, send protocol messages, and alter the
- * "active_links" array in the node; but it can _not_ remove a link
- * or a node from the overall structure.
- * Correspondingly, individual bearers may change status within a
- * tipc_net_lock(read), protected by an individual spin_lock ber bearer
- * instance, but it needs tipc_net_lock(write) to remove/add any bearers.
+ * 2: The node and link level.
+ *    All node instances are saved into two tipc_node_list and node_htable
+ *    lists. The two lists are protected by node_list_lock on write side,
+ *    and they are guarded with RCU lock on read side. Especially node
+ *    instance is destroyed only when TIPC module is removed, and we can
+ *    confirm that there has no any user who is accessing the node at the
+ *    moment. Therefore, Except for iterating the two lists within RCU
+ *    protection, it's no needed to hold RCU that we access node instance
+ *    in other places.
  *
+ * In addition, all members in node structure including link instances
+ * are protected by node spin lock.
  *
- * 2: The transport level of the protocol.
+ * 3: The transport level of the protocol.
  *    This consists of the structures port, (and its user level
  *    representations, such as user_port and tipc_sock), reference and
  *    tipc_user (port.c, reg.c, socket.c).
  *
  * This layer has four different locks:
  *  - The tipc_port spin_lock. This is protecting each port instance
  *    from parallel data access and removal. Since we can not place
  *    this lock in the port itself, it has been placed in the
@@ -96,7 +92,7 @@
  * There are two such lists; 'port_list', which is used for management,
  * and 'wait_list', which is used to queue ports during congestion.
  *
- * 3: The name table (name_table.c, name_distr.c, subscription.c)
+ * 4: The name table (name_table.c, name_distr.c, subscription.c)
  *    - There is one big read/write-lock (tipc_nametbl_lock) protecting the
  *      overall name table structure. Nothing must be added/removed to
  *      this structure without holding write access to it.
@@ -108,8 +104,6 @@
  *  - A local spin_lock protecting the queue of subscriber events.
  */
 
-DEFINE_RWLOCK(tipc_net_lock);
-
 static void net_route_named_msg(struct sk_buff *buf)
 {
 	struct tipc_msg *msg = buf_msg(buf);
@@ -148,7 +142,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
 	if (msg_mcast(msg))
 		tipc_port_mcast_rcv(buf, NULL);
 	else if (msg_destport(msg))
-		tipc_port_rcv(buf);
+		tipc_sk_rcv(buf);
 	else
 		net_route_named_msg(buf);
 	return;
@@ -171,22 +165,25 @@ void tipc_net_route_msg(struct sk_buff *buf)
 	tipc_link_xmit(buf, dnode, msg_link_selector(msg));
 }
 
-void tipc_net_start(u32 addr)
+int tipc_net_start(u32 addr)
 {
 	char addr_string[16];
+	int res;
 
-	write_lock_bh(&tipc_net_lock);
 	tipc_own_addr = addr;
 	tipc_named_reinit();
 	tipc_port_reinit();
-	tipc_bclink_init();
-	write_unlock_bh(&tipc_net_lock);
+	res = tipc_bclink_init();
+	if (res)
+		return res;
 
 	tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr,
 			     TIPC_ZONE_SCOPE, 0, tipc_own_addr);
+
 	pr_info("Started in network mode\n");
 	pr_info("Own node address %s, network identity %u\n",
 		tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+	return 0;
 }
 
 void tipc_net_stop(void)
@@ -195,11 +192,11 @@ void tipc_net_stop(void)
 		return;
 
 	tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr);
-	write_lock_bh(&tipc_net_lock);
+	rtnl_lock();
 	tipc_bearer_stop();
 	tipc_bclink_stop();
 	tipc_node_stop();
-	write_unlock_bh(&tipc_net_lock);
+	rtnl_unlock();
 
 	pr_info("Left network mode\n");
 }