diff options
| author | Wolfram Sang <wsa@the-dreams.de> | 2014-06-17 08:36:41 -0400 |
|---|---|---|
| committer | Wolfram Sang <wsa@the-dreams.de> | 2014-06-17 08:37:31 -0400 |
| commit | f0b1f6442b5090fed3529cb39f3acf8c91693d3d (patch) | |
| tree | bc5f62b017a82161c9a7f892f464813f6efd5bf3 /net/tipc/net.c | |
| parent | 4632a93f015caf6d7db4352f37aab74a39e60d7a (diff) | |
| parent | 7171511eaec5bf23fb06078f59784a3a0626b38f (diff) | |
Merge tag 'v3.16-rc1' into i2c/for-next
Merge a stable base (Linux 3.16-rc1)
Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
Diffstat (limited to 'net/tipc/net.c')
| -rw-r--r-- | net/tipc/net.c | 71 |
1 file changed, 34 insertions, 37 deletions
diff --git a/net/tipc/net.c b/net/tipc/net.c index 4c564eb69e1a..f64375e7f99f 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
| @@ -39,45 +39,41 @@ | |||
| 39 | #include "name_distr.h" | 39 | #include "name_distr.h" |
| 40 | #include "subscr.h" | 40 | #include "subscr.h" |
| 41 | #include "port.h" | 41 | #include "port.h" |
| 42 | #include "socket.h" | ||
| 42 | #include "node.h" | 43 | #include "node.h" |
| 43 | #include "config.h" | 44 | #include "config.h" |
| 44 | 45 | ||
| 45 | /* | 46 | /* |
| 46 | * The TIPC locking policy is designed to ensure a very fine locking | 47 | * The TIPC locking policy is designed to ensure a very fine locking |
| 47 | * granularity, permitting complete parallel access to individual | 48 | * granularity, permitting complete parallel access to individual |
| 48 | * port and node/link instances. The code consists of three major | 49 | * port and node/link instances. The code consists of four major |
| 49 | * locking domains, each protected with their own disjunct set of locks. | 50 | * locking domains, each protected with their own disjunct set of locks. |
| 50 | * | 51 | * |
| 51 | * 1: The routing hierarchy. | 52 | * 1: The bearer level. |
| 52 | * Comprises the structures 'zone', 'cluster', 'node', 'link' | 53 | * RTNL lock is used to serialize the process of configuring bearer |
| 53 | * and 'bearer'. The whole hierarchy is protected by a big | 54 | * on update side, and RCU lock is applied on read side to make |
| 54 | read/write lock, tipc_net_lock, to ensure that nothing is added | 55 | bearer instance valid on both paths of message transmission and |
| 55 | * or removed while code is accessing any of these structures. | 56 | * reception. |
| 56 | * This layer must not be called from the two others while they | ||
| 57 | * hold any of their own locks. | ||
| 58 | * Neither must it itself do any upcalls to the other two before | ||
| 59 | * it has released tipc_net_lock and other protective locks. | ||
| 60 | * | 57 | * |
| 61 | * Within the tipc_net_lock domain there are two sub-domains;'node' and | 58 | * 2: The node and link level. |
| 62 | * 'bearer', where local write operations are permitted, | 59 | * All node instances are saved into two tipc_node_list and node_htable |
| 63 | * provided that those are protected by individual spin_locks | 60 | * lists. The two lists are protected by node_list_lock on write side, |
| 64 | * per instance. Code holding tipc_net_lock(read) and a node spin_lock | 61 | * and they are guarded with RCU lock on read side. Especially node |
| 65 | * is permitted to poke around in both the node itself and its | 62 | * instance is destroyed only when TIPC module is removed, and we can |
| 66 | subordinate links. I.e, it can update link counters and queues, | 63 | confirm that there is no user accessing the node at the |
| 67 | * change link state, send protocol messages, and alter the | 64 | * moment. Therefore, Except for iterating the two lists within RCU |
| 68 | "active_links" array in the node; but it can _not_ remove a link | 65 | protection, it's not needed to hold RCU when we access node instance |
| 69 | * or a node from the overall structure. | 66 | * in other places. |
| 70 | * Correspondingly, individual bearers may change status within a | ||
| 71 | tipc_net_lock(read), protected by an individual spin_lock per bearer | ||
| 72 | * instance, but it needs tipc_net_lock(write) to remove/add any bearers. | ||
| 73 | * | 67 | * |
| 68 | * In addition, all members in node structure including link instances | ||
| 69 | * are protected by node spin lock. | ||
| 74 | * | 70 | * |
| 75 | * 2: The transport level of the protocol. | 71 | * 3: The transport level of the protocol. |
| 76 | * This consists of the structures port, (and its user level | 72 | * This consists of the structures port, (and its user level |
| 77 | * representations, such as user_port and tipc_sock), reference and | 73 | * representations, such as user_port and tipc_sock), reference and |
| 78 | * tipc_user (port.c, reg.c, socket.c). | 74 | * tipc_user (port.c, reg.c, socket.c). |
| 79 | * | 75 | * |
| 80 | * This layer has four different locks: | 76 | * This layer has four different locks: |
| 81 | * - The tipc_port spin_lock. This is protecting each port instance | 77 | * - The tipc_port spin_lock. This is protecting each port instance |
| 82 | * from parallel data access and removal. Since we can not place | 78 | * from parallel data access and removal. Since we can not place |
| 83 | * this lock in the port itself, it has been placed in the | 79 | * this lock in the port itself, it has been placed in the |
| @@ -96,7 +92,7 @@ | |||
| 96 | * There are two such lists; 'port_list', which is used for management, | 92 | * There are two such lists; 'port_list', which is used for management, |
| 97 | * and 'wait_list', which is used to queue ports during congestion. | 93 | * and 'wait_list', which is used to queue ports during congestion. |
| 98 | * | 94 | * |
| 99 | * 3: The name table (name_table.c, name_distr.c, subscription.c) | 95 | * 4: The name table (name_table.c, name_distr.c, subscription.c) |
| 100 | * - There is one big read/write-lock (tipc_nametbl_lock) protecting the | 96 | * - There is one big read/write-lock (tipc_nametbl_lock) protecting the |
| 101 | * overall name table structure. Nothing must be added/removed to | 97 | * overall name table structure. Nothing must be added/removed to |
| 102 | * this structure without holding write access to it. | 98 | * this structure without holding write access to it. |
| @@ -108,8 +104,6 @@ | |||
| 108 | * - A local spin_lock protecting the queue of subscriber events. | 104 | * - A local spin_lock protecting the queue of subscriber events. |
| 109 | */ | 105 | */ |
| 110 | 106 | ||
| 111 | DEFINE_RWLOCK(tipc_net_lock); | ||
| 112 | |||
| 113 | static void net_route_named_msg(struct sk_buff *buf) | 107 | static void net_route_named_msg(struct sk_buff *buf) |
| 114 | { | 108 | { |
| 115 | struct tipc_msg *msg = buf_msg(buf); | 109 | struct tipc_msg *msg = buf_msg(buf); |
| @@ -148,7 +142,7 @@ void tipc_net_route_msg(struct sk_buff *buf) | |||
| 148 | if (msg_mcast(msg)) | 142 | if (msg_mcast(msg)) |
| 149 | tipc_port_mcast_rcv(buf, NULL); | 143 | tipc_port_mcast_rcv(buf, NULL); |
| 150 | else if (msg_destport(msg)) | 144 | else if (msg_destport(msg)) |
| 151 | tipc_port_rcv(buf); | 145 | tipc_sk_rcv(buf); |
| 152 | else | 146 | else |
| 153 | net_route_named_msg(buf); | 147 | net_route_named_msg(buf); |
| 154 | return; | 148 | return; |
| @@ -171,22 +165,25 @@ void tipc_net_route_msg(struct sk_buff *buf) | |||
| 171 | tipc_link_xmit(buf, dnode, msg_link_selector(msg)); | 165 | tipc_link_xmit(buf, dnode, msg_link_selector(msg)); |
| 172 | } | 166 | } |
| 173 | 167 | ||
| 174 | void tipc_net_start(u32 addr) | 168 | int tipc_net_start(u32 addr) |
| 175 | { | 169 | { |
| 176 | char addr_string[16]; | 170 | char addr_string[16]; |
| 171 | int res; | ||
| 177 | 172 | ||
| 178 | write_lock_bh(&tipc_net_lock); | ||
| 179 | tipc_own_addr = addr; | 173 | tipc_own_addr = addr; |
| 180 | tipc_named_reinit(); | 174 | tipc_named_reinit(); |
| 181 | tipc_port_reinit(); | 175 | tipc_port_reinit(); |
| 182 | tipc_bclink_init(); | 176 | res = tipc_bclink_init(); |
| 183 | write_unlock_bh(&tipc_net_lock); | 177 | if (res) |
| 178 | return res; | ||
| 184 | 179 | ||
| 185 | tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr, | 180 | tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr, |
| 186 | TIPC_ZONE_SCOPE, 0, tipc_own_addr); | 181 | TIPC_ZONE_SCOPE, 0, tipc_own_addr); |
| 182 | |||
| 187 | pr_info("Started in network mode\n"); | 183 | pr_info("Started in network mode\n"); |
| 188 | pr_info("Own node address %s, network identity %u\n", | 184 | pr_info("Own node address %s, network identity %u\n", |
| 189 | tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); | 185 | tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); |
| 186 | return 0; | ||
| 190 | } | 187 | } |
| 191 | 188 | ||
| 192 | void tipc_net_stop(void) | 189 | void tipc_net_stop(void) |
| @@ -195,11 +192,11 @@ void tipc_net_stop(void) | |||
| 195 | return; | 192 | return; |
| 196 | 193 | ||
| 197 | tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr); | 194 | tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr); |
| 198 | write_lock_bh(&tipc_net_lock); | 195 | rtnl_lock(); |
| 199 | tipc_bearer_stop(); | 196 | tipc_bearer_stop(); |
| 200 | tipc_bclink_stop(); | 197 | tipc_bclink_stop(); |
| 201 | tipc_node_stop(); | 198 | tipc_node_stop(); |
| 202 | write_unlock_bh(&tipc_net_lock); | 199 | rtnl_unlock(); |
| 203 | 200 | ||
| 204 | pr_info("Left network mode\n"); | 201 | pr_info("Left network mode\n"); |
| 205 | } | 202 | } |
