Diffstat (limited to 'net/openvswitch/vport.c')
-rw-r--r--  net/openvswitch/vport.c | 58 ++++++++++++++++++++++++++++------------------------------
1 file changed, 28 insertions(+), 30 deletions(-)
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index f6b8132ce4cb..720623190eaa 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -40,7 +40,7 @@ static const struct vport_ops *vport_ops_list[] = {
         &ovs_internal_vport_ops,
 };
 
-/* Protected by RCU read lock for reading, RTNL lock for writing. */
+/* Protected by RCU read lock for reading, ovs_mutex for writing. */
 static struct hlist_head *dev_table;
 #define VPORT_HASH_BUCKETS 1024
 
@@ -80,7 +80,7 @@ static struct hlist_head *hash_bucket(struct net *net, const char *name)
  *
  * @name: name of port to find
  *
- * Must be called with RTNL or RCU read lock.
+ * Must be called with ovs or RCU read lock.
  */
 struct vport *ovs_vport_locate(struct net *net, const char *name)
 {
@@ -128,7 +128,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
         vport->ops = ops;
         INIT_HLIST_NODE(&vport->dp_hash_node);
 
-        vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
+        vport->percpu_stats = alloc_percpu(struct pcpu_tstats);
         if (!vport->percpu_stats) {
                 kfree(vport);
                 return ERR_PTR(-ENOMEM);
@@ -161,7 +161,7 @@ void ovs_vport_free(struct vport *vport)
  * @parms: Information about new vport.
  *
  * Creates a new vport with the specified configuration (which is dependent on
- * device type). RTNL lock must be held.
+ * device type). ovs_mutex must be held.
  */
 struct vport *ovs_vport_add(const struct vport_parms *parms)
 {
@@ -169,8 +169,6 @@ struct vport *ovs_vport_add(const struct vport_parms *parms)
         int err = 0;
         int i;
 
-        ASSERT_RTNL();
-
         for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
                 if (vport_ops_list[i]->type == parms->type) {
                         struct hlist_head *bucket;
@@ -201,12 +199,10 @@ out:
  * @port: New configuration.
  *
  * Modifies an existing device with the specified configuration (which is
- * dependent on device type). RTNL lock must be held.
+ * dependent on device type). ovs_mutex must be held.
  */
 int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
 {
-        ASSERT_RTNL();
-
         if (!vport->ops->set_options)
                 return -EOPNOTSUPP;
         return vport->ops->set_options(vport, options);
@@ -218,11 +214,11 @@ int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
  * @vport: vport to delete.
  *
  * Detaches @vport from its datapath and destroys it. It is possible to fail
- * for reasons such as lack of memory. RTNL lock must be held.
+ * for reasons such as lack of memory. ovs_mutex must be held.
  */
 void ovs_vport_del(struct vport *vport)
 {
-        ASSERT_RTNL();
+        ASSERT_OVSL();
 
         hlist_del_rcu(&vport->hash_node);
 
@@ -237,7 +233,7 @@ void ovs_vport_del(struct vport *vport)
  *
  * Retrieves transmit, receive, and error stats for the given device.
  *
- * Must be called with RTNL lock or rcu_read_lock.
+ * Must be called with ovs_mutex or rcu_read_lock.
  */
 void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
 {
@@ -264,16 +260,16 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
         spin_unlock_bh(&vport->stats_lock);
 
         for_each_possible_cpu(i) {
-                const struct vport_percpu_stats *percpu_stats;
-                struct vport_percpu_stats local_stats;
+                const struct pcpu_tstats *percpu_stats;
+                struct pcpu_tstats local_stats;
                 unsigned int start;
 
                 percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
 
                 do {
-                        start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+                        start = u64_stats_fetch_begin_bh(&percpu_stats->syncp);
                         local_stats = *percpu_stats;
-                } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+                } while (u64_stats_fetch_retry_bh(&percpu_stats->syncp, start));
 
                 stats->rx_bytes += local_stats.rx_bytes;
                 stats->rx_packets += local_stats.rx_packets;
@@ -296,22 +292,24 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
  * negative error code if a real error occurred. If an error occurs, @skb is
  * left unmodified.
  *
- * Must be called with RTNL lock or rcu_read_lock.
+ * Must be called with ovs_mutex or rcu_read_lock.
  */
 int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
 {
         struct nlattr *nla;
+        int err;
+
+        if (!vport->ops->get_options)
+                return 0;
 
         nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
         if (!nla)
                 return -EMSGSIZE;
 
-        if (vport->ops->get_options) {
-                int err = vport->ops->get_options(vport, skb);
-                if (err) {
-                        nla_nest_cancel(skb, nla);
-                        return err;
-                }
+        err = vport->ops->get_options(vport, skb);
+        if (err) {
+                nla_nest_cancel(skb, nla);
+                return err;
         }
 
         nla_nest_end(skb, nla);
@@ -329,13 +327,13 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
  */
 void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
 {
-        struct vport_percpu_stats *stats;
+        struct pcpu_tstats *stats;
 
         stats = this_cpu_ptr(vport->percpu_stats);
-        u64_stats_update_begin(&stats->sync);
+        u64_stats_update_begin(&stats->syncp);
         stats->rx_packets++;
         stats->rx_bytes += skb->len;
-        u64_stats_update_end(&stats->sync);
+        u64_stats_update_end(&stats->syncp);
 
         ovs_dp_process_received_packet(vport, skb);
 }
@@ -346,7 +344,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
  * @vport: vport on which to send the packet
  * @skb: skb to send
  *
- * Sends the given packet and returns the length of data sent. Either RTNL
+ * Sends the given packet and returns the length of data sent. Either ovs
  * lock or rcu_read_lock must be held.
  */
 int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
@@ -354,14 +352,14 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
         int sent = vport->ops->send(vport, skb);
 
         if (likely(sent)) {
-                struct vport_percpu_stats *stats;
+                struct pcpu_tstats *stats;
 
                 stats = this_cpu_ptr(vport->percpu_stats);
 
-                u64_stats_update_begin(&stats->sync);
+                u64_stats_update_begin(&stats->syncp);
                 stats->tx_packets++;
                 stats->tx_bytes += sent;
-                u64_stats_update_end(&stats->sync);
+                u64_stats_update_end(&stats->syncp);
         }
         return sent;
 }
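
For context, the snippet below is a minimal standalone sketch of the reader side of the per-CPU stats pattern used by the reworked ovs_vport_get_stats() above. It is not part of the patch; the helper name sketch_sum_pcpu_tstats is hypothetical, and it assumes the 3.9-era struct pcpu_tstats (rx/tx counters plus a u64_stats_sync member named syncp, from linux/if_tunnel.h) together with the _bh variants of the u64_stats fetch helpers.

/*
 * Sketch only, not part of the patch: sum per-CPU pcpu_tstats into a single
 * struct, mirroring the reader loop in ovs_vport_get_stats().
 */
#include <linux/if_tunnel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/u64_stats_sync.h>

static void sketch_sum_pcpu_tstats(struct pcpu_tstats __percpu *percpu,
                                   struct pcpu_tstats *sum)
{
        int i;

        memset(sum, 0, sizeof(*sum));

        for_each_possible_cpu(i) {
                const struct pcpu_tstats *stats = per_cpu_ptr(percpu, i);
                struct pcpu_tstats local;
                unsigned int start;

                /* Re-read the snapshot if a writer updated it concurrently. */
                do {
                        start = u64_stats_fetch_begin_bh(&stats->syncp);
                        local = *stats;
                } while (u64_stats_fetch_retry_bh(&stats->syncp, start));

                sum->rx_packets += local.rx_packets;
                sum->rx_bytes   += local.rx_bytes;
                sum->tx_packets += local.tx_packets;
                sum->tx_bytes   += local.tx_bytes;
        }
}

The u64_stats seqcount lets each per-CPU writer bump 64-bit counters without a lock; a reader simply retries its copy if a writer interleaved, which is why the snapshot is taken inside the begin/retry loop.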