author     Pravin B Shelar <pshelar@nicira.com>       2012-08-23 15:40:54 -0400
committer  Jesse Gross <jesse@nicira.com>             2012-09-03 22:20:49 -0400
commit     15eac2a74277bc7de68a7c2a64a7c91b4b6f5961 (patch)
tree       cbe59331108927c14a1930a6303ffbb2b303b9a7 /net
parent     46df7b814548849deee01f50bc75f8f5ae8cd767 (diff)
openvswitch: Increase maximum number of datapath ports.
Use hash table to store ports of datapath. Allow 64K ports per switch.

Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
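For readers skimming the diff, the core change is small: instead of a fixed ports[DP_MAX_PORTS] array indexed directly by port number, the datapath keeps 1024 hash buckets and chains vports off the bucket selected by port_no & (DP_VPORT_HASH_BUCKETS - 1), so port numbers can use the full 16-bit range. The sketch below is a minimal userspace model of that bucketing pattern, not the kernel code itself; the names (hash_bucket, add_vport, lookup_vport) and the plain singly linked list are illustrative stand-ins for the kernel's RCU-protected hlist machinery.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define VPORT_HASH_BUCKETS 1024          /* power of two, mirrors DP_VPORT_HASH_BUCKETS */

struct vport {                           /* simplified stand-in for the kernel's struct vport */
        uint16_t port_no;
        struct vport *next;              /* stand-in for the RCU hlist linkage */
};

static struct vport *buckets[VPORT_HASH_BUCKETS];

/* Same bucket selection as vport_hash_bucket() in the patch: mask the port
 * number with (buckets - 1), which works because the bucket count is a
 * power of two. */
static struct vport **hash_bucket(uint16_t port_no)
{
        return &buckets[port_no & (VPORT_HASH_BUCKETS - 1)];
}

static void add_vport(uint16_t port_no)
{
        struct vport *v = malloc(sizeof(*v));

        v->port_no = port_no;
        v->next = *hash_bucket(port_no); /* insert at head, like hlist_add_head_rcu() */
        *hash_bucket(port_no) = v;
}

/* Walk the chain and compare port numbers, as ovs_lookup_vport() does. */
static struct vport *lookup_vport(uint16_t port_no)
{
        struct vport *v;

        for (v = *hash_bucket(port_no); v; v = v->next)
                if (v->port_no == port_no)
                        return v;
        return NULL;
}

int main(void)
{
        /* Ports 5 and 1029 share a bucket (1029 & 1023 == 5) yet both resolve. */
        add_vport(5);
        add_vport(1029);
        add_vport(65000);                /* well past the old 1024-port limit */

        printf("%d %d %d\n",
               lookup_vport(5) != NULL,
               lookup_vport(1029) != NULL,
               lookup_vport(12345) != NULL);   /* prints: 1 1 0 */
        return 0;
}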
Diffstat (limited to 'net')
-rw-r--r--  net/openvswitch/actions.c     2
-rw-r--r--  net/openvswitch/datapath.c  110
-rw-r--r--  net/openvswitch/datapath.h   33
-rw-r--r--  net/openvswitch/flow.c       11
-rw-r--r--  net/openvswitch/flow.h        3
-rw-r--r--  net/openvswitch/vport.c       1
-rw-r--r--  net/openvswitch/vport.h       4
7 files changed, 113 insertions, 51 deletions
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index f3f96badf5aa..0da687769f56 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -266,7 +266,7 @@ static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
         if (unlikely(!skb))
                 return -ENOMEM;
 
-        vport = rcu_dereference(dp->ports[out_port]);
+        vport = ovs_vport_rcu(dp, out_port);
         if (unlikely(!vport)) {
                 kfree_skb(skb);
                 return -ENODEV;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index cad39fca75a9..105a0b5adc51 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -116,7 +116,7 @@ static struct datapath *get_dp(struct net *net, int dp_ifindex)
 /* Must be called with rcu_read_lock or RTNL lock. */
 const char *ovs_dp_name(const struct datapath *dp)
 {
-        struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
+        struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
         return vport->ops->get_name(vport);
 }
 
@@ -127,7 +127,7 @@ static int get_dpifindex(struct datapath *dp)
 
         rcu_read_lock();
 
-        local = rcu_dereference(dp->ports[OVSP_LOCAL]);
+        local = ovs_vport_rcu(dp, OVSP_LOCAL);
         if (local)
                 ifindex = local->ops->get_ifindex(local);
         else
@@ -145,9 +145,30 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
         ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
         free_percpu(dp->stats_percpu);
         release_net(ovs_dp_get_net(dp));
+        kfree(dp->ports);
         kfree(dp);
 }
 
+static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
+                                            u16 port_no)
+{
+        return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
+}
+
+struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
+{
+        struct vport *vport;
+        struct hlist_node *n;
+        struct hlist_head *head;
+
+        head = vport_hash_bucket(dp, port_no);
+        hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
+                if (vport->port_no == port_no)
+                        return vport;
+        }
+        return NULL;
+}
+
 /* Called with RTNL lock and genl_lock. */
 static struct vport *new_vport(const struct vport_parms *parms)
 {
@@ -156,9 +177,9 @@ static struct vport *new_vport(const struct vport_parms *parms)
         vport = ovs_vport_add(parms);
         if (!IS_ERR(vport)) {
                 struct datapath *dp = parms->dp;
+                struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
 
-                rcu_assign_pointer(dp->ports[parms->port_no], vport);
-                list_add(&vport->node, &dp->port_list);
+                hlist_add_head_rcu(&vport->dp_hash_node, head);
         }
 
         return vport;
@@ -170,8 +191,7 @@ void ovs_dp_detach_port(struct vport *p)
         ASSERT_RTNL();
 
         /* First drop references to device. */
-        list_del(&p->node);
-        rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
+        hlist_del_rcu(&p->dp_hash_node);
 
         /* Then destroy it. */
         ovs_vport_del(p);
@@ -1248,7 +1268,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
         struct datapath *dp;
         struct vport *vport;
         struct ovs_net *ovs_net;
-        int err;
+        int err, i;
 
         err = -EINVAL;
         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
@@ -1261,7 +1281,6 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
         if (dp == NULL)
                 goto err_unlock_rtnl;
 
-        INIT_LIST_HEAD(&dp->port_list);
         ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
 
         /* Allocate table. */
@@ -1276,6 +1295,16 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
                 goto err_destroy_table;
         }
 
+        dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
+                            GFP_KERNEL);
+        if (!dp->ports) {
+                err = -ENOMEM;
+                goto err_destroy_percpu;
+        }
+
+        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
+                INIT_HLIST_HEAD(&dp->ports[i]);
+
         /* Set up our datapath device. */
         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
         parms.type = OVS_VPORT_TYPE_INTERNAL;
@@ -1290,7 +1319,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
                 if (err == -EBUSY)
                         err = -EEXIST;
 
-                goto err_destroy_percpu;
+                goto err_destroy_ports_array;
         }
 
         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
@@ -1309,7 +1338,9 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
         return 0;
 
 err_destroy_local_port:
-        ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+        ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
+err_destroy_ports_array:
+        kfree(dp->ports);
 err_destroy_percpu:
         free_percpu(dp->stats_percpu);
 err_destroy_table:
@@ -1326,15 +1357,21 @@ err:
 /* Called with genl_mutex. */
 static void __dp_destroy(struct datapath *dp)
 {
-        struct vport *vport, *next_vport;
+        int i;
 
         rtnl_lock();
-        list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
-                if (vport->port_no != OVSP_LOCAL)
-                        ovs_dp_detach_port(vport);
+
+        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
+                struct vport *vport;
+                struct hlist_node *node, *n;
+
+                hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
+                        if (vport->port_no != OVSP_LOCAL)
+                                ovs_dp_detach_port(vport);
+        }
 
         list_del(&dp->list_node);
-        ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+        ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
 
         /* rtnl_unlock() will wait until all the references to devices that
          * are pending unregistration have been dropped. We do it here to
@@ -1566,7 +1603,7 @@ static struct vport *lookup_vport(struct net *net,
                 if (!dp)
                         return ERR_PTR(-ENODEV);
 
-                vport = rcu_dereference_rtnl(dp->ports[port_no]);
+                vport = ovs_vport_rtnl_rcu(dp, port_no);
                 if (!vport)
                         return ERR_PTR(-ENOENT);
                 return vport;
@@ -1603,7 +1640,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
                 if (port_no >= DP_MAX_PORTS)
                         goto exit_unlock;
 
-                vport = rtnl_dereference(dp->ports[port_no]);
+                vport = ovs_vport_rtnl_rcu(dp, port_no);
                 err = -EBUSY;
                 if (vport)
                         goto exit_unlock;
@@ -1613,7 +1650,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
                                 err = -EFBIG;
                                 goto exit_unlock;
                         }
-                        vport = rtnl_dereference(dp->ports[port_no]);
+                        vport = ovs_vport_rtnl(dp, port_no);
                         if (!vport)
                                 break;
                 }
@@ -1755,32 +1792,39 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
         struct datapath *dp;
-        u32 port_no;
-        int retval;
+        int bucket = cb->args[0], skip = cb->args[1];
+        int i, j = 0;
 
         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
         if (!dp)
                 return -ENODEV;
 
         rcu_read_lock();
-        for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
+        for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
                 struct vport *vport;
-
-                vport = rcu_dereference(dp->ports[port_no]);
-                if (!vport)
-                        continue;
-
-                if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
-                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                            OVS_VPORT_CMD_NEW) < 0)
-                        break;
+                struct hlist_node *n;
+
+                j = 0;
+                hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
+                        if (j >= skip &&
+                            ovs_vport_cmd_fill_info(vport, skb,
+                                                    NETLINK_CB(cb->skb).pid,
+                                                    cb->nlh->nlmsg_seq,
+                                                    NLM_F_MULTI,
+                                                    OVS_VPORT_CMD_NEW) < 0)
+                                goto out;
+
+                        j++;
+                }
+                skip = 0;
         }
+out:
         rcu_read_unlock();
 
-        cb->args[0] = port_no;
-        retval = skb->len;
+        cb->args[0] = i;
+        cb->args[1] = j;
 
-        return retval;
+        return skb->len;
 }
 
 static struct genl_ops dp_vport_genl_ops[] = {
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 771c11e13e34..129ec5480758 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -29,7 +29,9 @@
 #include "flow.h"
 #include "vport.h"
 
-#define DP_MAX_PORTS 1024
+#define DP_MAX_PORTS USHRT_MAX
+#define DP_VPORT_HASH_BUCKETS 1024
+
 #define SAMPLE_ACTION_DEPTH 3
 
 /**
@@ -57,10 +59,8 @@ struct dp_stats_percpu {
  * @list_node: Element in global 'dps' list.
  * @n_flows: Number of flows currently in flow table.
  * @table: Current flow table. Protected by genl_lock and RCU.
- * @ports: Map from port number to &struct vport. %OVSP_LOCAL port
- * always exists, other ports may be %NULL. Protected by RTNL and RCU.
- * @port_list: List of all ports in @ports in arbitrary order. RTNL required
- * to iterate or modify.
+ * @ports: Hash table for ports. %OVSP_LOCAL port always exists. Protected by
+ * RTNL and RCU.
  * @stats_percpu: Per-CPU datapath statistics.
  * @net: Reference to net namespace.
  *
@@ -75,8 +75,7 @@ struct datapath {
         struct flow_table __rcu *table;
 
         /* Switch ports. */
-        struct vport __rcu *ports[DP_MAX_PORTS];
-        struct list_head port_list;
+        struct hlist_head *ports;
 
         /* Stats. */
         struct dp_stats_percpu __percpu *stats_percpu;
@@ -87,6 +86,26 @@ struct datapath {
 #endif
 };
 
+struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no);
+
+static inline struct vport *ovs_vport_rcu(const struct datapath *dp, int port_no)
+{
+        WARN_ON_ONCE(!rcu_read_lock_held());
+        return ovs_lookup_vport(dp, port_no);
+}
+
+static inline struct vport *ovs_vport_rtnl_rcu(const struct datapath *dp, int port_no)
+{
+        WARN_ON_ONCE(!rcu_read_lock_held() && !rtnl_is_locked());
+        return ovs_lookup_vport(dp, port_no);
+}
+
+static inline struct vport *ovs_vport_rtnl(const struct datapath *dp, int port_no)
+{
+        ASSERT_RTNL();
+        return ovs_lookup_vport(dp, port_no);
+}
+
 /**
  * struct ovs_skb_cb - OVS data in skb CB
  * @flow: The flow associated with this packet. May be %NULL if no flow.
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index b7f38b161909..f9f211d95ebe 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -203,10 +203,7 @@ struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
         int actions_len = nla_len(actions);
         struct sw_flow_actions *sfa;
 
-        /* At least DP_MAX_PORTS actions are required to be able to flood a
-         * packet to every port. Factor of 2 allows for setting VLAN tags,
-         * etc. */
-        if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
+        if (actions_len > MAX_ACTIONS_BUFSIZE)
                 return ERR_PTR(-EINVAL);
 
         sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
@@ -1000,7 +997,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
                 swkey->phy.in_port = in_port;
                 attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
         } else {
-                swkey->phy.in_port = USHRT_MAX;
+                swkey->phy.in_port = DP_MAX_PORTS;
         }
 
         /* Data attributes. */
@@ -1143,7 +1140,7 @@ int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
         const struct nlattr *nla;
         int rem;
 
-        *in_port = USHRT_MAX;
+        *in_port = DP_MAX_PORTS;
         *priority = 0;
 
         nla_for_each_nested(nla, attr, rem) {
@@ -1180,7 +1177,7 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
             nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
                 goto nla_put_failure;
 
-        if (swkey->phy.in_port != USHRT_MAX &&
+        if (swkey->phy.in_port != DP_MAX_PORTS &&
             nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
                 goto nla_put_failure;
 
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 9b75617ca4e0..d92e22a638cf 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -43,7 +43,7 @@ struct sw_flow_actions {
 struct sw_flow_key {
         struct {
                 u32 priority;   /* Packet QoS priority. */
-                u16 in_port;    /* Input switch port (or USHRT_MAX). */
+                u16 in_port;    /* Input switch port (or DP_MAX_PORTS). */
         } phy;
         struct {
                 u8 src[ETH_ALEN];       /* Ethernet source address. */
@@ -161,6 +161,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
 int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
                                    const struct nlattr *);
 
+#define MAX_ACTIONS_BUFSIZE (16 * 1024)
 #define TBL_MIN_BUCKETS 1024
 
 struct flow_table {
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 9873acea9785..1abd9609ba78 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -127,6 +127,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
         vport->port_no = parms->port_no;
         vport->upcall_pid = parms->upcall_pid;
         vport->ops = ops;
+        INIT_HLIST_NODE(&vport->dp_hash_node);
 
         vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
         if (!vport->percpu_stats) {
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 97cef08d981b..c56e4836e93b 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -70,10 +70,10 @@ struct vport_err_stats {
  * @rcu: RCU callback head for deferred destruction.
  * @port_no: Index into @dp's @ports array.
  * @dp: Datapath to which this port belongs.
- * @node: Element in @dp's @port_list.
  * @upcall_pid: The Netlink port to use for packets received on this port that
  * miss the flow table.
  * @hash_node: Element in @dev_table hash table in vport.c.
+ * @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
  * @ops: Class structure.
  * @percpu_stats: Points to per-CPU statistics used and maintained by vport
  * @stats_lock: Protects @err_stats;
@@ -83,10 +83,10 @@ struct vport {
         struct rcu_head rcu;
         u16 port_no;
         struct datapath *dp;
-        struct list_head node;
         u32 upcall_pid;
 
         struct hlist_node hash_node;
+        struct hlist_node dp_hash_node;
         const struct vport_ops *ops;
 
         struct vport_percpu_stats __percpu *percpu_stats;