author     Bob Copeland <me@bobcopeland.com>          2016-03-18 22:11:30 -0400
committer  Johannes Berg <johannes.berg@intel.com>    2016-04-05 15:34:51 -0400
commit     18b27ff7d2e232b0f07f2f51aa8052ff2a617908 (patch)
tree       78c8d16a5ef5430a1eaf062b6ec296fbca033972 /net/mac80211
parent     47a0489ce1e518f4936c7fedb93b3d2abd7ccd2e (diff)
mac80211: mesh: embed gates hlist head directly
Since we have converted the mesh path tables to rhashtable, we are no
longer swapping out the entire mesh_pathtbl pointer with RCU. As a
result, we no longer need indirection to the hlist head for the gates
list and can simply embed it, saving a pair of pointer-sized
allocations.

Signed-off-by: Bob Copeland <me@bobcopeland.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
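In short: the gates list head moves from a separately kzalloc'd pointer to a
member embedded in struct mesh_table. A condensed before/after sketch, with
the field set taken from the mesh.h hunk below:

/* Before: one extra pointer-sized allocation per table, with its own
 * failure and cleanup paths. */
struct mesh_table {
	atomic_t entries;		/* Up to MAX_MESH_NEIGHBOURS */
	struct hlist_head *known_gates;	/* kzalloc'd in mesh_table_alloc() */
	spinlock_t gates_lock;

	struct rhashtable rhead;
};

/* After: the head is embedded and always valid; callers now take
 * &tbl->known_gates where they previously passed tbl->known_gates. */
struct mesh_table {
	atomic_t entries;		/* Up to MAX_MESH_NEIGHBOURS */
	struct hlist_head known_gates;	/* set up with INIT_HLIST_HEAD() */
	spinlock_t gates_lock;

	struct rhashtable rhead;
};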
Diffstat (limited to 'net/mac80211')
-rw-r--r--  net/mac80211/mesh.h          2
-rw-r--r--  net/mac80211/mesh_pathtbl.c  18
2 files changed, 5 insertions(+), 15 deletions(-)
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index bc3f9a32b5a4..46b540a25d9d 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -134,7 +134,7 @@ struct mesh_path {
  */
 struct mesh_table {
 	atomic_t entries;		/* Up to MAX_MESH_NEIGHBOURS */
-	struct hlist_head *known_gates;
+	struct hlist_head known_gates;
 	spinlock_t gates_lock;
 
 	struct rhashtable rhead;
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 1c9412a29ca3..6db2ddfa0695 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -58,12 +58,7 @@ static struct mesh_table *mesh_table_alloc(void)
 	if (!newtbl)
 		return NULL;
 
-	newtbl->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
-	if (!newtbl->known_gates) {
-		kfree(newtbl);
-		return NULL;
-	}
-	INIT_HLIST_HEAD(newtbl->known_gates);
+	INIT_HLIST_HEAD(&newtbl->known_gates);
 	atomic_set(&newtbl->entries, 0);
 	spin_lock_init(&newtbl->gates_lock);
 
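With the head embedded, initializing the gates list can no longer fail, so
mesh_table_alloc() loses an entire error path. A sketch of the resulting
function; the lines outside the hunk above are assumptions about the
surrounding code, not part of this patch:

static struct mesh_table *mesh_table_alloc(void)
{
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	/* Embedded head: plain initialization, no allocation to check. */
	INIT_HLIST_HEAD(&newtbl->known_gates);
	atomic_set(&newtbl->entries, 0);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}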
@@ -341,7 +336,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
 	mpath->sdata->u.mesh.num_gates++;
 
 	spin_lock(&tbl->gates_lock);
-	hlist_add_head_rcu(&mpath->gate_list, tbl->known_gates);
+	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
 	spin_unlock(&tbl->gates_lock);
 
 	spin_unlock_bh(&mpath->state_lock);
@@ -759,16 +754,11 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
 	struct mesh_path *from_mpath = mpath;
 	struct mesh_path *gate;
 	bool copy = false;
-	struct hlist_head *known_gates;
 
 	tbl = sdata->u.mesh.mesh_paths;
-	known_gates = tbl->known_gates;
-
-	if (!known_gates)
-		return -EHOSTUNREACH;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(gate, known_gates, gate_list) {
+	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
 		if (gate->flags & MESH_PATH_ACTIVE) {
 			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
 			mesh_path_move_to_queue(gate, from_mpath, copy);
@@ -781,7 +771,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
 		}
 	}
 
-	hlist_for_each_entry_rcu(gate, known_gates, gate_list) {
+	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
 		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
 		mesh_path_tx_pending(gate);
 	}
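Note how the old -EHOSTUNREACH bail-out disappears entirely: an embedded
hlist_head always exists, and iterating an empty list is simply a no-op. A
hypothetical helper illustrating the resulting pattern (mesh_has_active_gate
is illustrative only, not part of the patch):

static bool mesh_has_active_gate(struct mesh_table *tbl)
{
	struct mesh_path *gate;
	bool found = false;

	rcu_read_lock();
	/* No NULL check needed; an empty embedded head iterates zero times. */
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}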