Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/devinet.c                10
-rw-r--r--   net/ipv4/fib_frontend.c           15
-rw-r--r--   net/ipv4/fib_semantics.c          23
-rw-r--r--   net/ipv4/fib_trie.c               33
-rw-r--r--   net/ipv4/inet_connection_sock.c   10
-rw-r--r--   net/ipv4/inet_fragment.c          10
-rw-r--r--   net/ipv4/inet_hashtables.c         8
-rw-r--r--   net/ipv4/inet_timewait_sock.c      7
-rw-r--r--   net/ipv4/raw.c                     8
-rw-r--r--   net/ipv4/tcp_ipv4.c                7
10 files changed, 49 insertions(+), 82 deletions(-)
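
Every hunk below is the same mechanical conversion: the hlist iterators (and the sk_for_each*, inet_bind_bucket_for_each and inet_twsk_for_each_inmate* wrappers built on them) no longer take a separate struct hlist_node * cursor, because the entry pointer itself now serves as the loop cursor. A minimal sketch of the before/after call-site pattern, assuming the post-change <linux/list.h> API; struct foo, its key field and foo_head are made-up names used only for illustration:

        #include <linux/list.h>

        struct foo {
                int key;
                struct hlist_node list;         /* linkage into foo_head */
        };

        static HLIST_HEAD(foo_head);

        static struct foo *foo_lookup(int key)
        {
                struct foo *f;

                /* Before the conversion an explicit cursor was required:
                 *      struct hlist_node *node;
                 *      hlist_for_each_entry(f, node, &foo_head, list)
                 */
                hlist_for_each_entry(f, &foo_head, list)        /* cursor dropped */
                        if (f->key == key)
                                return f;
                return NULL;
        }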
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5281314886c1..f678507bc829 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -139,10 +139,9 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
         u32 hash = inet_addr_hash(net, addr);
         struct net_device *result = NULL;
         struct in_ifaddr *ifa;
-        struct hlist_node *node;
 
         rcu_read_lock();
-        hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
+        hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
                 if (ifa->ifa_local == addr) {
                         struct net_device *dev = ifa->ifa_dev->dev;
 
@@ -588,7 +587,6 @@ static void check_lifetime(struct work_struct *work)
 {
         unsigned long now, next, next_sec, next_sched;
         struct in_ifaddr *ifa;
-        struct hlist_node *node;
         int i;
 
         now = jiffies;
@@ -596,8 +594,7 @@ static void check_lifetime(struct work_struct *work)
 
         rcu_read_lock();
         for (i = 0; i < IN4_ADDR_HSIZE; i++) {
-                hlist_for_each_entry_rcu(ifa, node,
-                                         &inet_addr_lst[i], hash) {
+                hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
                         unsigned long age;
 
                         if (ifa->ifa_flags & IFA_F_PERMANENT)
@@ -1493,7 +1490,6 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
         struct in_device *in_dev;
         struct in_ifaddr *ifa;
         struct hlist_head *head;
-        struct hlist_node *node;
 
         s_h = cb->args[0];
         s_idx = idx = cb->args[1];
@@ -1503,7 +1499,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                 idx = 0;
                 head = &net->dev_index_head[h];
                 rcu_read_lock();
-                hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+                hlist_for_each_entry_rcu(dev, head, index_hlist) {
                         if (idx < s_idx)
                                 goto cont;
                         if (h > s_h || idx > s_idx)
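
The devinet.c hunks above touch RCU-protected readers only; the _rcu iterator loses its cursor argument but the locking rules are unchanged. An illustrative fragment in the same style (struct foo and foo_head as in the sketch above):

        struct foo *f;

        rcu_read_lock();
        hlist_for_each_entry_rcu(f, &foo_head, list) {
                if (f->key == key)
                        break;
        }
        rcu_read_unlock();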
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 99f00d39d10b..eb4bb12b3eb4 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -112,7 +112,6 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
 struct fib_table *fib_get_table(struct net *net, u32 id)
 {
         struct fib_table *tb;
-        struct hlist_node *node;
         struct hlist_head *head;
         unsigned int h;
 
@@ -122,7 +121,7 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
 
         rcu_read_lock();
         head = &net->ipv4.fib_table_hash[h];
-        hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
+        hlist_for_each_entry_rcu(tb, head, tb_hlist) {
                 if (tb->tb_id == id) {
                         rcu_read_unlock();
                         return tb;
@@ -137,13 +136,12 @@ static void fib_flush(struct net *net)
 {
         int flushed = 0;
         struct fib_table *tb;
-        struct hlist_node *node;
         struct hlist_head *head;
         unsigned int h;
 
         for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
                 head = &net->ipv4.fib_table_hash[h];
-                hlist_for_each_entry(tb, node, head, tb_hlist)
+                hlist_for_each_entry(tb, head, tb_hlist)
                         flushed += fib_table_flush(tb);
         }
 
@@ -656,7 +654,6 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
         unsigned int h, s_h;
         unsigned int e = 0, s_e;
         struct fib_table *tb;
-        struct hlist_node *node;
         struct hlist_head *head;
         int dumped = 0;
 
@@ -670,7 +667,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
         for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
                 e = 0;
                 head = &net->ipv4.fib_table_hash[h];
-                hlist_for_each_entry(tb, node, head, tb_hlist) {
+                hlist_for_each_entry(tb, head, tb_hlist) {
                         if (e < s_e)
                                 goto next;
                         if (dumped)
@@ -1117,11 +1114,11 @@ static void ip_fib_net_exit(struct net *net)
         for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
                 struct fib_table *tb;
                 struct hlist_head *head;
-                struct hlist_node *node, *tmp;
+                struct hlist_node *tmp;
 
                 head = &net->ipv4.fib_table_hash[i];
-                hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) {
-                        hlist_del(node);
+                hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
+                        hlist_del(&tb->tb_hlist);
                         fib_table_flush(tb);
                         fib_free_table(tb);
                 }
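
The ip_fib_net_exit() hunk above shows the one place where more than the iterator line changes: with no standalone cursor left to hand to hlist_del(), the entry is unlinked through its embedded node instead, and the _safe variant still caches the next pointer so the entry may be freed inside the loop. A sketch of that pattern, reusing the illustrative struct foo from the first sketch:

        struct hlist_node *tmp;
        struct foo *f;

        hlist_for_each_entry_safe(f, tmp, &foo_head, list) {
                hlist_del(&f->list);    /* unlink via the embedded hlist_node */
                kfree(f);               /* safe: tmp already holds the next node */
        }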
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 4797a800faf8..8f6cb7a87cd6 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -298,14 +298,13 @@ static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
 static struct fib_info *fib_find_info(const struct fib_info *nfi)
 {
         struct hlist_head *head;
-        struct hlist_node *node;
         struct fib_info *fi;
         unsigned int hash;
 
         hash = fib_info_hashfn(nfi);
         head = &fib_info_hash[hash];
 
-        hlist_for_each_entry(fi, node, head, fib_hash) {
+        hlist_for_each_entry(fi, head, fib_hash) {
                 if (!net_eq(fi->fib_net, nfi->fib_net))
                         continue;
                 if (fi->fib_nhs != nfi->fib_nhs)
@@ -331,7 +330,6 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
 int ip_fib_check_default(__be32 gw, struct net_device *dev)
 {
         struct hlist_head *head;
-        struct hlist_node *node;
         struct fib_nh *nh;
         unsigned int hash;
 
@@ -339,7 +337,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev)
 
         hash = fib_devindex_hashfn(dev->ifindex);
         head = &fib_info_devhash[hash];
-        hlist_for_each_entry(nh, node, head, nh_hash) {
+        hlist_for_each_entry(nh, head, nh_hash) {
                 if (nh->nh_dev == dev &&
                     nh->nh_gw == gw &&
                     !(nh->nh_flags & RTNH_F_DEAD)) {
@@ -721,10 +719,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
 
         for (i = 0; i < old_size; i++) {
                 struct hlist_head *head = &fib_info_hash[i];
-                struct hlist_node *node, *n;
+                struct hlist_node *n;
                 struct fib_info *fi;
 
-                hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
+                hlist_for_each_entry_safe(fi, n, head, fib_hash) {
                         struct hlist_head *dest;
                         unsigned int new_hash;
 
@@ -739,10 +737,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
 
         for (i = 0; i < old_size; i++) {
                 struct hlist_head *lhead = &fib_info_laddrhash[i];
-                struct hlist_node *node, *n;
+                struct hlist_node *n;
                 struct fib_info *fi;
 
-                hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
+                hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
                         struct hlist_head *ldest;
                         unsigned int new_hash;
 
@@ -1096,13 +1094,12 @@ int fib_sync_down_addr(struct net *net, __be32 local)
         int ret = 0;
         unsigned int hash = fib_laddr_hashfn(local);
         struct hlist_head *head = &fib_info_laddrhash[hash];
-        struct hlist_node *node;
         struct fib_info *fi;
 
         if (fib_info_laddrhash == NULL || local == 0)
                 return 0;
 
-        hlist_for_each_entry(fi, node, head, fib_lhash) {
+        hlist_for_each_entry(fi, head, fib_lhash) {
                 if (!net_eq(fi->fib_net, net))
                         continue;
                 if (fi->fib_prefsrc == local) {
@@ -1120,13 +1117,12 @@ int fib_sync_down_dev(struct net_device *dev, int force)
         struct fib_info *prev_fi = NULL;
         unsigned int hash = fib_devindex_hashfn(dev->ifindex);
         struct hlist_head *head = &fib_info_devhash[hash];
-        struct hlist_node *node;
         struct fib_nh *nh;
 
         if (force)
                 scope = -1;
 
-        hlist_for_each_entry(nh, node, head, nh_hash) {
+        hlist_for_each_entry(nh, head, nh_hash) {
                 struct fib_info *fi = nh->nh_parent;
                 int dead;
 
@@ -1232,7 +1228,6 @@ int fib_sync_up(struct net_device *dev)
         struct fib_info *prev_fi;
         unsigned int hash;
         struct hlist_head *head;
-        struct hlist_node *node;
         struct fib_nh *nh;
         int ret;
 
@@ -1244,7 +1239,7 @@ int fib_sync_up(struct net_device *dev)
         head = &fib_info_devhash[hash];
         ret = 0;
 
-        hlist_for_each_entry(nh, node, head, nh_hash) {
+        hlist_for_each_entry(nh, head, nh_hash) {
                 struct fib_info *fi = nh->nh_parent;
                 int alive;
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 61e03da3e1f5..ff06b7543d9f 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -920,10 +920,9 @@ nomem:
 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
 {
         struct hlist_head *head = &l->list;
-        struct hlist_node *node;
         struct leaf_info *li;
 
-        hlist_for_each_entry_rcu(li, node, head, hlist)
+        hlist_for_each_entry_rcu(li, head, hlist)
                 if (li->plen == plen)
                         return li;
 
@@ -943,12 +942,11 @@ static inline struct list_head *get_fa_head(struct leaf *l, int plen)
 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
 {
         struct leaf_info *li = NULL, *last = NULL;
-        struct hlist_node *node;
 
         if (hlist_empty(head)) {
                 hlist_add_head_rcu(&new->hlist, head);
         } else {
-                hlist_for_each_entry(li, node, head, hlist) {
+                hlist_for_each_entry(li, head, hlist) {
                         if (new->plen > li->plen)
                                 break;
 
@@ -1354,9 +1352,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 {
         struct leaf_info *li;
         struct hlist_head *hhead = &l->list;
-        struct hlist_node *node;
 
-        hlist_for_each_entry_rcu(li, node, hhead, hlist) {
+        hlist_for_each_entry_rcu(li, hhead, hlist) {
                 struct fib_alias *fa;
 
                 if (l->key != (key & li->mask_plen))
@@ -1740,10 +1737,10 @@ static int trie_flush_leaf(struct leaf *l)
 {
         int found = 0;
         struct hlist_head *lih = &l->list;
-        struct hlist_node *node, *tmp;
+        struct hlist_node *tmp;
         struct leaf_info *li = NULL;
 
-        hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
+        hlist_for_each_entry_safe(li, tmp, lih, hlist) {
                 found += trie_flush_list(&li->falh);
 
                 if (list_empty(&li->falh)) {
@@ -1895,14 +1892,13 @@ static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
                              struct sk_buff *skb, struct netlink_callback *cb)
 {
         struct leaf_info *li;
-        struct hlist_node *node;
         int i, s_i;
 
         s_i = cb->args[4];
         i = 0;
 
         /* rcu_read_lock is hold by caller */
-        hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
+        hlist_for_each_entry_rcu(li, &l->list, hlist) {
                 if (i < s_i) {
                         i++;
                         continue;
@@ -2092,14 +2088,13 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
                 if (IS_LEAF(n)) {
                         struct leaf *l = (struct leaf *)n;
                         struct leaf_info *li;
-                        struct hlist_node *tmp;
 
                         s->leaves++;
                         s->totdepth += iter.depth;
                         if (iter.depth > s->maxdepth)
                                 s->maxdepth = iter.depth;
 
-                        hlist_for_each_entry_rcu(li, tmp, &l->list, hlist)
+                        hlist_for_each_entry_rcu(li, &l->list, hlist)
                                 ++s->prefixes;
                 } else {
                         const struct tnode *tn = (const struct tnode *) n;
@@ -2200,10 +2195,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
 
         for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
                 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
-                struct hlist_node *node;
                 struct fib_table *tb;
 
-                hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
+                hlist_for_each_entry_rcu(tb, head, tb_hlist) {
                         struct trie *t = (struct trie *) tb->tb_data;
                         struct trie_stat stat;
 
@@ -2245,10 +2239,9 @@ static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
 
         for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
                 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
-                struct hlist_node *node;
                 struct fib_table *tb;
 
-                hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
+                hlist_for_each_entry_rcu(tb, head, tb_hlist) {
                         struct rt_trie_node *n;
 
                         for (n = fib_trie_get_first(iter,
@@ -2298,7 +2291,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
         /* new hash chain */
         while (++h < FIB_TABLE_HASHSZ) {
                 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
-                hlist_for_each_entry_rcu(tb, tb_node, head, tb_hlist) {
+                hlist_for_each_entry_rcu(tb, head, tb_hlist) {
                         n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
                         if (n)
                                 goto found;
@@ -2381,13 +2374,12 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
         } else {
                 struct leaf *l = (struct leaf *) n;
                 struct leaf_info *li;
-                struct hlist_node *node;
                 __be32 val = htonl(l->key);
 
                 seq_indent(seq, iter->depth);
                 seq_printf(seq, " |-- %pI4\n", &val);
 
-                hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
+                hlist_for_each_entry_rcu(li, &l->list, hlist) {
                         struct fib_alias *fa;
 
                         list_for_each_entry_rcu(fa, &li->falh, fa_list) {
@@ -2532,7 +2524,6 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
 {
         struct leaf *l = v;
         struct leaf_info *li;
-        struct hlist_node *node;
 
         if (v == SEQ_START_TOKEN) {
                 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
@@ -2541,7 +2532,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
                 return 0;
         }
 
-        hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
+        hlist_for_each_entry_rcu(li, &l->list, hlist) {
                 struct fib_alias *fa;
                 __be32 mask, prefix;
 
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 11cb4979a465..7d1874be1df3 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -57,7 +57,6 @@ int inet_csk_bind_conflict(const struct sock *sk,
                            const struct inet_bind_bucket *tb, bool relax)
 {
         struct sock *sk2;
-        struct hlist_node *node;
         int reuse = sk->sk_reuse;
         int reuseport = sk->sk_reuseport;
         kuid_t uid = sock_i_uid((struct sock *)sk);
@@ -69,7 +68,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
          * one this bucket belongs to.
          */
 
-        sk_for_each_bound(sk2, node, &tb->owners) {
+        sk_for_each_bound(sk2, &tb->owners) {
                 if (sk != sk2 &&
                     !inet_v6_ipv6only(sk2) &&
                     (!sk->sk_bound_dev_if ||
@@ -95,7 +94,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
                         }
                 }
         }
-        return node != NULL;
+        return sk2 != NULL;
 }
 EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
 
@@ -106,7 +105,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
 {
         struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
         struct inet_bind_hashbucket *head;
-        struct hlist_node *node;
         struct inet_bind_bucket *tb;
         int ret, attempts = 5;
         struct net *net = sock_net(sk);
@@ -129,7 +127,7 @@ again:
                 head = &hashinfo->bhash[inet_bhashfn(net, rover,
                                         hashinfo->bhash_size)];
                 spin_lock(&head->lock);
-                inet_bind_bucket_for_each(tb, node, &head->chain)
+                inet_bind_bucket_for_each(tb, &head->chain)
                         if (net_eq(ib_net(tb), net) && tb->port == rover) {
                                 if (((tb->fastreuse > 0 &&
                                       sk->sk_reuse &&
@@ -183,7 +181,7 @@ have_snum:
                 head = &hashinfo->bhash[inet_bhashfn(net, snum,
                                         hashinfo->bhash_size)];
                 spin_lock(&head->lock);
-                inet_bind_bucket_for_each(tb, node, &head->chain)
+                inet_bind_bucket_for_each(tb, &head->chain)
                         if (net_eq(ib_net(tb), net) && tb->port == snum)
                                 goto tb_found;
         }
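
The return sk2 != NULL rewrite above, like the if (!tb) rewrite in the inet_hashtables.c hunk further down, relies on a property of the new iterators: the entry pointer is the loop cursor, so it is NULL after the list has been walked to the end and non-NULL only when the loop was left early via break (or return). An illustrative fragment, again with the made-up struct foo:

        struct foo *f;

        hlist_for_each_entry(f, &foo_head, list)
                if (f->key == key)
                        break;          /* leaves f pointing at the match */
        /* f == NULL here iff no entry matched */
        return f != NULL;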
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 2e453bde6992..245ae078a07f 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -33,9 +33,9 @@ static void inet_frag_secret_rebuild(unsigned long dummy)
         get_random_bytes(&f->rnd, sizeof(u32));
         for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                 struct inet_frag_queue *q;
-                struct hlist_node *p, *n;
+                struct hlist_node *n;
 
-                hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
+                hlist_for_each_entry_safe(q, n, &f->hash[i], list) {
                         unsigned int hval = f->hashfn(q);
 
                         if (hval != i) {
@@ -203,7 +203,6 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 {
         struct inet_frag_queue *qp;
 #ifdef CONFIG_SMP
-        struct hlist_node *n;
 #endif
         unsigned int hash;
 
@@ -219,7 +218,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
          * such entry could be created on other cpu, while we
          * promoted read lock to write lock.
          */
-        hlist_for_each_entry(qp, n, &f->hash[hash], list) {
+        hlist_for_each_entry(qp, &f->hash[hash], list) {
                 if (qp->net == nf && f->match(qp, arg)) {
                         atomic_inc(&qp->refcnt);
                         write_unlock(&f->lock);
@@ -278,9 +277,8 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
         __releases(&f->lock)
 {
         struct inet_frag_queue *q;
-        struct hlist_node *n;
 
-        hlist_for_each_entry(q, n, &f->hash[hash], list) {
+        hlist_for_each_entry(q, &f->hash[hash], list) {
                 if (q->net == nf && f->match(q, key)) {
                         atomic_inc(&q->refcnt);
                         read_unlock(&f->lock);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 0ce0595d9861..6af375afeeef 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -120,13 +120,12 @@ int __inet_inherit_port(struct sock *sk, struct sock *child)
                  * that the listener socket's icsk_bind_hash is the same
                  * as that of the child socket. We have to look up or
                  * create a new bind bucket for the child here. */
-                struct hlist_node *node;
-                inet_bind_bucket_for_each(tb, node, &head->chain) {
+                inet_bind_bucket_for_each(tb, &head->chain) {
                         if (net_eq(ib_net(tb), sock_net(sk)) &&
                             tb->port == port)
                                 break;
                 }
-                if (!node) {
+                if (!tb) {
                         tb = inet_bind_bucket_create(table->bind_bucket_cachep,
                                                      sock_net(sk), head, port);
                         if (!tb) {
@@ -493,7 +492,6 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
         int i, remaining, low, high, port;
         static u32 hint;
         u32 offset = hint + port_offset;
-        struct hlist_node *node;
         struct inet_timewait_sock *tw = NULL;
 
         inet_get_local_port_range(&low, &high);
@@ -512,7 +510,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                          * because the established check is already
                          * unique enough.
                          */
-                        inet_bind_bucket_for_each(tb, node, &head->chain) {
+                        inet_bind_bucket_for_each(tb, &head->chain) {
                                 if (net_eq(ib_net(tb), net) &&
                                     tb->port == port) {
                                         if (tb->fastreuse >= 0 ||
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 2784db3155fb..1f27c9f4afd0 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -216,7 +216,6 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
                                     const int slot)
 {
         struct inet_timewait_sock *tw;
-        struct hlist_node *node;
         unsigned int killed;
         int ret;
 
@@ -229,7 +228,7 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
         killed = 0;
         ret = 0;
 rescan:
-        inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
+        inet_twsk_for_each_inmate(tw, &twdr->cells[slot]) {
                 __inet_twsk_del_dead_node(tw);
                 spin_unlock(&twdr->death_lock);
                 __inet_twsk_kill(tw, twdr->hashinfo);
@@ -438,10 +437,10 @@ void inet_twdr_twcal_tick(unsigned long data)
 
         for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
                 if (time_before_eq(j, now)) {
-                        struct hlist_node *node, *safe;
+                        struct hlist_node *safe;
                         struct inet_timewait_sock *tw;
 
-                        inet_twsk_for_each_inmate_safe(tw, node, safe,
+                        inet_twsk_for_each_inmate_safe(tw, safe,
                                                &twdr->twcal_row[slot]) {
                                 __inet_twsk_del_dead_node(tw);
                                 __inet_twsk_kill(tw, twdr->hashinfo);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 53ddebc292b6..dd44e0ab600c 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -111,9 +111,7 @@ EXPORT_SYMBOL_GPL(raw_unhash_sk);
 static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
                 unsigned short num, __be32 raddr, __be32 laddr, int dif)
 {
-        struct hlist_node *node;
-
-        sk_for_each_from(sk, node) {
+        sk_for_each_from(sk) {
                 struct inet_sock *inet = inet_sk(sk);
 
                 if (net_eq(sock_net(sk), net) && inet->inet_num == num &&
@@ -914,9 +912,7 @@ static struct sock *raw_get_first(struct seq_file *seq)
 
         for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
                         ++state->bucket) {
-                struct hlist_node *node;
-
-                sk_for_each(sk, node, &state->h->ht[state->bucket])
+                sk_for_each(sk, &state->h->ht[state->bucket])
                         if (sock_net(sk) == seq_file_net(seq))
                                 goto found;
         }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 145d3bf8df86..4a8ec457310f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -954,7 +954,6 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct tcp_md5sig_key *key;
-        struct hlist_node *pos;
         unsigned int size = sizeof(struct in_addr);
         struct tcp_md5sig_info *md5sig;
 
@@ -968,7 +967,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
         if (family == AF_INET6)
                 size = sizeof(struct in6_addr);
 #endif
-        hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
+        hlist_for_each_entry_rcu(key, &md5sig->head, node) {
                 if (key->family != family)
                         continue;
                 if (!memcmp(&key->addr, addr, size))
@@ -1069,14 +1068,14 @@ static void tcp_clear_md5_list(struct sock *sk)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct tcp_md5sig_key *key;
-        struct hlist_node *pos, *n;
+        struct hlist_node *n;
         struct tcp_md5sig_info *md5sig;
 
         md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
 
         if (!hlist_empty(&md5sig->head))
                 tcp_free_md5sig_pool();
-        hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
+        hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
                 hlist_del_rcu(&key->node);
                 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
                 kfree_rcu(key, rcu);