author     David S. Miller <davem@davemloft.net>  2014-11-14 01:01:12 -0500
committer  David S. Miller <davem@davemloft.net>  2014-11-14 01:01:12 -0500
commit     076ce4482569ea1a2c27b4ca71a309adaf91d398
tree       2ae9e42612f35be897f190983fc292d7af781cd2 /lib
parent     d649a7a81f3b5bacb1d60abd7529894d8234a666
parent     b23dc5a7cc6ebc9a0d57351da7a0e8454c9ffea3
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
sge.c had two overlapping changes: one using the new
__dev_alloc_page() in net-next, and one using s->fl_pg_order in net.
ixgbe_phy.c had a set of overlapping whitespace changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib')
 -rw-r--r--  lib/rhashtable.c  10
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 25e4c213b08a..e5f5e69c7a7b 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -229,7 +229,7 @@ int rhashtable_expand(struct rhashtable *ht)
	ht->shift++;

	/* For each new bucket, search the corresponding old bucket
	 * for the first entry that hashes to the new bucket, and
	 * link the new bucket to that entry. Since all the entries
	 * which will end up in the new bucket appear in the same
	 * old bucket, this constructs an entirely valid new hash
@@ -247,8 +247,8 @@ int rhashtable_expand(struct rhashtable *ht)
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

@@ -304,14 +304,14 @@ int rhashtable_shrink(struct rhashtable *ht)

	ht->shift--;

	/* Link each bucket in the new table to the first bucket
	 * in the old table that contains entries which will hash
	 * to the new bucket.
	 */
	for (i = 0; i < ntbl->size; i++) {
		ntbl->buckets[i] = tbl->buckets[i];

		/* Link each bucket in the new table to the first bucket
		 * in the old table that contains entries which will hash
		 * to the new bucket.
		 */
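The comments in these hunks describe the expand strategy: every bucket in the doubled table is seeded with the first entry of the one old bucket that can feed it, and the new table only becomes visible through rcu_assign_pointer() once all buckets are linked. Below is a minimal userspace sketch of that linking step, not the kernel code; the node/table structs, the expand() helper and the plain pointer return are illustrative stand-ins for the real rhashtable types and RCU primitives.

	#include <stdlib.h>

	/* Illustrative stand-ins for the kernel's bucket_table/rhash_head. */
	struct node {
		unsigned int hash;	/* full hash of the entry's key */
		struct node *next;
	};

	struct table {
		unsigned int size;	/* always a power of two */
		struct node **buckets;
	};

	/* Double the table: for each new bucket, search the corresponding old
	 * bucket for the first entry that hashes to the new bucket and link
	 * the new bucket to it, as the comment in the first hunk describes.
	 * The chains are still "zipped" (shared with the old table) after
	 * this step and would be unzipped later. */
	static struct table *expand(const struct table *old)
	{
		struct table *new = malloc(sizeof(*new));
		unsigned int i;

		if (!new)
			return NULL;
		new->size = old->size * 2;
		new->buckets = calloc(new->size, sizeof(*new->buckets));
		if (!new->buckets) {
			free(new);
			return NULL;
		}

		for (i = 0; i < new->size; i++) {
			/* Only old bucket (i & (old->size - 1)) can hold
			 * entries that map to new bucket i. */
			struct node *p = old->buckets[i & (old->size - 1)];

			while (p && (p->hash & (new->size - 1)) != i)
				p = p->next;
			new->buckets[i] = p;
		}

		/* The kernel publishes the fully linked table with
		 * rcu_assign_pointer(ht->tbl, new_tbl); returning the
		 * pointer for a plain assignment stands in for that here. */
		return new;
	}

Shrinking, per the third hunk, works in the other direction: each bucket of the smaller table initially takes over the head of the matching old bucket (ntbl->buckets[i] = tbl->buckets[i]) before the remaining entries that hash to it are linked in.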