path: root/net/core
author      dpward <david.ward@ll.mit.edu>        2011-09-05 12:47:24 -0400
committer   Greg Kroah-Hartman <gregkh@suse.de>   2011-11-11 12:37:17 -0500
commit      3fa57c1bf5fb311544199b7837a08b9f5bf5e6e4 (patch)
tree        9285297f1b3fee391dca9773416e900d0b49b7a8 /net/core
parent      867ca3109d0289d0a62bb3c7fc3d365e9d478fae (diff)
net: Handle different key sizes between address families in flow cache
commit aa1c366e4febc7f5c2b84958a2dd7cd70e28f9d0 upstream.

With the conversion of struct flowi to a union of AF-specific structs, some
operations on the flow cache need to account for the exact size of the key.

Signed-off-by: David Ward <david.ward@ll.mit.edu>
Signed-off-by: David S. Miller <davem@davemloft.net>
Cc: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/flow.c   31
1 file changed, 17 insertions(+), 14 deletions(-)
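Note: the keysize threaded through these functions is counted in flow_compare_t words and comes from flow_key_size(), which the upstream patch adds to include/net/flow.h and which is therefore outside this net/core-only view. As a rough sketch of what such a per-family helper looks like after the flowi-union conversion (assuming the AF-specific key structs are struct flowi4 and struct flowi6; the exact upstream definition may differ):

/* Sketch only: key size of an address family, in flow_compare_t words.
 * Returning 0 for an unhandled family tells flow_cache_lookup() below
 * to skip the cache entirely (the "goto nocache" path). */
static inline size_t flow_key_size(u16 family)
{
        switch (family) {
        case AF_INET:
                BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t));
                return sizeof(struct flowi4) / sizeof(flow_compare_t);
        case AF_INET6:
                BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t));
                return sizeof(struct flowi6) / sizeof(flow_compare_t);
        }
        return 0;
}

The BUILD_BUG_ON checks play the same role as the one removed from flow_key_compare() below: they guarantee each per-family key is a whole number of flow_compare_t words.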
diff --git a/net/core/flow.c b/net/core/flow.c
index 990703b8863..a6bda2a514f 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -172,29 +172,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
 
 static u32 flow_hash_code(struct flow_cache *fc,
                           struct flow_cache_percpu *fcp,
-                          const struct flowi *key)
+                          const struct flowi *key,
+                          size_t keysize)
 {
         const u32 *k = (const u32 *) key;
+        const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);
 
-        return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+        return jhash2(k, length, fcp->hash_rnd)
                 & (flow_cache_hash_size(fc) - 1);
 }
 
-typedef unsigned long flow_compare_t;
-
 /* I hear what you're saying, use memcmp.  But memcmp cannot make
- * important assumptions that we can here, such as alignment and
- * constant size.
+ * important assumptions that we can here, such as alignment.
  */
-static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
+static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
+                            size_t keysize)
 {
         const flow_compare_t *k1, *k1_lim, *k2;
-        const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
-
-        BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
 
         k1 = (const flow_compare_t *) key1;
-        k1_lim = k1 + n_elem;
+        k1_lim = k1 + keysize;
 
         k2 = (const flow_compare_t *) key2;
 
@@ -215,6 +212,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
         struct flow_cache_entry *fle, *tfle;
         struct hlist_node *entry;
         struct flow_cache_object *flo;
+        size_t keysize;
         unsigned int hash;
 
         local_bh_disable();
@@ -222,6 +220,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 
         fle = NULL;
         flo = NULL;
+
+        keysize = flow_key_size(family);
+        if (!keysize)
+                goto nocache;
+
         /* Packet really early in init?  Making flow_cache_init a
          * pre-smp initcall would solve this.  --RR */
         if (!fcp->hash_table)
@@ -230,11 +233,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
         if (fcp->hash_rnd_recalc)
                 flow_new_hash_rnd(fc, fcp);
 
-        hash = flow_hash_code(fc, fcp, key);
+        hash = flow_hash_code(fc, fcp, key, keysize);
         hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
                 if (tfle->family == family &&
                     tfle->dir == dir &&
-                    flow_key_compare(key, &tfle->key) == 0) {
+                    flow_key_compare(key, &tfle->key, keysize) == 0) {
                         fle = tfle;
                         break;
                 }
@@ -248,7 +251,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
         if (fle) {
                 fle->family = family;
                 fle->dir = dir;
-                memcpy(&fle->key, key, sizeof(*key));
+                memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
                 fle->object = NULL;
                 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                 fcp->hash_count++;
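For context, the comparison loop that follows the last context line of the first hunk is not touched by this patch; only its bound moves from the compile-time n_elem to the runtime keysize. A self-contained sketch of the post-patch shape, assuming the usual word-by-word do/while body (the _sketch name and void * parameters are illustrative, not the kernel's signature):

#include <stddef.h>

typedef unsigned long flow_compare_t;

/* Open-coded, word-sized compare over exactly keysize flow_compare_t
 * words; known alignment of the keys is what lets this avoid a generic
 * memcmp().  Callers must pass keysize > 0, which flow_cache_lookup()
 * guarantees by bailing out to nocache when flow_key_size() returns 0. */
static int flow_key_compare_sketch(const void *key1, const void *key2,
                                   size_t keysize)
{
        const flow_compare_t *k1 = key1;
        const flow_compare_t *k1_lim = k1 + keysize;
        const flow_compare_t *k2 = key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}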