author		Divy Le Ray <divy@chelsio.com>		2007-01-18 22:04:14 -0500
committer	Jeff Garzik <jeff@garzik.org>		2007-02-05 16:58:46 -0500
commit		4d22de3e6cc4a09c369b504cd8bcde3385a974cd (patch)
tree		af13a2ee582105d961c79fc4e55fce0b5e043310 /drivers/net/cxgb3/l2t.c
parent		0bf94faf64afaba6e7b49fd11541b59d2ba06d0e (diff)
Add support for the latest 1G/10G Chelsio adapter, T3.
This driver is required by the Chelsio T3 RDMA driver posted by
Steve Wise.
Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/cxgb3/l2t.c')
-rw-r--r--	drivers/net/cxgb3/l2t.c	450
1 file changed, 450 insertions, 0 deletions
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
new file mode 100644
index 000000000000..9997138a4fdc
--- /dev/null
+++ b/drivers/net/cxgb3/l2t.c
@@ -0,0 +1,450 @@
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <net/neighbour.h>
#include "common.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

#define VLAN_NONE 0xfff

/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock, individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  An L2T entry can be dropped by decrementing its reference count
 * and therefore can happen in parallel with entry allocation but no entry
 * can change state or increment its ref count during allocation as both of
 * these perform lookups.
 */

static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}

static inline unsigned int arp_hash(u32 key, int ifindex,
				    const struct l2t_data *d)
{
	return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}

static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Set up an L2T entry and send any packets waiting in the arp queue.  The
 * supplied skb is used for the CPL_L2T_WRITE_REQ.  Must be called with the
 * entry locked.
 */
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
				  struct l2t_entry *e)
{
	struct cpl_l2t_write_req *req;

	if (!skb) {
		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;
	}

	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
	req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
			    V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
			    V_L2T_W_PRIO(vlan_prio(e)));
	memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
	skb->priority = CPL_PRIORITY_CONTROL;
	cxgb3_ofld_send(dev, skb);
	while (e->arpq_head) {
		skb = e->arpq_head;
		e->arpq_head = skb->next;
		skb->next = NULL;
		cxgb3_ofld_send(dev, skb);
	}
	e->arpq_tail = NULL;
	e->state = L2T_STATE_VALID;

	return 0;
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	skb->next = NULL;
	if (e->arpq_head)
		e->arpq_tail->next = skb;
	else
		e->arpq_head = skb;
	e->arpq_tail = skb;
}

int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
		     struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
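		/* fall through: the entry is now usable for transmit */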
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		return cxgb3_ofld_send(dev, skb);
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		/*
		 * Only the first packet added to the arpq should kick off
		 * resolution.  However, because the alloc_skb below can fail,
		 * we allow each packet added to the arpq to retry resolution
		 * as a way of recovering from transient memory exhaustion.
		 * A better way would be to use a work request to retry L2T
		 * entries when there's no memory.
		 */
		if (!neigh_event_send(e->neigh, NULL)) {
			skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
					GFP_ATOMIC);
			if (!skb)
				break;

			spin_lock_bh(&e->lock);
			if (e->arpq_head)
				setup_l2e_send_pending(dev, skb, e);
			else	/* we lost the race */
				__kfree_skb(skb);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}

EXPORT_SYMBOL(t3_l2t_send_slow);

void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		return;
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		return;
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		spin_unlock_bh(&e->lock);

		/*
		 * Only the first packet added to the arpq should kick off
		 * resolution.  However, because the alloc_skb below can fail,
		 * we allow each packet added to the arpq to retry resolution
		 * as a way of recovering from transient memory exhaustion.
		 * A better way would be to use a work request to retry L2T
		 * entries when there's no memory.
		 */
		neigh_event_send(e->neigh, NULL);
	}
	return;
}

EXPORT_SYMBOL(t3_l2t_send_event);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state != L2T_STATE_UNUSED) {
		int hash = arp_hash(e->addr, e->ifindex, d);

		for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				break;
			}
		e->state = L2T_STATE_UNUSED;
	}
	return e;
}

/*
 * Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {	/* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
	}
	spin_unlock_bh(&e->lock);
	atomic_inc(&d->nfree);
}

EXPORT_SYMBOL(t3_l2e_free);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);	/* avoid race with t3_l2t_free */

	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
			     struct net_device *dev)
{
	struct l2t_entry *e;
	struct l2t_data *d = L2DATA(cdev);
	u32 addr = *(u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = arp_hash(addr, ifidx, d);
	struct port_info *p = netdev_priv(dev);
	int smt_idx = p->port_id;

	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx &&
		    e->smt_idx == smt_idx) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);	/* avoid race with t3_l2t_free */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		e->state = L2T_STATE_RESOLVING;
		e->addr = addr;
		e->ifindex = ifidx;
		e->smt_idx = smt_idx;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
			e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
		else
			e->vlan = VLAN_NONE;
		spin_unlock(&e->lock);
	}
done:
	write_unlock_bh(&d->lock);
	return e;
}

EXPORT_SYMBOL(t3_l2t_get);

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the offload device.
 *
 * XXX: maybe we should abandon the latter behavior and just require a failure
 * handler.
 */
static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
{
	while (arpq) {
		struct sk_buff *skb = arpq;
		struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		arpq = skb->next;
		skb->next = NULL;
		if (cb->arp_failure_handler)
			cb->arp_failure_handler(dev, skb);
		else
			cxgb3_ofld_send(dev, skb);
	}
}

/*
 * Called when the host's ARP layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
{
	struct l2t_entry *e;
	struct sk_buff *arpq = NULL;
	struct l2t_data *d = L2DATA(dev);
	u32 addr = *(u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = arp_hash(addr, ifidx, d);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			goto found;
		}
	read_unlock_bh(&d->lock);
	return;

found:
	read_unlock(&d->lock);
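	/*
	 * The table lock was taken with read_lock_bh(), so softirqs remain
	 * disabled here; the spin_unlock_bh() on e->lock below re-enables
	 * them.
	 */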
	if (atomic_read(&e->refcnt)) {
		if (neigh != e->neigh)
			neigh_replace(e, neigh);

		if (e->state == L2T_STATE_RESOLVING) {
			if (neigh->nud_state & NUD_FAILED) {
				arpq = e->arpq_head;
				e->arpq_head = e->arpq_tail = NULL;
			} else if (neigh_is_connected(neigh))
				setup_l2e_send_pending(dev, NULL, e);
		} else {
			e->state = neigh_is_connected(neigh) ?
			    L2T_STATE_VALID : L2T_STATE_STALE;
			if (memcmp(e->dmac, neigh->ha, 6))
				setup_l2e_send_pending(dev, NULL, e);
		}
	}
	spin_unlock_bh(&e->lock);

	if (arpq)
		handle_failed_resolution(dev, arpq);
}

struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
	struct l2t_data *d;
	int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);

	d = cxgb_alloc_mem(size);
	if (!d)
		return NULL;

	d->nentries = l2t_capacity;
	d->rover = &d->l2tab[1];	/* entry 0 is not used */
	atomic_set(&d->nfree, l2t_capacity - 1);
	rwlock_init(&d->lock);

	for (i = 0; i < l2t_capacity; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
	}
	return d;
}

void t3_free_l2t(struct l2t_data *d)
{
	cxgb_free_mem(d);
}