 drivers/net/cxgb4/cxgb4.h     | 741 +++++++++++++++++++++
 drivers/net/cxgb4/cxgb4_uld.h | 239 +++++++
 drivers/net/cxgb4/l2t.c       | 624 ++++++++++++++++++
 drivers/net/cxgb4/l2t.h       | 110 +++
 4 files changed, 1714 insertions(+), 0 deletions(-)
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
new file mode 100644
index 000000000000..3d8ff4889b56
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -0,0 +1,741 @@
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CXGB4_H__
#define __CXGB4_H__

#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
#include "t4_hw.h"

#define FW_VERSION_MAJOR 1
#define FW_VERSION_MINOR 1
#define FW_VERSION_MICRO 0

enum {
	MAX_NPORTS = 4,     /* max # of ports */
	SERNUM_LEN = 16,    /* Serial # length */
	EC_LEN     = 16,    /* E/C length */
	ID_LEN     = 16,    /* ID length */
};

enum {
	MEM_EDC0,
	MEM_EDC1,
	MEM_MC
};

enum dev_master {
	MASTER_CANT,
	MASTER_MAY,
	MASTER_MUST
};

enum dev_state {
	DEV_STATE_UNINIT,
	DEV_STATE_INIT,
	DEV_STATE_ERR
};

enum {
	PAUSE_RX      = 1 << 0,
	PAUSE_TX      = 1 << 1,
	PAUSE_AUTONEG = 1 << 2
};

struct port_stats {
	u64 tx_octets;            /* total # of octets in good frames */
	u64 tx_frames;            /* all good frames */
	u64 tx_bcast_frames;      /* all broadcast frames */
	u64 tx_mcast_frames;      /* all multicast frames */
	u64 tx_ucast_frames;      /* all unicast frames */
	u64 tx_error_frames;      /* all error frames */

	u64 tx_frames_64;         /* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 tx_drop;              /* # of dropped Tx frames */
	u64 tx_pause;             /* # of transmitted pause frames */
	u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
	u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
	u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
	u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
	u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
	u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
	u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
	u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */

	u64 rx_octets;            /* total # of octets in good frames */
	u64 rx_frames;            /* all good frames */
	u64 rx_bcast_frames;      /* all broadcast frames */
	u64 rx_mcast_frames;      /* all multicast frames */
	u64 rx_ucast_frames;      /* all unicast frames */
	u64 rx_too_long;          /* # of frames exceeding MTU */
	u64 rx_jabber;            /* # of jabber frames */
	u64 rx_fcs_err;           /* # of received frames with bad FCS */
	u64 rx_len_err;           /* # of received frames with length error */
	u64 rx_symbol_err;        /* symbol errors */
	u64 rx_runt;              /* # of short frames */

	u64 rx_frames_64;         /* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_pause;             /* # of received pause frames */
	u64 rx_ppp0;              /* # of received PPP prio 0 frames */
	u64 rx_ppp1;              /* # of received PPP prio 1 frames */
	u64 rx_ppp2;              /* # of received PPP prio 2 frames */
	u64 rx_ppp3;              /* # of received PPP prio 3 frames */
	u64 rx_ppp4;              /* # of received PPP prio 4 frames */
	u64 rx_ppp5;              /* # of received PPP prio 5 frames */
	u64 rx_ppp6;              /* # of received PPP prio 6 frames */
	u64 rx_ppp7;              /* # of received PPP prio 7 frames */

	u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
	u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
	u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
	u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
	u64 rx_trunc0;            /* buffer-group 0 truncated packets */
	u64 rx_trunc1;            /* buffer-group 1 truncated packets */
	u64 rx_trunc2;            /* buffer-group 2 truncated packets */
	u64 rx_trunc3;            /* buffer-group 3 truncated packets */
};

struct lb_port_stats {
	u64 octets;
	u64 frames;
	u64 bcast_frames;
	u64 mcast_frames;
	u64 ucast_frames;
	u64 error_frames;

	u64 frames_64;
	u64 frames_65_127;
	u64 frames_128_255;
	u64 frames_256_511;
	u64 frames_512_1023;
	u64 frames_1024_1518;
	u64 frames_1519_max;

	u64 drop;

	u64 ovflow0;
	u64 ovflow1;
	u64 ovflow2;
	u64 ovflow3;
	u64 trunc0;
	u64 trunc1;
	u64 trunc2;
	u64 trunc3;
};

struct tp_tcp_stats {
	u32 tcpOutRsts;
	u64 tcpInSegs;
	u64 tcpOutSegs;
	u64 tcpRetransSegs;
};

struct tp_err_stats {
	u32 macInErrs[4];
	u32 hdrInErrs[4];
	u32 tcpInErrs[4];
	u32 tnlCongDrops[4];
	u32 ofldChanDrops[4];
	u32 tnlTxDrops[4];
	u32 ofldVlanDrops[4];
	u32 tcp6InErrs[4];
	u32 ofldNoNeigh;
	u32 ofldCongDefer;
};

struct tp_params {
	unsigned int ntxchan;            /* # of Tx channels */
	unsigned int tre;                /* log2 of core clocks per TP tick */
};

struct vpd_params {
	unsigned int cclk;
	u8 ec[EC_LEN + 1];
	u8 sn[SERNUM_LEN + 1];
	u8 id[ID_LEN + 1];
};

struct pci_params {
	unsigned char speed;
	unsigned char width;
};

struct adapter_params {
	struct tp_params  tp;
	struct vpd_params vpd;
	struct pci_params pci;

	unsigned int fw_vers;
	unsigned int tp_vers;
	u8 api_vers[7];

	unsigned short mtus[NMTUS];
	unsigned short a_wnd[NCCTRL_WIN];
	unsigned short b_wnd[NCCTRL_WIN];

	unsigned char nports;            /* # of ethernet ports */
	unsigned char portvec;
	unsigned char rev;               /* chip revision */
	unsigned char offload;

	unsigned int ofldq_wr_cred;
};

struct trace_params {
	u32 data[TRACE_LEN / 4];
	u32 mask[TRACE_LEN / 4];
	unsigned short snap_len;
	unsigned short min_len;
	unsigned char skip_ofst;
	unsigned char skip_len;
	unsigned char invert;
	unsigned char port;
};

struct link_config {
	unsigned short supported;        /* link capabilities */
	unsigned short advertising;      /* advertised capabilities */
	unsigned short requested_speed;  /* speed user has requested */
	unsigned short speed;            /* actual link speed */
	unsigned char  requested_fc;     /* flow control user has requested */
	unsigned char  fc;               /* actual link flow control */
	unsigned char  autoneg;          /* autonegotiating? */
	unsigned char  link_ok;          /* link up? */
};

#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)

enum {
	MAX_ETH_QSETS = 32,              /* # of Ethernet Tx/Rx queue sets */
	MAX_OFLD_QSETS = 16,             /* # of offload Tx/Rx queue sets */
	MAX_CTRL_QUEUES = NCHAN,         /* # of control Tx queues */
	MAX_RDMA_QUEUES = NCHAN,         /* # of streaming RDMA Rx queues */
};

enum {
	MAX_EGRQ = 128,         /* max # of egress queues, including FLs */
	MAX_INGQ = 64           /* max # of interrupt-capable ingress queues */
};

struct adapter;
struct vlan_group;
struct sge_rspq;

struct port_info {
	struct adapter *adapter;
	struct vlan_group *vlan_grp;
	u16    viid;
	s16    xact_addr_filt;  /* index of exact MAC address filter */
	u16    rss_size;        /* size of VI's RSS table slice */
	s8     mdio_addr;
	u8     port_type;
	u8     mod_type;
	u8     port_id;
	u8     tx_chan;
	u8     lport;           /* associated offload logical port */
	u8     rx_offload;      /* CSO, etc */
	u8     nqsets;          /* # of qsets */
	u8     first_qset;      /* index of first qset */
	struct link_config link_cfg;
};

/* port_info.rx_offload flags */
enum {
	RX_CSO = 1 << 0,
};

struct dentry;
struct work_struct;

enum {                                 /* adapter flags */
	FULL_INIT_DONE     = (1 << 0),
	USING_MSI          = (1 << 1),
	USING_MSIX         = (1 << 2),
	QUEUES_BOUND       = (1 << 3),
	FW_OK              = (1 << 4),
};

struct rx_sw_desc;

struct sge_fl {                     /* SGE free-buffer queue state */
	unsigned int avail;         /* # of available Rx buffers */
	unsigned int pend_cred;     /* new buffers since last FL DB ring */
	unsigned int cidx;          /* consumer index */
	unsigned int pidx;          /* producer index */
	unsigned long alloc_failed; /* # of times buffer allocation failed */
	unsigned long large_alloc_failed;
	unsigned long starving;
	/* RO fields */
	unsigned int cntxt_id;      /* SGE context id for the free list */
	unsigned int size;          /* capacity of free list */
	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
	__be64 *desc;               /* address of HW Rx descriptor ring */
	dma_addr_t addr;            /* bus address of HW ring start */
};

/* A packet gather list */
struct pkt_gl {
	skb_frag_t frags[MAX_SKB_FRAGS];
	void *va;                   /* virtual address of first byte */
	unsigned int nfrags;        /* # of fragments */
	unsigned int tot_len;       /* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);

struct sge_rspq {                   /* state for an SGE response queue */
	struct napi_struct napi;
	const __be64 *cur_desc;     /* current descriptor in queue */
	unsigned int cidx;          /* consumer index */
	u8 gen;                     /* current generation bit */
	u8 intr_params;             /* interrupt holdoff parameters */
	u8 next_intr_params;        /* holdoff params for next interrupt */
	u8 pktcnt_idx;              /* interrupt packet threshold */
	u8 uld;                     /* ULD handling this queue */
	u8 idx;                     /* queue index within its group */
	int offset;                 /* offset into current Rx buffer */
	u16 cntxt_id;               /* SGE context id for the response q */
	u16 abs_id;                 /* absolute SGE id for the response q */
	__be64 *desc;               /* address of HW response ring */
	dma_addr_t phys_addr;       /* physical address of the ring */
	unsigned int iqe_len;       /* entry size */
	unsigned int size;          /* capacity of response queue */
	struct adapter *adap;
	struct net_device *netdev;  /* associated net device */
	rspq_handler_t handler;
};

struct sge_eth_stats {              /* Ethernet queue statistics */
	unsigned long pkts;         /* # of ethernet packets */
	unsigned long lro_pkts;     /* # of LRO super packets */
	unsigned long lro_merged;   /* # of wire packets merged by LRO */
	unsigned long rx_cso;       /* # of Rx checksum offloads */
	unsigned long vlan_ex;      /* # of Rx VLAN extractions */
	unsigned long rx_drops;     /* # of packets dropped due to no mem */
};

struct sge_eth_rxq {                /* SW Ethernet Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_eth_stats stats;
} ____cacheline_aligned_in_smp;

struct sge_ofld_stats {             /* offload queue statistics */
	unsigned long pkts;         /* # of packets */
	unsigned long imm;          /* # of immediate-data packets */
	unsigned long an;           /* # of asynchronous notifications */
	unsigned long nomem;        /* # of responses deferred due to no mem */
};

struct sge_ofld_rxq {               /* SW offload Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_ofld_stats stats;
} ____cacheline_aligned_in_smp;

struct tx_desc {
	__be64 flit[8];
};

struct tx_sw_desc;

struct sge_txq {
	unsigned int  in_use;       /* # of in-use Tx descriptors */
	unsigned int  size;         /* # of descriptors */
	unsigned int  cidx;         /* SW consumer index */
	unsigned int  pidx;         /* producer index */
	unsigned long stops;        /* # of times q has been stopped */
	unsigned long restarts;     /* # of queue restarts */
	unsigned int  cntxt_id;     /* SGE context id for the Tx q */
	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
	struct sge_qstat *stat;     /* queue status entry */
	dma_addr_t    phys_addr;    /* physical address of the ring */
};

struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
	struct sge_txq q;
	struct netdev_queue *txq;   /* associated netdev TX queue */
	unsigned long tso;          /* # of TSO requests */
	unsigned long tx_cso;       /* # of Tx checksum offloads */
	unsigned long vlan_ins;     /* # of Tx VLAN insertions */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
} ____cacheline_aligned_in_smp;

struct sge {
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
	struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

	struct sge_rspq intrq ____cacheline_aligned_in_smp;
	spinlock_t intrq_lock;

	u16 max_ethqsets;           /* # of available Ethernet queue sets */
	u16 ethqsets;               /* # of active Ethernet queue sets */
	u16 ethtxq_rover;           /* Tx queue to clean up next */
	u16 ofldqsets;              /* # of active offload queue sets */
	u16 rdmaqs;                 /* # of available RDMA Rx queues */
	u16 ofld_rxq[MAX_OFLD_QSETS];
	u16 rdma_rxq[NCHAN];
	u16 timer_val[SGE_NTIMERS];
	u8 counter_val[SGE_NCOUNTERS];
	unsigned int starve_thres;
	u8 idma_state[2];
	void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
	struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
	DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
	struct timer_list rx_timer; /* refills starving FLs */
	struct timer_list tx_timer; /* checks Tx queues */
};

#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)

struct l2t_data;

struct adapter {
	void __iomem *regs;
	struct pci_dev *pdev;
	struct device *pdev_dev;
	unsigned long registered_device_map;
	unsigned long open_device_map;
	unsigned long flags;

	const char *name;
	int msg_enable;

	struct adapter_params params;
	struct cxgb4_virt_res vres;
	unsigned int swintr;

	unsigned int wol;

	struct {
		unsigned short vec;
		char desc[14];
	} msix_info[MAX_INGQ + 1];

	struct sge sge;

	struct net_device *port[MAX_NPORTS];
	u8 chan_map[NCHAN];         /* channel -> port map */

	struct l2t_data *l2t;
	void *uld_handle[CXGB4_ULD_MAX];
	struct list_head list_node;

	struct tid_info tids;
	void **tid_release_head;
	spinlock_t tid_release_lock;
	struct work_struct tid_release_task;
	bool tid_release_task_busy;

	struct dentry *debugfs_root;

	spinlock_t stats_lock;
};

static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
	return readl(adap->regs + reg_addr);
}

static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
{
	writel(val, adap->regs + reg_addr);
}

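/*
 * Fallback 64-bit MMIO accessors for architectures without a native
 * readq/writeq: each 64-bit access is split into two 32-bit halves, so it
 * is not atomic with respect to the device.
 */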
#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif

static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
	return readq(adap->regs + reg_addr);
}

static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
	writeq(val, adap->regs + reg_addr);
}

/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
	return netdev_priv(adap->port[idx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
	return netdev2pinfo(dev)->adapter;
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void *t4_alloc_mem(size_t size);
void t4_free_mem(void *addr);

void t4_free_sge_resources(struct adapter *adap);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
			  struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
void t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);

#define for_each_port(adapter, iter) \
	for (iter = 0; iter < (adapter)->params.nports; ++iter)

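/*
 * The VPD core clock (params.vpd.cclk) is stored in kHz, hence the
 * divisions by 1000 below: e.g. a 250000 kHz core clock yields 250 core
 * ticks per microsecond.
 */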
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
	return adap->params.vpd.cclk / 1000;
}

static inline unsigned int us_to_core_ticks(const struct adapter *adap,
					    unsigned int us)
{
	return (us * adap->params.vpd.cclk) / 1000;
}

void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
		      u32 val);

int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok);

static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
			     int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}

static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
				int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}

void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
void t4_intr_clear(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);

int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
int t4_check_fw_version(struct adapter *adapter);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
void t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
			int filter_index, int enable);
void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
			 int filter_index, int *enabled);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags);
int t4_read_rss(struct adapter *adapter, u16 *entries);
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
		u64 *parity);

void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);

void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta);

void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable);

int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val);
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size);
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en);
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks);
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val);
int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
		     unsigned int pf, unsigned int vf, unsigned int iqid,
		     unsigned int fl0id, unsigned int fl1id);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid);
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
new file mode 100644
index 000000000000..5b98546ac92d
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -0,0 +1,239 @@
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CXGB4_OFLD_H
#define __CXGB4_OFLD_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <asm/atomic.h>

/* CPL message priority levels */
enum {
	CPL_PRIORITY_DATA     = 0,  /* data messages */
	CPL_PRIORITY_SETUP    = 1,  /* connection setup messages */
	CPL_PRIORITY_TEARDOWN = 0,  /* connection teardown messages */
	CPL_PRIORITY_LISTEN   = 1,  /* listen start/stop messages */
	CPL_PRIORITY_ACK      = 1,  /* RX ACK messages */
	CPL_PRIORITY_CONTROL  = 1   /* control messages */
};

#define INIT_TP_WR(w, tid) do { \
	(w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \
			      FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
	(w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
			       FW_WR_FLOWID(tid)); \
	(w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)

#define INIT_TP_WR_CPL(w, cpl, tid) do { \
	INIT_TP_WR(w, tid); \
	OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \
} while (0)

#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
	(w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \
	(w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
			       FW_WR_FLOWID(tid)); \
	(w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)

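/*
 * Typical use of the initializers above (an illustrative sketch only;
 * "struct cpl_foo_req" and CPL_FOO_REQ are hypothetical placeholders):
 *
 *	struct cpl_foo_req *req;
 *
 *	req = (struct cpl_foo_req *)__skb_put(skb, sizeof(*req));
 *	INIT_TP_WR_CPL(req, CPL_FOO_REQ, tid);
 *	... fill in the CPL-specific fields of *req ...
 *	t4_ofld_send(adap, skb);
 *
 * write_l2e() in l2t.c follows this pattern for struct cpl_l2t_write_req.
 */
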
/* Special asynchronous notification message */
#define CXGB4_MSG_AN ((void *)1)

struct serv_entry {
	void *data;
};

union aopen_entry {
	void *data;
	union aopen_entry *next;
};

/*
 * Holds the size, base address, free list start, etc of the TID, server TID,
 * and active-open TID tables.  The tables themselves are allocated dynamically.
 */
struct tid_info {
	void **tid_tab;
	unsigned int ntids;

	struct serv_entry *stid_tab;
	unsigned long *stid_bmap;
	unsigned int nstids;
	unsigned int stid_base;

	union aopen_entry *atid_tab;
	unsigned int natids;

	unsigned int nftids;
	unsigned int ftid_base;

	spinlock_t atid_lock ____cacheline_aligned_in_smp;
	union aopen_entry *afree;
	unsigned int atids_in_use;

	spinlock_t stid_lock;
	unsigned int stids_in_use;

	atomic_t tids_in_use;
};

static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
{
	return tid < t->ntids ? t->tid_tab[tid] : NULL;
}

static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
{
	return atid < t->natids ? t->atid_tab[atid].data : NULL;
}

static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
{
	stid -= t->stid_base;
	return stid < t->nstids ? t->stid_tab[stid].data : NULL;
}

static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
				    unsigned int tid)
{
	t->tid_tab[tid] = data;
	atomic_inc(&t->tids_in_use);
}

int cxgb4_alloc_atid(struct tid_info *t, void *data);
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
			     unsigned int tid);

struct in6_addr;

int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, unsigned int queue);
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue);

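/*
 * The queue mapping encodes both the Tx queue and the CPL priority in one
 * value: bit 0 carries the priority and the remaining bits the queue index.
 * For example, set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0) yields mapping 1
 * (queue 0, high priority), while set_wr_txq(skb, CPL_PRIORITY_DATA, 2)
 * yields mapping 4 (queue 2, normal priority).
 */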
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
	skb_set_queue_mapping(skb, (queue << 1) | prio);
}

enum cxgb4_uld {
	CXGB4_ULD_RDMA,
	CXGB4_ULD_ISCSI,
	CXGB4_ULD_MAX
};

enum cxgb4_state {
	CXGB4_STATE_UP,
	CXGB4_STATE_START_RECOVERY,
	CXGB4_STATE_DOWN,
	CXGB4_STATE_DETACH
};

struct pci_dev;
struct l2t_data;
struct net_device;
struct pkt_gl;
struct tp_tcp_stats;

struct cxgb4_range {
	unsigned int start;
	unsigned int size;
};

struct cxgb4_virt_res {                  /* virtualized HW resources */
	struct cxgb4_range ddp;
	struct cxgb4_range iscsi;
	struct cxgb4_range stag;
	struct cxgb4_range rq;
	struct cxgb4_range pbl;
};

/*
 * Block of information the LLD provides to ULDs attaching to a device.
 */
struct cxgb4_lld_info {
	struct pci_dev *pdev;            /* associated PCI device */
	struct l2t_data *l2t;            /* L2 table */
	struct tid_info *tids;           /* TID table */
	struct net_device **ports;       /* device ports */
	const struct cxgb4_virt_res *vr; /* assorted HW resources */
	const unsigned short *mtus;      /* MTU table */
	const unsigned short *rxq_ids;   /* the ULD's Rx queue ids */
	unsigned short nrxq;             /* # of Rx queues */
	unsigned short ntxq;             /* # of Tx queues */
	unsigned char nchan:4;           /* # of channels */
	unsigned char nports:4;          /* # of ports */
	unsigned char wr_cred;           /* WR 16-byte credits */
	unsigned char adapter_type;      /* type of adapter */
	unsigned char fw_api_ver;        /* FW API version */
	unsigned int fw_vers;            /* FW version */
	unsigned int iscsi_iolen;        /* iSCSI max I/O length */
	unsigned short udb_density;      /* # of user DB/page */
	unsigned short ucq_density;      /* # of user CQs/page */
	void __iomem *gts_reg;           /* address of GTS register */
	void __iomem *db_reg;            /* address of kernel doorbell */
};

struct cxgb4_uld_info {
	const char *name;
	void *(*add)(const struct cxgb4_lld_info *p);
	int (*rx_handler)(void *handle, const __be64 *rsp,
			  const struct pkt_gl *gl);
	int (*state_change)(void *handle, enum cxgb4_state new_state);
};

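/*
 * Sketch of a ULD registration (the "myuld" names are hypothetical, not
 * part of this API): the ULD fills in a cxgb4_uld_info and registers it;
 * its add() callback is then invoked with a cxgb4_lld_info for each
 * adapter, and the handle it returns is passed back to rx_handler() and
 * state_change().
 *
 *	static struct cxgb4_uld_info myuld_info = {
 *		.name         = "myuld",
 *		.add          = myuld_add,
 *		.rx_handler   = myuld_rx_handler,
 *		.state_change = myuld_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &myuld_info);
 */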
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
unsigned int cxgb4_port_chan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
unsigned int cxgb4_port_idx(const struct net_device *dev);
struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id);
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order);
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
				   unsigned int skb_len, unsigned int pull_len);
#endif  /* !__CXGB4_OFLD_H */
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
new file mode 100644
index 000000000000..9f96724a133a
--- /dev/null
+++ b/drivers/net/cxgb4/l2t.c
@@ -0,0 +1,624 @@
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <net/neighbour.h>
#include "cxgb4.h"
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"

#define VLAN_NONE 0xfff

/* identifies sync vs async L2T_WRITE_REQs */
#define F_SYNC_WR    (1 << 12)

enum {
	L2T_STATE_VALID,      /* entry is up to date */
	L2T_STATE_STALE,      /* entry may be used but needs revalidation */
	L2T_STATE_RESOLVING,  /* entry needs address resolution */
	L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */

	/* when state is one of the below the entry is not hashed */
	L2T_STATE_SWITCHING,  /* entry is being used by a switching filter */
	L2T_STATE_UNUSED      /* entry not in use */
};

struct l2t_data {
	rwlock_t lock;
	atomic_t nfree;             /* number of free entries */
	struct l2t_entry *rover;    /* starting point for next allocation */
	struct l2t_entry l2tab[L2T_SIZE];
};

static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}

static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */
		atomic_dec(&d->nfree);
}

/*
 * To avoid having to check address families we do not allow v4 and v6
 * neighbors to be on the same hash chain.  We keep v4 entries in the first
 * half of available hash buckets and v6 in the second.
 */
enum {
	L2T_SZ_HALF = L2T_SIZE / 2,
	L2T_HASH_MASK = L2T_SZ_HALF - 1
};

static inline unsigned int arp_hash(const u32 *key, int ifindex)
{
	return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
}

static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
{
	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
}

static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
{
	return addr_len == 4 ? arp_hash(addr, ifindex) :
			       ipv6_hash(addr, ifindex);
}

/*
 * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
 * whether the L2T entry and the address are of the same address family.
 * Callers ensure an address is only checked against L2T entries of the same
 * family, something made trivial by the separation of IP and IPv6 hash chains
 * mentioned above.  Returns 0 if there's a match, non-zero otherwise.
 */
static int addreq(const struct l2t_entry *e, const u32 *addr)
{
	if (e->v6)
		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
	return e->addr[0] ^ addr[0];
}

static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
	struct sk_buff *skb;
	struct cpl_l2t_write_req *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);

	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
					e->idx | (sync ? F_SYNC_WR : 0) |
					TID_QID(adap->sge.fw_evtq.abs_id)));
	req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
	req->l2t_idx = htons(e->idx);
	req->vlan = htons(e->vlan);
	if (e->neigh)
		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
	t4_ofld_send(adap, skb);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;
	return 0;
}

/*
 * Send packets waiting in an L2T entry's ARP queue.  Must be called with the
 * entry locked.
 */
static void send_pending(struct adapter *adap, struct l2t_entry *e)
{
	while (e->arpq_head) {
		struct sk_buff *skb = e->arpq_head;

		e->arpq_head = skb->next;
		skb->next = NULL;
		t4_ofld_send(adap, skb);
	}
	e->arpq_tail = NULL;
}

/*
 * Process a CPL_L2T_WRITE_RPL.  Wake up the ARP queue if it completes a
 * synchronous L2T_WRITE.  Note that the TID in the reply is really the L2T
 * index it refers to.
 */
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
	unsigned int tid = GET_TID(rpl);
	unsigned int idx = tid & (L2T_SIZE - 1);

	if (unlikely(rpl->status != CPL_ERR_NONE)) {
		dev_err(adap->pdev_dev,
			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
			rpl->status, idx);
		return;
	}

	if (tid & F_SYNC_WR) {
		struct l2t_entry *e = &adap->l2t->l2tab[idx];

		spin_lock(&e->lock);
		if (e->state != L2T_STATE_SWITCHING) {
			send_pending(adap, e);
			e->state = (e->neigh->nud_state & NUD_STALE) ?
					L2T_STATE_STALE : L2T_STATE_VALID;
		}
		spin_unlock(&e->lock);
	}
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	skb->next = NULL;
	if (e->arpq_head)
		e->arpq_tail->next = skb;
	else
		e->arpq_head = skb;
	e->arpq_tail = skb;
}

int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
		   struct l2t_entry *e)
{
	struct adapter *adap = netdev2adap(dev);

again:
	switch (e->state) {
	case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		/* fall through: a stale entry may still be used for Tx */
	case L2T_STATE_VALID:     /* fast-path, send the packet on */
		return t4_ofld_send(adap, skb);
	case L2T_STATE_RESOLVING:
	case L2T_STATE_SYNC_WRITE:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_SYNC_WRITE &&
		    e->state != L2T_STATE_RESOLVING) {
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		if (e->state == L2T_STATE_RESOLVING &&
		    !neigh_event_send(e->neigh, NULL)) {
			spin_lock_bh(&e->lock);
			if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
				write_l2e(adap, e, 1);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_l2t_send);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING)
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}

	e->state = L2T_STATE_UNUSED;
	return e;
}

/*
 * Called when an L2T entry has no more users.
 */
static void t4_l2e_free(struct l2t_entry *e)
{
	struct l2t_data *d;

	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
	}
	spin_unlock_bh(&e->lock);

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_inc(&d->nfree);
}

void cxgb4_l2t_release(struct l2t_entry *e)
{
	if (atomic_dec_and_test(&e->refcnt))
		t4_l2e_free(e);
}
EXPORT_SYMBOL(cxgb4_l2t_release);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);                /* avoid race with t4_l2t_free */
	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
				const struct net_device *physdev,
				unsigned int priority)
{
	u8 lport;
	u16 vlan;
	struct l2t_entry *e;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *)neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(addr, addr_len, ifidx);

	if (neigh->dev->flags & IFF_LOOPBACK)
		lport = netdev2pinfo(physdev)->tx_chan + 4;
	else
		lport = netdev2pinfo(physdev)->lport;

	if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
		vlan = vlan_dev_vlan_id(neigh->dev);
	else
		vlan = VLAN_NONE;

	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx &&
		    e->vlan == vlan && e->lport == lport) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);        /* avoid race with t4_l2t_free */
		e->state = L2T_STATE_RESOLVING;
		memcpy(e->addr, addr, addr_len);
		e->ifindex = ifidx;
		e->hash = hash;
		e->lport = lport;
		e->v6 = addr_len == 16;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		e->vlan = vlan;
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		spin_unlock(&e->lock);
	}
done:
	write_unlock_bh(&d->lock);
	return e;
}
EXPORT_SYMBOL(cxgb4_l2t_get);

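/*
 * Typical L2T entry life cycle as seen from a ULD (a sketch; error
 * handling and the surrounding connection code are omitted):
 *
 *	e = cxgb4_l2t_get(adap->l2t, neigh, physdev, 0);
 *	if (!e)
 *		return -ENOMEM;
 *	...
 *	cxgb4_l2t_send(dev, skb, e);	(queued if e is still resolving)
 *	...
 *	cxgb4_l2t_release(e);		(drop the reference when done)
 */
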
/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the device.
 */
static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
{
	while (arpq) {
		struct sk_buff *skb = arpq;
		const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		arpq = skb->next;
		skb->next = NULL;
		if (cb->arp_err_handler)
			cb->arp_err_handler(cb->handle, skb);
		else
			t4_ofld_send(adap, skb);
	}
}

/*
 * Called when the host's neighbor layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
	struct l2t_entry *e;
	struct sk_buff *arpq = NULL;
	struct l2t_data *d = adap->l2t;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(addr, addr_len, ifidx);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			if (atomic_read(&e->refcnt))
				goto found;
			spin_unlock(&e->lock);
			break;
		}
	read_unlock_bh(&d->lock);
	return;

 found:
	read_unlock(&d->lock);

	if (neigh != e->neigh)
		neigh_replace(e, neigh);

	if (e->state == L2T_STATE_RESOLVING) {
		if (neigh->nud_state & NUD_FAILED) {
			arpq = e->arpq_head;
			e->arpq_head = e->arpq_tail = NULL;
		} else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
			   e->arpq_head) {
			write_l2e(adap, e, 1);
		}
	} else {
		e->state = neigh->nud_state & NUD_CONNECTED ?
			L2T_STATE_VALID : L2T_STATE_STALE;
		if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
			write_l2e(adap, e, 0);
	}

	spin_unlock_bh(&e->lock);

	if (arpq)
		handle_failed_resolution(adap, arpq);
}

/*
 * Allocate an L2T entry for use by a switching rule.  Such entries need to be
 * explicitly freed and while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
{
	struct l2t_entry *e;

	write_lock_bh(&d->lock);
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);        /* avoid race with t4_l2t_free */
		e->state = L2T_STATE_SWITCHING;
		atomic_set(&e->refcnt, 1);
		spin_unlock(&e->lock);
	}
	write_unlock_bh(&d->lock);
	return e;
}

/*
 * Sets/updates the contents of a switching L2T entry that has been allocated
 * with an earlier call to @t4_l2t_alloc_switching.
 */
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
			 u8 port, u8 *eth_addr)
{
	e->vlan = vlan;
	e->lport = port;
	memcpy(e->dmac, eth_addr, ETH_ALEN);
	return write_l2e(adap, e, 0);
}

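/*
 * The two calls above pair up as follows (a sketch; vlan, port and mac
 * come from the switching rule being installed):
 *
 *	e = t4_l2t_alloc_switching(adap->l2t);
 *	if (!e)
 *		return -ENOMEM;
 *	err = t4_l2t_set_switching(adap, e, vlan, port, mac);
 */
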
struct l2t_data *t4_init_l2t(void)
{
	int i;
	struct l2t_data *d;

	d = t4_alloc_mem(sizeof(*d));
	if (!d)
		return NULL;

	d->rover = d->l2tab;
	atomic_set(&d->nfree, L2T_SIZE);
	rwlock_init(&d->lock);

	for (i = 0; i < L2T_SIZE; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
	}
	return d;
}

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
	struct l2t_entry *l2tab = seq->private;

	return pos >= L2T_SIZE ? NULL : &l2tab[pos];
}

static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	v = l2t_get_idx(seq, *pos);
	if (v)
		++*pos;
	return v;
}

static void l2t_seq_stop(struct seq_file *seq, void *v)
{
}

static char l2e_state(const struct l2t_entry *e)
{
	switch (e->state) {
	case L2T_STATE_VALID: return 'V';
	case L2T_STATE_STALE: return 'S';
	case L2T_STATE_SYNC_WRITE: return 'W';
	case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
	case L2T_STATE_SWITCHING: return 'X';
	default:
		return 'U';
	}
}

static int l2t_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, " Idx IP address                "
			 "Ethernet address  VLAN/P LP State Users Port\n");
	else {
		char ip[60];
		struct l2t_entry *e = v;

		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_SWITCHING)
			ip[0] = '\0';
		else
			sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
		seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n",
			   e->idx, ip, e->dmac,
			   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
			   l2e_state(e), atomic_read(&e->refcnt),
			   e->neigh ? e->neigh->dev->name : "");
		spin_unlock_bh(&e->lock);
	}
	return 0;
}

static const struct seq_operations l2t_seq_ops = {
	.start = l2t_seq_start,
	.next = l2t_seq_next,
	.stop = l2t_seq_stop,
	.show = l2t_seq_show
};

static int l2t_seq_open(struct inode *inode, struct file *file)
{
	int rc = seq_open(file, &l2t_seq_ops);

	if (!rc) {
		struct adapter *adap = inode->i_private;
		struct seq_file *seq = file->private_data;

		seq->private = adap->l2t->l2tab;
	}
	return rc;
}

const struct file_operations t4_l2t_fops = {
	.owner = THIS_MODULE,
	.open = l2t_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h
new file mode 100644
index 000000000000..643f27ed3cf4
--- /dev/null
+++ b/drivers/net/cxgb4/l2t.h
@@ -0,0 +1,110 @@
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CXGB4_L2T_H
#define __CXGB4_L2T_H

#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <asm/atomic.h>

struct adapter;
struct l2t_data;
struct neighbour;
struct net_device;
struct file_operations;
struct cpl_l2t_write_rpl;

/*
 * Each L2T entry plays multiple roles.  First of all, it keeps state for the
 * corresponding entry of the HW L2 table and maintains a queue of offload
 * packets awaiting address resolution.  Second, it is a node of a hash table
 * chain, where the nodes of the chain are linked together through their next
 * pointer.  Finally, each node is a bucket of a hash table, pointing to the
 * first element in its chain through its first pointer.
 */
struct l2t_entry {
	u16 state;                  /* entry state */
	u16 idx;                    /* entry index */
	u32 addr[4];                /* next hop IP or IPv6 address */
	int ifindex;                /* neighbor's net_device's ifindex */
	struct neighbour *neigh;    /* associated neighbour */
	struct l2t_entry *first;    /* start of hash chain */
	struct l2t_entry *next;     /* next l2t_entry on chain */
	struct sk_buff *arpq_head;  /* queue of packets awaiting resolution */
	struct sk_buff *arpq_tail;
	spinlock_t lock;
	atomic_t refcnt;            /* entry reference count */
	u16 hash;                   /* hash bucket the entry is on */
	u16 vlan;                   /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
	u8 v6;                      /* whether entry is for IPv6 */
	u8 lport;                   /* associated offload logical interface */
	u8 dmac[ETH_ALEN];          /* neighbour's MAC address */
};

typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb);

/*
 * Callback stored in an skb to handle address resolution failure.
 */
struct l2t_skb_cb {
	void *handle;
	arp_err_handler_t arp_err_handler;
};

#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)

static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle,
					  arp_err_handler_t handler)
{
	L2T_SKB_CB(skb)->handle = handle;
	L2T_SKB_CB(skb)->arp_err_handler = handler;
}

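/*
 * Example (a sketch; "ep" and my_arp_failure() are hypothetical ULD
 * names): an skb queued behind an unresolved L2T entry can carry a
 * failure handler so the ULD, rather than the driver, decides its fate:
 *
 *	t4_set_arp_err_handler(skb, ep, my_arp_failure);
 *	cxgb4_l2t_send(dev, skb, ep->l2t);
 *
 * If resolution fails, handle_failed_resolution() in l2t.c invokes the
 * handler with (handle, skb); otherwise the skb is sent once the entry is
 * written to the HW L2 table.
 */
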
void cxgb4_l2t_release(struct l2t_entry *e);
int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
		   struct l2t_entry *e);
struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
				const struct net_device *physdev,
				unsigned int priority);

void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
			 u8 port, u8 *eth_addr);
struct l2t_data *t4_init_l2t(void);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);

extern const struct file_operations t4_l2t_fops;
#endif  /* __CXGB4_L2T_H */