author     Stephen Hemminger <shemminger@vyatta.com>  2010-10-18 01:39:18 -0400
committer  David S. Miller <davem@davemloft.net>      2010-10-21 07:25:49 -0400
commit     31b9c19bfe32bed7fdf80cd0b1aa9d0f0569844a (patch)
tree       47ffbb9e16062bf88e713f0332f4e0e4e2c887a7 /drivers/net/cxgb4
parent     b003f4e171304234eae9cc11c9fd7f1cbaaf0d6b (diff)
cxgb4: function namespace cleanup (v3)
Make functions that are used in only one file local (static).
Remove a large amount of dead code relating to functions unsupported
in the mainline driver, such as RSS, IPv6, and TCP offload.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: Dimitris Michailidis <dm@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
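For context, the pattern this patch applies is sketched below with hypothetical names (foo.h, foo.c, do_work); it is illustrative only, not code from the driver. A function that is only called from within its own file loses its header declaration and any EXPORT_SYMBOL, and its definition gains static, so the symbol is confined to one translation unit and unused static functions later show up as compiler warnings.

    /* Before: declared in foo.h and exported, but only ever called from foo.c. */

    /* foo.h */
    void do_work(int n);

    /* foo.c */
    #include <linux/export.h>
    #include "foo.h"

    void do_work(int n)
    {
            /* ... do the work ... */
    }
    EXPORT_SYMBOL(do_work);

    /* After: the header declaration and the export are dropped and the
     * definition becomes static, limiting the symbol to foo.c.
     */

    /* foo.c */
    static void do_work(int n)
    {
            /* ... do the work ... */
    }

The same reasoning removes entire functions below (e.g. t4_read_rss, t4_get_lb_stats): once nothing in the mainline driver calls them, they are dead code rather than candidates for static.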
Diffstat (limited to 'drivers/net/cxgb4')
-rw-r--r--  drivers/net/cxgb4/cxgb4.h      |  15
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c |  68
-rw-r--r--  drivers/net/cxgb4/cxgb4_uld.h  |   6
-rw-r--r--  drivers/net/cxgb4/l2t.c        |  34
-rw-r--r--  drivers/net/cxgb4/l2t.h        |   3
-rw-r--r--  drivers/net/cxgb4/t4_hw.c      | 332
6 files changed, 3 insertions(+), 455 deletions(-)
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 3ece9f5069fa..eaa49e4119f1 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -592,7 +592,6 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id);
 void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
 
 void *t4_alloc_mem(size_t size);
-void t4_free_mem(void *addr);
 
 void t4_free_sge_resources(struct adapter *adap);
 irq_handler_t t4_intr_handler(struct adapter *adap);
@@ -651,7 +650,6 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
 
 void t4_intr_enable(struct adapter *adapter);
 void t4_intr_disable(struct adapter *adapter);
-void t4_intr_clear(struct adapter *adapter);
 int t4_slow_intr_handler(struct adapter *adapter);
 
 int t4_wait_dev_ready(struct adapter *adap);
@@ -664,24 +662,16 @@ int t4_check_fw_version(struct adapter *adapter);
 int t4_prep_adapter(struct adapter *adapter);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
 void t4_fatal_err(struct adapter *adapter);
-int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
-                        int filter_index, int enable);
-void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
-                         int filter_index, int *enabled);
 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
                         int start, int n, const u16 *rspq, unsigned int nrspq);
 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
                        unsigned int flags);
-int t4_read_rss(struct adapter *adapter, u16 *entries);
 int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
                 u64 *parity);
 
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
-void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
-
 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
-void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
                          struct tp_tcp_stats *v6);
 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
@@ -711,8 +701,6 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
                 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
                 unsigned int *rss_size);
-int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
-               unsigned int vf, unsigned int viid);
 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
                   int mtu, int promisc, int all_multi, int bcast, int vlanex,
                   bool sleep_ok);
@@ -731,9 +719,6 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
                unsigned int mmd, unsigned int reg, u16 *valp);
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
                unsigned int mmd, unsigned int reg, u16 val);
-int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
-                     unsigned int pf, unsigned int vf, unsigned int iqid,
-                     unsigned int fl0id, unsigned int fl1id);
 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                unsigned int vf, unsigned int iqtype, unsigned int iqid,
                unsigned int fl0id, unsigned int fl1id);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 22169a73b7f7..930bd075a43e 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -880,7 +880,7 @@ void *t4_alloc_mem(size_t size)
 /*
  * Free memory allocated through alloc_mem().
  */
-void t4_free_mem(void *addr)
+static void t4_free_mem(void *addr)
 {
         if (is_vmalloc_addr(addr))
                 vfree(addr);
@@ -2206,8 +2206,8 @@ static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
  * Queue a TID release request and if necessary schedule a work queue to
  * process it.
  */
-void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
-                             unsigned int tid)
+static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
+                                    unsigned int tid)
 {
         void **p = &t->tid_tab[tid];
         struct adapter *adap = container_of(t, struct adapter, tids);
@@ -2222,7 +2222,6 @@ void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
         }
         spin_unlock_bh(&adap->tid_release_lock);
 }
-EXPORT_SYMBOL(cxgb4_queue_tid_release);
 
 /*
  * Process the list of pending TID release requests.
@@ -2355,48 +2354,6 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
 EXPORT_SYMBOL(cxgb4_create_server);
 
 /**
- * cxgb4_create_server6 - create an IPv6 server
- * @dev: the device
- * @stid: the server TID
- * @sip: local IPv6 address to bind server to
- * @sport: the server's TCP port
- * @queue: queue to direct messages from this server to
- *
- * Create an IPv6 server for the given port and address.
- * Returns <0 on error and one of the %NET_XMIT_* values on success.
- */
-int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
-                         const struct in6_addr *sip, __be16 sport,
-                         unsigned int queue)
-{
-        unsigned int chan;
-        struct sk_buff *skb;
-        struct adapter *adap;
-        struct cpl_pass_open_req6 *req;
-
-        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
-        if (!skb)
-                return -ENOMEM;
-
-        adap = netdev2adap(dev);
-        req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
-        INIT_TP_WR(req, 0);
-        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
-        req->local_port = sport;
-        req->peer_port = htons(0);
-        req->local_ip_hi = *(__be64 *)(sip->s6_addr);
-        req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
-        req->peer_ip_hi = cpu_to_be64(0);
-        req->peer_ip_lo = cpu_to_be64(0);
-        chan = rxq_to_chan(&adap->sge, queue);
-        req->opt0 = cpu_to_be64(TX_CHAN(chan));
-        req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
-                                SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
-        return t4_mgmt_tx(adap, skb);
-}
-EXPORT_SYMBOL(cxgb4_create_server6);
-
-/**
  * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
  * @mtus: the HW MTU table
  * @mtu: the target MTU
@@ -2455,25 +2412,6 @@ unsigned int cxgb4_port_idx(const struct net_device *dev)
 }
 EXPORT_SYMBOL(cxgb4_port_idx);
 
-/**
- * cxgb4_netdev_by_hwid - return the net device of a HW port
- * @pdev: identifies the adapter
- * @id: the HW port id
- *
- * Return the net device associated with the interface with the given HW
- * id.
- */
-struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
-{
-        const struct adapter *adap = pci_get_drvdata(pdev);
-
-        if (!adap || id >= NCHAN)
-                return NULL;
-        id = adap->chan_map[id];
-        return id < MAX_NPORTS ? adap->port[id] : NULL;
-}
-EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
-
 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
                          struct tp_tcp_stats *v6)
 {
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
index 85d74e751ce0..1b48c0170145 100644
--- a/drivers/net/cxgb4/cxgb4_uld.h
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -139,16 +139,11 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
 void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
 void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
-void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
-                             unsigned int tid);
 
 struct in6_addr;
 
 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
                         __be32 sip, __be16 sport, unsigned int queue);
-int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
-                         const struct in6_addr *sip, __be16 sport,
-                         unsigned int queue);
 
 static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
 {
@@ -233,7 +228,6 @@ int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
 unsigned int cxgb4_port_chan(const struct net_device *dev);
 unsigned int cxgb4_port_viid(const struct net_device *dev);
 unsigned int cxgb4_port_idx(const struct net_device *dev);
-struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id);
 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
                             unsigned int *idx);
 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
index e8f0f55e9d08..a2d323c473f8 100644
--- a/drivers/net/cxgb4/l2t.c
+++ b/drivers/net/cxgb4/l2t.c
@@ -481,40 +481,6 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
         handle_failed_resolution(adap, arpq);
 }
 
-/*
- * Allocate an L2T entry for use by a switching rule. Such entries need to be
- * explicitly freed and while busy they are not on any hash chain, so normal
- * address resolution updates do not see them.
- */
-struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
-{
-        struct l2t_entry *e;
-
-        write_lock_bh(&d->lock);
-        e = alloc_l2e(d);
-        if (e) {
-                spin_lock(&e->lock); /* avoid race with t4_l2t_free */
-                e->state = L2T_STATE_SWITCHING;
-                atomic_set(&e->refcnt, 1);
-                spin_unlock(&e->lock);
-        }
-        write_unlock_bh(&d->lock);
-        return e;
-}
-
-/*
- * Sets/updates the contents of a switching L2T entry that has been allocated
- * with an earlier call to @t4_l2t_alloc_switching.
- */
-int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
-                         u8 port, u8 *eth_addr)
-{
-        e->vlan = vlan;
-        e->lport = port;
-        memcpy(e->dmac, eth_addr, ETH_ALEN);
-        return write_l2e(adap, e, 0);
-}
-
 struct l2t_data *t4_init_l2t(void)
 {
         int i;
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h
index 643f27ed3cf4..7bd8f42378ff 100644
--- a/drivers/net/cxgb4/l2t.h
+++ b/drivers/net/cxgb4/l2t.h
@@ -100,9 +100,6 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
                                 unsigned int priority);
 
 void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
-struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
-int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
-                         u8 port, u8 *eth_addr);
 struct l2t_data *t4_init_l2t(void);
 void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
 
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index 9e1a4b49b47a..bb813d94aea8 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -120,30 +120,6 @@ static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
         }
 }
 
-#if 0
-/**
- * t4_write_indirect - write indirectly addressed registers
- * @adap: the adapter
- * @addr_reg: register holding the indirect addresses
- * @data_reg: register holding the value for the indirect registers
- * @vals: values to write
- * @nregs: how many indirect registers to write
- * @start_idx: address of first indirect register to write
- *
- * Writes a sequential block of registers that are accessed indirectly
- * through an address/data register pair.
- */
-static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
-                              unsigned int data_reg, const u32 *vals,
-                              unsigned int nregs, unsigned int start_idx)
-{
-        while (nregs--) {
-                t4_write_reg(adap, addr_reg, start_idx++);
-                t4_write_reg(adap, data_reg, *vals++);
-        }
-}
-#endif
-
 /*
  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
  */
@@ -1560,44 +1536,6 @@ void t4_intr_disable(struct adapter *adapter)
 }
 
 /**
- * t4_intr_clear - clear all interrupts
- * @adapter: the adapter whose interrupts should be cleared
- *
- * Clears all interrupts. The caller must be a PCI function managing
- * global interrupts.
- */
-void t4_intr_clear(struct adapter *adapter)
-{
-        static const unsigned int cause_reg[] = {
-                SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
-                PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
-                PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
-                PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
-                MC_INT_CAUSE,
-                MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
-                EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
-                CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
-                MYPF_REG(CIM_PF_HOST_INT_CAUSE),
-                TP_INT_CAUSE,
-                ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
-                PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
-                MPS_RX_PERR_INT_CAUSE,
-                CPL_INTR_CAUSE,
-                MYPF_REG(PL_PF_INT_CAUSE),
-                PL_PL_INT_CAUSE,
-                LE_DB_INT_CAUSE,
-        };
-
-        unsigned int i;
-
-        for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
-                t4_write_reg(adapter, cause_reg[i], 0xffffffff);
-
-        t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
-        (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
-}
-
-/**
  * hash_mac_addr - return the hash value of a MAC address
  * @addr: the 48-bit Ethernet MAC address
  *
@@ -1709,36 +1647,6 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
         return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
 }
 
-/* Read an RSS table row */
-static int rd_rss_row(struct adapter *adap, int row, u32 *val)
-{
-        t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
-        return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
-                                   5, 0, val);
-}
-
-/**
- * t4_read_rss - read the contents of the RSS mapping table
- * @adapter: the adapter
- * @map: holds the contents of the RSS mapping table
- *
- * Reads the contents of the RSS hash->queue mapping table.
- */
-int t4_read_rss(struct adapter *adapter, u16 *map)
-{
-        u32 val;
-        int i, ret;
-
-        for (i = 0; i < RSS_NENTRIES / 2; ++i) {
-                ret = rd_rss_row(adapter, i, &val);
-                if (ret)
-                        return ret;
-                *map++ = LKPTBLQUEUE0_GET(val);
-                *map++ = LKPTBLQUEUE1_GET(val);
-        }
-        return 0;
-}
-
 /**
  * t4_tp_get_tcp_stats - read TP's TCP MIB counters
  * @adap: the adapter
@@ -1779,29 +1687,6 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
 }
 
 /**
- * t4_tp_get_err_stats - read TP's error MIB counters
- * @adap: the adapter
- * @st: holds the counter values
- *
- * Returns the values of TP's error counters.
- */
-void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
-{
-        t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
-                         12, TP_MIB_MAC_IN_ERR_0);
-        t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
-                         8, TP_MIB_TNL_CNG_DROP_0);
-        t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
-                         4, TP_MIB_TNL_DROP_0);
-        t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
-                         4, TP_MIB_OFD_VLN_DROP_0);
-        t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
-                         4, TP_MIB_TCP_V6IN_ERR_0);
-        t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
-                         2, TP_MIB_OFD_ARP_DROP);
-}
-
-/**
  * t4_read_mtu_tbl - returns the values in the HW path MTU table
  * @adap: the adapter
  * @mtus: where to store the MTU values
@@ -1916,122 +1801,6 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
 }
 
 /**
- * t4_set_trace_filter - configure one of the tracing filters
- * @adap: the adapter
- * @tp: the desired trace filter parameters
- * @idx: which filter to configure
- * @enable: whether to enable or disable the filter
- *
- * Configures one of the tracing filters available in HW. If @enable is
- * %0 @tp is not examined and may be %NULL.
- */
-int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
-                        int idx, int enable)
-{
-        int i, ofst = idx * 4;
-        u32 data_reg, mask_reg, cfg;
-        u32 multitrc = TRCMULTIFILTER;
-
-        if (!enable) {
-                t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
-                goto out;
-        }
-
-        if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
-            tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
-            tp->snap_len > 9600 || (idx && tp->snap_len > 256))
-                return -EINVAL;
-
-        if (tp->snap_len > 256) { /* must be tracer 0 */
-                if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
-                     t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
-                     t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
-                        return -EINVAL; /* other tracers are enabled */
-                multitrc = 0;
-        } else if (idx) {
-                i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
-                if (TFCAPTUREMAX_GET(i) > 256 &&
-                    (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
-                        return -EINVAL;
-        }
-
-        /* stop the tracer we'll be changing */
-        t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
-
-        /* disable tracing globally if running in the wrong single/multi mode */
-        cfg = t4_read_reg(adap, MPS_TRC_CFG);
-        if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
-                t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
-                t4_read_reg(adap, MPS_TRC_CFG); /* flush */
-                msleep(1);
-                if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
-                        return -ETIMEDOUT;
-        }
-        /*
-         * At this point either the tracing is enabled and in the right mode or
-         * disabled.
-         */
-
-        idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
-        data_reg = MPS_TRC_FILTER0_MATCH + idx;
-        mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
-
-        for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
-                t4_write_reg(adap, data_reg, tp->data[i]);
-                t4_write_reg(adap, mask_reg, ~tp->mask[i]);
-        }
-        t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
-                     TFCAPTUREMAX(tp->snap_len) |
-                     TFMINPKTSIZE(tp->min_len));
-        t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
-                     TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
-                     TFPORT(tp->port) | TFEN |
-                     (tp->invert ? TFINVERTMATCH : 0));
-
-        cfg &= ~TRCMULTIFILTER;
-        t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
-out:    t4_read_reg(adap, MPS_TRC_CFG); /* flush */
-        return 0;
-}
-
-/**
- * t4_get_trace_filter - query one of the tracing filters
- * @adap: the adapter
- * @tp: the current trace filter parameters
- * @idx: which trace filter to query
- * @enabled: non-zero if the filter is enabled
- *
- * Returns the current settings of one of the HW tracing filters.
- */
-void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
-                         int *enabled)
-{
-        u32 ctla, ctlb;
-        int i, ofst = idx * 4;
-        u32 data_reg, mask_reg;
-
-        ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
-        ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
-
-        *enabled = !!(ctla & TFEN);
-        tp->snap_len = TFCAPTUREMAX_GET(ctlb);
-        tp->min_len = TFMINPKTSIZE_GET(ctlb);
-        tp->skip_ofst = TFOFFSET_GET(ctla);
-        tp->skip_len = TFLENGTH_GET(ctla);
-        tp->invert = !!(ctla & TFINVERTMATCH);
-        tp->port = TFPORT_GET(ctla);
-
-        ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
-        data_reg = MPS_TRC_FILTER0_MATCH + ofst;
-        mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
-
-        for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
-                tp->mask[i] = ~t4_read_reg(adap, mask_reg);
-                tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
-        }
-}
-
-/**
  * get_mps_bg_map - return the buffer groups associated with a port
  * @adap: the adapter
  * @idx: the port index
@@ -2133,52 +1902,6 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
 }
 
 /**
- * t4_get_lb_stats - collect loopback port statistics
- * @adap: the adapter
- * @idx: the loopback port index
- * @p: the stats structure to fill
- *
- * Return HW statistics for the given loopback port.
- */
-void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
-{
-        u32 bgmap = get_mps_bg_map(adap, idx);
-
-#define GET_STAT(name) \
-        t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
-#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
-
-        p->octets = GET_STAT(BYTES);
-        p->frames = GET_STAT(FRAMES);
-        p->bcast_frames = GET_STAT(BCAST);
-        p->mcast_frames = GET_STAT(MCAST);
-        p->ucast_frames = GET_STAT(UCAST);
-        p->error_frames = GET_STAT(ERROR);
-
-        p->frames_64 = GET_STAT(64B);
-        p->frames_65_127 = GET_STAT(65B_127B);
-        p->frames_128_255 = GET_STAT(128B_255B);
-        p->frames_256_511 = GET_STAT(256B_511B);
-        p->frames_512_1023 = GET_STAT(512B_1023B);
-        p->frames_1024_1518 = GET_STAT(1024B_1518B);
-        p->frames_1519_max = GET_STAT(1519B_MAX);
-        p->drop = t4_read_reg(adap, PORT_REG(idx,
-                              MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
-
-        p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
-        p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
-        p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
-        p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
-        p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
-        p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
-        p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
-        p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
-
-#undef GET_STAT
-#undef GET_STAT_COM
-}
-
-/**
  * t4_wol_magic_enable - enable/disable magic packet WoL
  * @adap: the adapter
  * @port: the physical port index
@@ -2584,30 +2307,6 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
 }
 
 /**
- * t4_free_vi - free a virtual interface
- * @adap: the adapter
- * @mbox: mailbox to use for the FW command
- * @pf: the PF owning the VI
- * @vf: the VF owning the VI
- * @viid: virtual interface identifiler
- *
- * Free a previously allocated virtual interface.
- */
-int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
-               unsigned int vf, unsigned int viid)
-{
-        struct fw_vi_cmd c;
-
-        memset(&c, 0, sizeof(c));
-        c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
-                            FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
-                            FW_VI_CMD_VFN(vf));
-        c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
-        c.type_viid = htons(FW_VI_CMD_VIID(viid));
-        return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
-}
-
-/**
  * t4_set_rxmode - set Rx properties of a virtual interface
  * @adap: the adapter
  * @mbox: mailbox to use for the FW command
@@ -2833,37 +2532,6 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
 }
 
 /**
- * t4_iq_start_stop - enable/disable an ingress queue and its FLs
- * @adap: the adapter
- * @mbox: mailbox to use for the FW command
- * @start: %true to enable the queues, %false to disable them
- * @pf: the PF owning the queues
- * @vf: the VF owning the queues
- * @iqid: ingress queue id
- * @fl0id: FL0 queue id or 0xffff if no attached FL0
- * @fl1id: FL1 queue id or 0xffff if no attached FL1
- *
- * Starts or stops an ingress queue and its associated FLs, if any.
- */
-int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
-                     unsigned int pf, unsigned int vf, unsigned int iqid,
-                     unsigned int fl0id, unsigned int fl1id)
-{
-        struct fw_iq_cmd c;
-
-        memset(&c, 0, sizeof(c));
-        c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
-                            FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
-                            FW_IQ_CMD_VFN(vf));
-        c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
-                                 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
-        c.iqid = htons(iqid);
-        c.fl0id = htons(fl0id);
-        c.fl1id = htons(fl1id);
-        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
-}
-
-/**
  * t4_iq_free - free an ingress queue and its FLs
  * @adap: the adapter
  * @mbox: mailbox to use for the FW command