author    Linus Torvalds <torvalds@linux-foundation.org>  2010-10-23 14:47:02 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-10-23 14:47:02 -0400
commit    5f05647dd81c11a6a165ccc8f0c1370b16f3bcb0 (patch)
tree      7851ef1c93aa1aba7ef327ca4b75fd35e6d10f29 /drivers/net/cxgb4
parent    02f36038c568111ad4fc433f6fa760ff5e38fab4 (diff)
parent    ec37a48d1d16c30b655ac5280209edf52a6775d4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1699 commits)
  bnx2/bnx2x: Unsupported Ethtool operations should return -EINVAL.
  vlan: Calling vlan_hwaccel_do_receive() is always valid.
  tproxy: use the interface primary IP address as a default value for --on-ip
  tproxy: added IPv6 support to the socket match
  cxgb3: function namespace cleanup
  tproxy: added IPv6 support to the TPROXY target
  tproxy: added IPv6 socket lookup function to nf_tproxy_core
  be2net: Changes to use only priority codes allowed by f/w
  tproxy: allow non-local binds of IPv6 sockets if IP_TRANSPARENT is enabled
  tproxy: added tproxy sockopt interface in the IPV6 layer
  tproxy: added udp6_lib_lookup function
  tproxy: added const specifiers to udp lookup functions
  tproxy: split off ipv6 defragmentation to a separate module
  l2tp: small cleanup
  nf_nat: restrict ICMP translation for embedded header
  can: mcp251x: fix generation of error frames
  can: mcp251x: fix endless loop in interrupt handler if CANINTF_MERRF is set
  can-raw: add msg_flags to distinguish local traffic
  9p: client code cleanup
  rds: make local functions/variables static
  ...

Fix up conflicts in net/core/dev.c, drivers/net/pcmcia/smc91c92_cs.c and
drivers/net/wireless/ath/ath9k/debug.c as per David.
Diffstat (limited to 'drivers/net/cxgb4')
-rw-r--r--  drivers/net/cxgb4/cxgb4.h       |  17
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c  | 167
-rw-r--r--  drivers/net/cxgb4/cxgb4_uld.h   |   6
-rw-r--r--  drivers/net/cxgb4/l2t.c         |  34
-rw-r--r--  drivers/net/cxgb4/l2t.h         |   3
-rw-r--r--  drivers/net/cxgb4/sge.c         |  19
-rw-r--r--  drivers/net/cxgb4/t4_hw.c       | 332
-rw-r--r--  drivers/net/cxgb4/t4_hw.h       |   1
-rw-r--r--  drivers/net/cxgb4/t4fw_api.h    |   5
9 files changed, 96 insertions(+), 488 deletions(-)
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 6e562c0dad7d..eaa49e4119f1 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -463,6 +463,8 @@ struct sge {
 	u8 counter_val[SGE_NCOUNTERS];
 	unsigned int starve_thres;
 	u8 idma_state[2];
+	unsigned int egr_start;
+	unsigned int ingr_start;
 	void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
 	struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
 	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
@@ -590,7 +592,6 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id);
 void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

 void *t4_alloc_mem(size_t size);
-void t4_free_mem(void *addr);

 void t4_free_sge_resources(struct adapter *adap);
 irq_handler_t t4_intr_handler(struct adapter *adap);
@@ -649,7 +650,6 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,

 void t4_intr_enable(struct adapter *adapter);
 void t4_intr_disable(struct adapter *adapter);
-void t4_intr_clear(struct adapter *adapter);
 int t4_slow_intr_handler(struct adapter *adapter);

 int t4_wait_dev_ready(struct adapter *adap);
@@ -662,24 +662,16 @@ int t4_check_fw_version(struct adapter *adapter);
 int t4_prep_adapter(struct adapter *adapter);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
 void t4_fatal_err(struct adapter *adapter);
-int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
-			int filter_index, int enable);
-void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
-			int filter_index, int *enabled);
 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
 			int start, int n, const u16 *rspq, unsigned int nrspq);
 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
 			unsigned int flags);
-int t4_read_rss(struct adapter *adapter, u16 *entries);
 int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
 		u64 *parity);

 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
-void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
-
 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
-void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
 			struct tp_tcp_stats *v6);
 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
@@ -709,8 +701,6 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
 		unsigned int *rss_size);
-int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
-		unsigned int vf, unsigned int viid);
 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		int mtu, int promisc, int all_multi, int bcast, int vlanex,
 		bool sleep_ok);
@@ -729,9 +719,6 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 		unsigned int mmd, unsigned int reg, u16 *valp);
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 		unsigned int mmd, unsigned int reg, u16 val);
-int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
-		unsigned int pf, unsigned int vf, unsigned int iqid,
-		unsigned int fl0id, unsigned int fl1id);
 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		unsigned int vf, unsigned int iqtype, unsigned int iqid,
 		unsigned int fl0id, unsigned int fl1id);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index e2bf10d90add..87054e0a5746 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -175,16 +175,26 @@ enum {

 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
 	CH_DEVICE(0xa000, 0),  /* PE10K */
-	CH_DEVICE(0x4001, 0),
-	CH_DEVICE(0x4002, 0),
-	CH_DEVICE(0x4003, 0),
-	CH_DEVICE(0x4004, 0),
-	CH_DEVICE(0x4005, 0),
-	CH_DEVICE(0x4006, 0),
-	CH_DEVICE(0x4007, 0),
-	CH_DEVICE(0x4008, 0),
-	CH_DEVICE(0x4009, 0),
-	CH_DEVICE(0x400a, 0),
+	CH_DEVICE(0x4001, -1),
+	CH_DEVICE(0x4002, -1),
+	CH_DEVICE(0x4003, -1),
+	CH_DEVICE(0x4004, -1),
+	CH_DEVICE(0x4005, -1),
+	CH_DEVICE(0x4006, -1),
+	CH_DEVICE(0x4007, -1),
+	CH_DEVICE(0x4008, -1),
+	CH_DEVICE(0x4009, -1),
+	CH_DEVICE(0x400a, -1),
+	CH_DEVICE(0x4401, 4),
+	CH_DEVICE(0x4402, 4),
+	CH_DEVICE(0x4403, 4),
+	CH_DEVICE(0x4404, 4),
+	CH_DEVICE(0x4405, 4),
+	CH_DEVICE(0x4406, 4),
+	CH_DEVICE(0x4407, 4),
+	CH_DEVICE(0x4408, 4),
+	CH_DEVICE(0x4409, 4),
+	CH_DEVICE(0x440a, 4),
 	{ 0, }
 };

@@ -423,10 +433,11 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
 	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
 		const struct cpl_sge_egr_update *p = (void *)rsp;
 		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
-		struct sge_txq *txq = q->adap->sge.egr_map[qid];
+		struct sge_txq *txq;

+		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
 		txq->restarts++;
-		if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
+		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
 			struct sge_eth_txq *eq;

 			eq = container_of(txq, struct sge_eth_txq, q);
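
The hunk above is the core pattern of this merge for queue bookkeeping: firmware now hands out queue IDs starting at an arbitrary base (egr_start/ingr_start, queried at init time), so every lookup subtracts that base before indexing the driver's zero-based arrays. A minimal standalone sketch of the idea follows; the struct name, helper name, bounds check, and the MAX_EGRQ value here are illustrative, not driver code.

/* Sketch: translate an absolute egress queue ID reported by hardware
 * into an index of a zero-based lookup array, the way egr_map[] is
 * indexed after this change.  Names and sizes are hypothetical. */
#include <stddef.h>

#define MAX_EGRQ 4096

struct egr_table {
	unsigned int egr_start;   /* first queue ID assigned by firmware */
	void *egr_map[MAX_EGRQ];  /* indexed by qid - egr_start */
};

static void *egr_lookup(const struct egr_table *t, unsigned int qid)
{
	unsigned int idx = qid - t->egr_start;

	return idx < MAX_EGRQ ? t->egr_map[idx] : NULL;
}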
@@ -658,6 +669,15 @@ static int setup_rss(struct adapter *adap)
 }

 /*
+ * Return the channel of the ingress queue with the given qid.
+ */
+static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
+{
+	qid -= p->ingr_start;
+	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
+}
+
+/*
  * Wait until all NAPI handlers are descheduled.
  */
 static void quiesce_rx(struct adapter *adap)
@@ -860,7 +880,7 @@ void *t4_alloc_mem(size_t size)
 /*
  * Free memory allocated through alloc_mem().
  */
-void t4_free_mem(void *addr)
+static void t4_free_mem(void *addr)
 {
 	if (is_vmalloc_addr(addr))
 		vfree(addr);
@@ -1671,27 +1691,41 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 	return 0;
 }

-/*
- * Translate a physical EEPROM address to virtual. The first 1K is accessed
- * through virtual addresses starting at 31K, the rest is accessed through
- * virtual addresses starting at 0. This mapping is correct only for PF0.
+/**
+ * eeprom_ptov - translate a physical EEPROM address to virtual
+ * @phys_addr: the physical EEPROM address
+ * @fn: the PCI function number
+ * @sz: size of function-specific area
+ *
+ * Translate a physical EEPROM address to virtual. The first 1K is
+ * accessed through virtual addresses starting at 31K, the rest is
+ * accessed through virtual addresses starting at 0.
+ *
+ * The mapping is as follows:
+ * [0..1K) -> [31K..32K)
+ * [1K..1K+A) -> [31K-A..31K)
+ * [1K+A..ES) -> [0..ES-A-1K)
+ *
+ * where A = @fn * @sz, and ES = EEPROM size.
  */
-static int eeprom_ptov(unsigned int phys_addr)
+static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
 {
+	fn *= sz;
 	if (phys_addr < 1024)
 		return phys_addr + (31 << 10);
+	if (phys_addr < 1024 + fn)
+		return 31744 - fn + phys_addr - 1024;
 	if (phys_addr < EEPROMSIZE)
-		return phys_addr - 1024;
+		return phys_addr - 1024 - fn;
 	return -EINVAL;
 }

 /*
  * The next two routines implement eeprom read/write from physical addresses.
- * The physical->virtual translation is correct only for PF0.
  */
 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
 {
-	int vaddr = eeprom_ptov(phys_addr);
+	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

 	if (vaddr >= 0)
 		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
@@ -1700,7 +1734,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)

 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
 {
-	int vaddr = eeprom_ptov(phys_addr);
+	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

 	if (vaddr >= 0)
 		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
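
To make the new three-range mapping concrete, here is a standalone restatement of eeprom_ptov() with a worked example. The function body and the constants mirror the diff (EEPROMSIZE = 17408 and EEPROMPFSIZE = 1024, per t4_hw.h below); the main() harness and the plain -1 error return are ours.

/* Standalone sketch of the eeprom_ptov() mapping for PF 'fn'.
 * With A = fn * sz:
 *   [0..1K)    -> [31K..32K)    (shared first 1K)
 *   [1K..1K+A) -> [31K-A..31K)  (areas of lower-numbered PFs)
 *   [1K+A..ES) -> [0..ES-A-1K)  (this PF's area and beyond)
 */
#include <stdio.h>

#define EEPROMSIZE   17408  /* serial EEPROM physical size */
#define EEPROMPFSIZE 1024   /* per-PF writable area size */

static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -1;  /* -EINVAL in the driver */
}

int main(void)
{
	/* For PF4, A = 4096: physical 0 maps to virtual 31744 (31K),
	 * physical 1024 to 31744 - 4096 = 27648, and physical
	 * 1024 + 4096 = 5120 to virtual 0. */
	printf("%d %d %d\n", eeprom_ptov(0, 4, EEPROMPFSIZE),
	       eeprom_ptov(1024, 4, EEPROMPFSIZE),
	       eeprom_ptov(5120, 4, EEPROMPFSIZE));
	return 0;
}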
@@ -1743,6 +1777,14 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 	aligned_offset = eeprom->offset & ~3;
 	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

+	if (adapter->fn > 0) {
+		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+
+		if (aligned_offset < start ||
+		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
+			return -EPERM;
+	}
+
 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
 		/*
 		 * RMW possibly needed for first or last words.
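
The new check above confines each PF other than PF0 to its own 1 KB slice of the physical EEPROM, [1024 + fn*EEPROMPFSIZE, 1024 + (fn+1)*EEPROMPFSIZE). A small sketch of that arithmetic, with the alignment step copied from the hunk; the helper function itself is hypothetical.

/* Sketch: would an EEPROM write by PF 'fn' (fn > 0) be permitted,
 * mirroring the check added in set_eeprom()?  Illustrative only. */
#include <stdbool.h>

#define EEPROMPFSIZE 1024

static bool eeprom_write_allowed(unsigned int fn, unsigned int offset,
				 unsigned int len)
{
	/* round the start down and the length up to 4-byte boundaries,
	 * as set_eeprom() does before the permission check */
	unsigned int aligned_offset = offset & ~3u;
	unsigned int aligned_len = (len + (offset & 3) + 3) & ~3u;
	unsigned int start = 1024 + fn * EEPROMPFSIZE;

	return aligned_offset >= start &&
	       aligned_offset + aligned_len <= start + EEPROMPFSIZE;
}
/* e.g. PF1 owns physical [2048, 3072): eeprom_write_allowed(1, 2048, 64)
 * is true, eeprom_write_allowed(1, 1024, 64) is false. */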
@@ -2165,8 +2207,8 @@ static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
  * Queue a TID release request and if necessary schedule a work queue to
  * process it.
  */
-void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
-			     unsigned int tid)
+static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
+				    unsigned int tid)
 {
 	void **p = &t->tid_tab[tid];
 	struct adapter *adap = container_of(t, struct adapter, tids);
@@ -2181,7 +2223,6 @@ void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
 	}
 	spin_unlock_bh(&adap->tid_release_lock);
 }
-EXPORT_SYMBOL(cxgb4_queue_tid_release);

 /*
  * Process the list of pending TID release requests.
@@ -2305,7 +2346,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
 	req->peer_port = htons(0);
 	req->local_ip = sip;
 	req->peer_ip = htonl(0);
-	chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
+	chan = rxq_to_chan(&adap->sge, queue);
 	req->opt0 = cpu_to_be64(TX_CHAN(chan));
 	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
 				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
@@ -2314,48 +2355,6 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
 EXPORT_SYMBOL(cxgb4_create_server);

 /**
- * cxgb4_create_server6 - create an IPv6 server
- * @dev: the device
- * @stid: the server TID
- * @sip: local IPv6 address to bind server to
- * @sport: the server's TCP port
- * @queue: queue to direct messages from this server to
- *
- * Create an IPv6 server for the given port and address.
- * Returns <0 on error and one of the %NET_XMIT_* values on success.
- */
-int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
-			 const struct in6_addr *sip, __be16 sport,
-			 unsigned int queue)
-{
-	unsigned int chan;
-	struct sk_buff *skb;
-	struct adapter *adap;
-	struct cpl_pass_open_req6 *req;
-
-	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
-	if (!skb)
-		return -ENOMEM;
-
-	adap = netdev2adap(dev);
-	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
-	INIT_TP_WR(req, 0);
-	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
-	req->local_port = sport;
-	req->peer_port = htons(0);
-	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
-	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
-	req->peer_ip_hi = cpu_to_be64(0);
-	req->peer_ip_lo = cpu_to_be64(0);
-	chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
-	req->opt0 = cpu_to_be64(TX_CHAN(chan));
-	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
-				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
-	return t4_mgmt_tx(adap, skb);
-}
-EXPORT_SYMBOL(cxgb4_create_server6);
-
-/**
  * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
  * @mtus: the HW MTU table
  * @mtu: the target MTU
@@ -2414,25 +2413,6 @@ unsigned int cxgb4_port_idx(const struct net_device *dev)
 }
 EXPORT_SYMBOL(cxgb4_port_idx);

-/**
- * cxgb4_netdev_by_hwid - return the net device of a HW port
- * @pdev: identifies the adapter
- * @id: the HW port id
- *
- * Return the net device associated with the interface with the given HW
- * id.
- */
-struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
-{
-	const struct adapter *adap = pci_get_drvdata(pdev);
-
-	if (!adap || id >= NCHAN)
-		return NULL;
-	id = adap->chan_map[id];
-	return id < MAX_NPORTS ? adap->port[id] : NULL;
-}
-EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
-
 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
 			 struct tp_tcp_stats *v6)
 {
@@ -2722,7 +2702,10 @@ static int cxgb_open(struct net_device *dev)
 		return err;
 	}

-	dev->real_num_tx_queues = pi->nqsets;
+	netif_set_real_num_tx_queues(dev, pi->nqsets);
+	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
+	if (err)
+		return err;
 	err = link_start(dev);
 	if (!err)
 		netif_tx_start_all_queues(dev);
@@ -3062,12 +3045,16 @@ static int adap_init0(struct adapter *adap)
 	params[2] = FW_PARAM_PFVF(L2T_END);
 	params[3] = FW_PARAM_PFVF(FILTER_START);
 	params[4] = FW_PARAM_PFVF(FILTER_END);
-	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val);
+	params[5] = FW_PARAM_PFVF(IQFLINT_START);
+	params[6] = FW_PARAM_PFVF(EQ_START);
+	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
 	if (ret < 0)
 		goto bye;
 	port_vec = val[0];
 	adap->tids.ftid_base = val[3];
 	adap->tids.nftids = val[4] - val[3] + 1;
+	adap->sge.ingr_start = val[5];
+	adap->sge.egr_start = val[6];

 	if (c.ofldcaps) {
 		/* query offload-related parameters */
@@ -3815,7 +3802,7 @@ static void __devexit remove_one(struct pci_dev *pdev)
 		pci_disable_device(pdev);
 		pci_release_regions(pdev);
 		pci_set_drvdata(pdev, NULL);
-	} else if (PCI_FUNC(pdev->devfn) > 0)
+	} else
 		pci_release_regions(pdev);
 }

diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
index 85d74e751ce0..1b48c0170145 100644
--- a/drivers/net/cxgb4/cxgb4_uld.h
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -139,16 +139,11 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
 void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
 void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
-void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
-			     unsigned int tid);

 struct in6_addr;

 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
 			__be32 sip, __be16 sport, unsigned int queue);
-int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
-			 const struct in6_addr *sip, __be16 sport,
-			 unsigned int queue);

 static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
 {
@@ -233,7 +228,6 @@ int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
 unsigned int cxgb4_port_chan(const struct net_device *dev);
 unsigned int cxgb4_port_viid(const struct net_device *dev);
 unsigned int cxgb4_port_idx(const struct net_device *dev);
-struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id);
 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
 			    unsigned int *idx);
 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
index e8f0f55e9d08..a2d323c473f8 100644
--- a/drivers/net/cxgb4/l2t.c
+++ b/drivers/net/cxgb4/l2t.c
@@ -481,40 +481,6 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
 		handle_failed_resolution(adap, arpq);
 }

-/*
- * Allocate an L2T entry for use by a switching rule. Such entries need to be
- * explicitly freed and while busy they are not on any hash chain, so normal
- * address resolution updates do not see them.
- */
-struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
-{
-	struct l2t_entry *e;
-
-	write_lock_bh(&d->lock);
-	e = alloc_l2e(d);
-	if (e) {
-		spin_lock(&e->lock); /* avoid race with t4_l2t_free */
-		e->state = L2T_STATE_SWITCHING;
-		atomic_set(&e->refcnt, 1);
-		spin_unlock(&e->lock);
-	}
-	write_unlock_bh(&d->lock);
-	return e;
-}
-
-/*
- * Sets/updates the contents of a switching L2T entry that has been allocated
- * with an earlier call to @t4_l2t_alloc_switching.
- */
-int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
-			 u8 port, u8 *eth_addr)
-{
-	e->vlan = vlan;
-	e->lport = port;
-	memcpy(e->dmac, eth_addr, ETH_ALEN);
-	return write_l2e(adap, e, 0);
-}
-
 struct l2t_data *t4_init_l2t(void)
 {
 	int i;
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h
index 643f27ed3cf4..7bd8f42378ff 100644
--- a/drivers/net/cxgb4/l2t.h
+++ b/drivers/net/cxgb4/l2t.h
@@ -100,9 +100,6 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
 			       unsigned int priority);

 void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
-struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
-int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
-			 u8 port, u8 *eth_addr);
 struct l2t_data *t4_init_l2t(void);
 void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);

diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index bf38cfc57565..9967f3debce7 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -557,7 +557,8 @@ out: cred = q->avail - cred;

 	if (unlikely(fl_starving(q))) {
 		smp_wmb();
-		set_bit(q->cntxt_id, adap->sge.starving_fl);
+		set_bit(q->cntxt_id - adap->sge.egr_start,
+			adap->sge.starving_fl);
 	}

 	return cred;
@@ -974,7 +975,7 @@ out_free: dev_kfree_skb(skb);
 	}

 	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
-			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0));
+			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
 	cpl->pack = htons(0);
 	cpl->len = htons(skb->len);
 	cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1213,7 +1214,8 @@ static void txq_stop_maperr(struct sge_ofld_txq *q)
 {
 	q->mapping_err++;
 	q->q.stops++;
-	set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr);
+	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
+		q->adap->sge.txq_maperr);
 }

 /**
@@ -1603,7 +1605,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 			rxq->stats.rx_cso++;
 		}
 	} else
-		skb->ip_summed = CHECKSUM_NONE;
+		skb_checksum_none_assert(skb);

 	if (unlikely(pkt->vlan_ex)) {
 		struct vlan_group *grp = pi->vlan_grp;
@@ -1835,6 +1837,7 @@ static unsigned int process_intrq(struct adapter *adap)
 		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
 			unsigned int qid = ntohl(rc->pldbuflen_qid);

+			qid -= adap->sge.ingr_start;
 			napi_schedule(&adap->sge.ingr_map[qid]->napi);
 		}

@@ -2050,14 +2053,14 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 	/* set offset to -1 to distinguish ingress queues without FL */
 	iq->offset = fl ? 0 : -1;

-	adap->sge.ingr_map[iq->cntxt_id] = iq;
+	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;

 	if (fl) {
 		fl->cntxt_id = ntohs(c.fl0id);
 		fl->avail = fl->pend_cred = 0;
 		fl->pidx = fl->cidx = 0;
 		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
-		adap->sge.egr_map[fl->cntxt_id] = fl;
+		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
 		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
 	}
 	return 0;
@@ -2087,7 +2090,7 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
 	q->stops = q->restarts = 0;
 	q->stat = (void *)&q->desc[q->size];
 	q->cntxt_id = id;
-	adap->sge.egr_map[id] = q;
+	adap->sge.egr_map[id - adap->sge.egr_start] = q;
 }

 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
@@ -2259,7 +2262,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
 {
 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

-	adap->sge.ingr_map[rq->cntxt_id] = NULL;
+	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
 	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
 		   rq->cntxt_id, fl_id, 0xffff);
 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index 9e1a4b49b47a..bb813d94aea8 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -120,30 +120,6 @@ static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
 	}
 }

-#if 0
-/**
- * t4_write_indirect - write indirectly addressed registers
- * @adap: the adapter
- * @addr_reg: register holding the indirect addresses
- * @data_reg: register holding the value for the indirect registers
- * @vals: values to write
- * @nregs: how many indirect registers to write
- * @start_idx: address of first indirect register to write
- *
- * Writes a sequential block of registers that are accessed indirectly
- * through an address/data register pair.
- */
-static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
-			      unsigned int data_reg, const u32 *vals,
-			      unsigned int nregs, unsigned int start_idx)
-{
-	while (nregs--) {
-		t4_write_reg(adap, addr_reg, start_idx++);
-		t4_write_reg(adap, data_reg, *vals++);
-	}
-}
-#endif
-
 /*
  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
  */
@@ -1560,44 +1536,6 @@ void t4_intr_disable(struct adapter *adapter)
 }

 /**
- * t4_intr_clear - clear all interrupts
- * @adapter: the adapter whose interrupts should be cleared
- *
- * Clears all interrupts. The caller must be a PCI function managing
- * global interrupts.
- */
-void t4_intr_clear(struct adapter *adapter)
-{
-	static const unsigned int cause_reg[] = {
-		SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
-		PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
-		PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
-		PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
-		MC_INT_CAUSE,
-		MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
-		EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
-		CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
-		MYPF_REG(CIM_PF_HOST_INT_CAUSE),
-		TP_INT_CAUSE,
-		ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
-		PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
-		MPS_RX_PERR_INT_CAUSE,
-		CPL_INTR_CAUSE,
-		MYPF_REG(PL_PF_INT_CAUSE),
-		PL_PL_INT_CAUSE,
-		LE_DB_INT_CAUSE,
-	};
-
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
-		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
-
-	t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
-	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
-}
-
-/**
  * hash_mac_addr - return the hash value of a MAC address
  * @addr: the 48-bit Ethernet MAC address
  *
@@ -1709,36 +1647,6 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
 }

-/* Read an RSS table row */
-static int rd_rss_row(struct adapter *adap, int row, u32 *val)
-{
-	t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
-	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
-				   5, 0, val);
-}
-
-/**
- * t4_read_rss - read the contents of the RSS mapping table
- * @adapter: the adapter
- * @map: holds the contents of the RSS mapping table
- *
- * Reads the contents of the RSS hash->queue mapping table.
- */
-int t4_read_rss(struct adapter *adapter, u16 *map)
-{
-	u32 val;
-	int i, ret;
-
-	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
-		ret = rd_rss_row(adapter, i, &val);
-		if (ret)
-			return ret;
-		*map++ = LKPTBLQUEUE0_GET(val);
-		*map++ = LKPTBLQUEUE1_GET(val);
-	}
-	return 0;
-}
-
 /**
  * t4_tp_get_tcp_stats - read TP's TCP MIB counters
  * @adap: the adapter
@@ -1779,29 +1687,6 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
 }

 /**
- * t4_tp_get_err_stats - read TP's error MIB counters
- * @adap: the adapter
- * @st: holds the counter values
- *
- * Returns the values of TP's error counters.
- */
-void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
-{
-	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
-			 12, TP_MIB_MAC_IN_ERR_0);
-	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
-			 8, TP_MIB_TNL_CNG_DROP_0);
-	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
-			 4, TP_MIB_TNL_DROP_0);
-	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
-			 4, TP_MIB_OFD_VLN_DROP_0);
-	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
-			 4, TP_MIB_TCP_V6IN_ERR_0);
-	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
-			 2, TP_MIB_OFD_ARP_DROP);
-}
-
-/**
  * t4_read_mtu_tbl - returns the values in the HW path MTU table
  * @adap: the adapter
  * @mtus: where to store the MTU values
@@ -1916,122 +1801,6 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
 }

 /**
- * t4_set_trace_filter - configure one of the tracing filters
- * @adap: the adapter
- * @tp: the desired trace filter parameters
- * @idx: which filter to configure
- * @enable: whether to enable or disable the filter
- *
- * Configures one of the tracing filters available in HW. If @enable is
- * %0 @tp is not examined and may be %NULL.
- */
-int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
-			int idx, int enable)
-{
-	int i, ofst = idx * 4;
-	u32 data_reg, mask_reg, cfg;
-	u32 multitrc = TRCMULTIFILTER;
-
-	if (!enable) {
-		t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
-		goto out;
-	}
-
-	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
-	    tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
-	    tp->snap_len > 9600 || (idx && tp->snap_len > 256))
-		return -EINVAL;
-
-	if (tp->snap_len > 256) { /* must be tracer 0 */
-		if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
-		     t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
-		     t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
-			return -EINVAL; /* other tracers are enabled */
-		multitrc = 0;
-	} else if (idx) {
-		i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
-		if (TFCAPTUREMAX_GET(i) > 256 &&
-		    (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
-			return -EINVAL;
-	}
-
-	/* stop the tracer we'll be changing */
-	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
-
-	/* disable tracing globally if running in the wrong single/multi mode */
-	cfg = t4_read_reg(adap, MPS_TRC_CFG);
-	if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
-		t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
-		t4_read_reg(adap, MPS_TRC_CFG); /* flush */
-		msleep(1);
-		if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
-			return -ETIMEDOUT;
-	}
-	/*
-	 * At this point either the tracing is enabled and in the right mode or
-	 * disabled.
-	 */
-
-	idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
-	data_reg = MPS_TRC_FILTER0_MATCH + idx;
-	mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
-
-	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
-		t4_write_reg(adap, data_reg, tp->data[i]);
-		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
-	}
-	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
-		     TFCAPTUREMAX(tp->snap_len) |
-		     TFMINPKTSIZE(tp->min_len));
-	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
-		     TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
-		     TFPORT(tp->port) | TFEN |
-		     (tp->invert ? TFINVERTMATCH : 0));
-
-	cfg &= ~TRCMULTIFILTER;
-	t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
-out:	t4_read_reg(adap, MPS_TRC_CFG); /* flush */
-	return 0;
-}
-
-/**
- * t4_get_trace_filter - query one of the tracing filters
- * @adap: the adapter
- * @tp: the current trace filter parameters
- * @idx: which trace filter to query
- * @enabled: non-zero if the filter is enabled
- *
- * Returns the current settings of one of the HW tracing filters.
- */
-void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
-			 int *enabled)
-{
-	u32 ctla, ctlb;
-	int i, ofst = idx * 4;
-	u32 data_reg, mask_reg;
-
-	ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
-	ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
-
-	*enabled = !!(ctla & TFEN);
-	tp->snap_len = TFCAPTUREMAX_GET(ctlb);
-	tp->min_len = TFMINPKTSIZE_GET(ctlb);
-	tp->skip_ofst = TFOFFSET_GET(ctla);
-	tp->skip_len = TFLENGTH_GET(ctla);
-	tp->invert = !!(ctla & TFINVERTMATCH);
-	tp->port = TFPORT_GET(ctla);
-
-	ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
-	data_reg = MPS_TRC_FILTER0_MATCH + ofst;
-	mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
-
-	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
-		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
-		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
-	}
-}
-
-/**
  * get_mps_bg_map - return the buffer groups associated with a port
  * @adap: the adapter
  * @idx: the port index
@@ -2133,52 +1902,6 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
 }

 /**
- * t4_get_lb_stats - collect loopback port statistics
- * @adap: the adapter
- * @idx: the loopback port index
- * @p: the stats structure to fill
- *
- * Return HW statistics for the given loopback port.
- */
-void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
-{
-	u32 bgmap = get_mps_bg_map(adap, idx);
-
-#define GET_STAT(name) \
-	t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
-#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
-
-	p->octets = GET_STAT(BYTES);
-	p->frames = GET_STAT(FRAMES);
-	p->bcast_frames = GET_STAT(BCAST);
-	p->mcast_frames = GET_STAT(MCAST);
-	p->ucast_frames = GET_STAT(UCAST);
-	p->error_frames = GET_STAT(ERROR);
-
-	p->frames_64 = GET_STAT(64B);
-	p->frames_65_127 = GET_STAT(65B_127B);
-	p->frames_128_255 = GET_STAT(128B_255B);
-	p->frames_256_511 = GET_STAT(256B_511B);
-	p->frames_512_1023 = GET_STAT(512B_1023B);
-	p->frames_1024_1518 = GET_STAT(1024B_1518B);
-	p->frames_1519_max = GET_STAT(1519B_MAX);
-	p->drop = t4_read_reg(adap, PORT_REG(idx,
-					  MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
-
-	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
-	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
-	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
-	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
-	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
-	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
-	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
-	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
-
-#undef GET_STAT
-#undef GET_STAT_COM
-}
-
-/**
  * t4_wol_magic_enable - enable/disable magic packet WoL
  * @adap: the adapter
  * @port: the physical port index
@@ -2584,30 +2307,6 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
 }

 /**
- * t4_free_vi - free a virtual interface
- * @adap: the adapter
- * @mbox: mailbox to use for the FW command
- * @pf: the PF owning the VI
- * @vf: the VF owning the VI
- * @viid: virtual interface identifiler
- *
- * Free a previously allocated virtual interface.
- */
-int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
-	       unsigned int vf, unsigned int viid)
-{
-	struct fw_vi_cmd c;
-
-	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
-			    FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
-			    FW_VI_CMD_VFN(vf));
-	c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
-	c.type_viid = htons(FW_VI_CMD_VIID(viid));
-	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
-}
-
-/**
  * t4_set_rxmode - set Rx properties of a virtual interface
  * @adap: the adapter
  * @mbox: mailbox to use for the FW command
@@ -2833,37 +2532,6 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
 }

 /**
- * t4_iq_start_stop - enable/disable an ingress queue and its FLs
- * @adap: the adapter
- * @mbox: mailbox to use for the FW command
- * @start: %true to enable the queues, %false to disable them
- * @pf: the PF owning the queues
- * @vf: the VF owning the queues
- * @iqid: ingress queue id
- * @fl0id: FL0 queue id or 0xffff if no attached FL0
- * @fl1id: FL1 queue id or 0xffff if no attached FL1
- *
- * Starts or stops an ingress queue and its associated FLs, if any.
- */
-int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
-		     unsigned int pf, unsigned int vf, unsigned int iqid,
-		     unsigned int fl0id, unsigned int fl1id)
-{
-	struct fw_iq_cmd c;
-
-	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
-			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
-			    FW_IQ_CMD_VFN(vf));
-	c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
-				 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
-	c.iqid = htons(iqid);
-	c.fl0id = htons(fl0id);
-	c.fl1id = htons(fl1id);
-	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
-}
-
-/**
  * t4_iq_free - free an ingress queue and its FLs
  * @adap: the adapter
  * @mbox: mailbox to use for the FW command
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
index 10a055565776..c26b455f37de 100644
--- a/drivers/net/cxgb4/t4_hw.h
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -42,6 +42,7 @@ enum {
 	MAX_MTU      = 9600,  /* max MAC MTU, excluding header + FCS */
 	EEPROMSIZE   = 17408, /* Serial EEPROM physical size */
 	EEPROMVSIZE  = 32768, /* Serial EEPROM virtual address space size */
+	EEPROMPFSIZE = 1024,  /* EEPROM writable area size for PFn, n>0 */
 	RSS_NENTRIES = 2048,  /* # of entries in RSS mapping table */
 	TCB_SIZE     = 128,   /* TCB size */
 	NMTUS        = 16,    /* size of MTU table */
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 0969f2fbc1b0..940584a8a640 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -487,6 +487,11 @@ enum fw_params_param_pfvf {
 	FW_PARAMS_PARAM_PFVF_CPMASK = 0x25,
 	FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26,
 	FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27,
+	FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28,
+	FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29,
+	FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
+	FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
+	FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
 };

 /*