Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--  drivers/net/cxgb3/common.h         |  27
-rw-r--r--  drivers/net/cxgb3/cxgb3_defs.h     |   5
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c     | 121
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c  |  87
-rw-r--r--  drivers/net/cxgb3/mc5.c            |   3
-rw-r--r--  drivers/net/cxgb3/regs.h           |  32
-rw-r--r--  drivers/net/cxgb3/sge.c            |   2
-rw-r--r--  drivers/net/cxgb3/t3_hw.c          |  57
-rw-r--r--  drivers/net/cxgb3/version.h        |   5
-rw-r--r--  drivers/net/cxgb3/xgmac.c          | 191
10 files changed, 440 insertions(+), 90 deletions(-)
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index e23deeb7d06..8d137963369 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -112,8 +112,7 @@ enum {
112}; 112};
113 113
114enum { 114enum {
115 SUPPORTED_OFFLOAD = 1 << 24, 115 SUPPORTED_IRQ = 1 << 24
116 SUPPORTED_IRQ = 1 << 25
117}; 116};
118 117
119enum { /* adapter interrupt-maintained statistics */ 118enum { /* adapter interrupt-maintained statistics */
@@ -260,6 +259,10 @@ struct mac_stats {
260 unsigned long serdes_signal_loss; 259 unsigned long serdes_signal_loss;
261 unsigned long xaui_pcs_ctc_err; 260 unsigned long xaui_pcs_ctc_err;
262 unsigned long xaui_pcs_align_change; 261 unsigned long xaui_pcs_align_change;
262
263 unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */
264 unsigned long num_resets; /* # times reset due to stuck TX */
265
263}; 266};
264 267
265struct tp_mib_stats { 268struct tp_mib_stats {
@@ -354,6 +357,9 @@ enum {
354 MC5_MODE_72_BIT = 2 357 MC5_MODE_72_BIT = 2
355}; 358};
356 359
360/* MC5 min active region size */
361enum { MC5_MIN_TIDS = 16 };
362
357struct vpd_params { 363struct vpd_params {
358 unsigned int cclk; 364 unsigned int cclk;
359 unsigned int mclk; 365 unsigned int mclk;
@@ -398,6 +404,13 @@ struct adapter_params {
398 unsigned int stats_update_period; /* MAC stats accumulation period */ 404 unsigned int stats_update_period; /* MAC stats accumulation period */
399 unsigned int linkpoll_period; /* link poll period in 0.1s */ 405 unsigned int linkpoll_period; /* link poll period in 0.1s */
400 unsigned int rev; /* chip revision */ 406 unsigned int rev; /* chip revision */
407 unsigned int offload;
408};
409
410enum { /* chip revisions */
411 T3_REV_A = 0,
412 T3_REV_B = 2,
413 T3_REV_B2 = 3,
401}; 414};
402 415
403struct trace_params { 416struct trace_params {
@@ -465,6 +478,13 @@ struct cmac {
465 struct adapter *adapter; 478 struct adapter *adapter;
466 unsigned int offset; 479 unsigned int offset;
467 unsigned int nucast; /* # of address filters for unicast MACs */ 480 unsigned int nucast; /* # of address filters for unicast MACs */
481 unsigned int tx_tcnt;
482 unsigned int tx_xcnt;
483 u64 tx_mcnt;
484 unsigned int rx_xcnt;
485 u64 rx_mcnt;
486 unsigned int toggle_cnt;
487 unsigned int txen;
468 struct mac_stats stats; 488 struct mac_stats stats;
469}; 489};
470 490
@@ -588,7 +608,7 @@ static inline int is_10G(const struct adapter *adap)
588 608
589static inline int is_offload(const struct adapter *adap) 609static inline int is_offload(const struct adapter *adap)
590{ 610{
591 return adapter_info(adap)->caps & SUPPORTED_OFFLOAD; 611 return adap->params.offload;
592} 612}
593 613
594static inline unsigned int core_ticks_per_usec(const struct adapter *adap) 614static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
@@ -666,6 +686,7 @@ int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
666int t3_mac_set_num_ucast(struct cmac *mac, int n); 686int t3_mac_set_num_ucast(struct cmac *mac, int n);
667const struct mac_stats *t3_mac_update_stats(struct cmac *mac); 687const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
668int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc); 688int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
689int t3b2_mac_watchdog_task(struct cmac *mac);
669 690
670void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode); 691void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
671int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, 692int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
index e14862b43d1..483a594210a 100644
--- a/drivers/net/cxgb3/cxgb3_defs.h
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -67,7 +67,10 @@ static inline union listen_entry *stid2entry(const struct tid_info *t,
67static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t, 67static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
68 unsigned int tid) 68 unsigned int tid)
69{ 69{
70 return tid < t->ntids ? &(t->tid_tab[tid]) : NULL; 70 struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
71 &(t->tid_tab[tid]) : NULL;
72
73 return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
71} 74}
72 75
73/* 76/*
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 7ff834e45d6..67b4b219d92 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -42,6 +42,7 @@
42#include <linux/workqueue.h> 42#include <linux/workqueue.h>
43#include <linux/proc_fs.h> 43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h> 44#include <linux/rtnetlink.h>
45#include <linux/firmware.h>
45#include <asm/uaccess.h> 46#include <asm/uaccess.h>
46 47
47#include "common.h" 48#include "common.h"
@@ -184,16 +185,24 @@ void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
184 int speed, int duplex, int pause) 185 int speed, int duplex, int pause)
185{ 186{
186 struct net_device *dev = adapter->port[port_id]; 187 struct net_device *dev = adapter->port[port_id];
188 struct port_info *pi = netdev_priv(dev);
189 struct cmac *mac = &pi->mac;
187 190
188 /* Skip changes from disabled ports. */ 191 /* Skip changes from disabled ports. */
189 if (!netif_running(dev)) 192 if (!netif_running(dev))
190 return; 193 return;
191 194
192 if (link_stat != netif_carrier_ok(dev)) { 195 if (link_stat != netif_carrier_ok(dev)) {
193 if (link_stat) 196 if (link_stat) {
197 t3_mac_enable(mac, MAC_DIRECTION_RX);
194 netif_carrier_on(dev); 198 netif_carrier_on(dev);
195 else 199 } else {
196 netif_carrier_off(dev); 200 netif_carrier_off(dev);
201 pi->phy.ops->power_down(&pi->phy, 1);
202 t3_mac_disable(mac, MAC_DIRECTION_RX);
203 t3_link_start(&pi->phy, mac, &pi->link_config);
204 }
205
197 link_report(dev); 206 link_report(dev);
198 } 207 }
199} 208}
@@ -406,7 +415,7 @@ static void quiesce_rx(struct adapter *adap)
406static int setup_sge_qsets(struct adapter *adap) 415static int setup_sge_qsets(struct adapter *adap)
407{ 416{
408 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0; 417 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
409 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1; 418 unsigned int ntxq = SGE_TXQ_PER_SET;
410 419
411 if (adap->params.rev > 0 && !(adap->flags & USING_MSI)) 420 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
412 irq_idx = -1; 421 irq_idx = -1;
@@ -484,12 +493,14 @@ static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
484static ssize_t set_nfilters(struct net_device *dev, unsigned int val) 493static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
485{ 494{
486 struct adapter *adap = dev->priv; 495 struct adapter *adap = dev->priv;
496 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
487 497
488 if (adap->flags & FULL_INIT_DONE) 498 if (adap->flags & FULL_INIT_DONE)
489 return -EBUSY; 499 return -EBUSY;
490 if (val && adap->params.rev == 0) 500 if (val && adap->params.rev == 0)
491 return -EINVAL; 501 return -EINVAL;
492 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers) 502 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
503 min_tids)
493 return -EINVAL; 504 return -EINVAL;
494 adap->params.mc5.nfilters = val; 505 adap->params.mc5.nfilters = val;
495 return 0; 506 return 0;
@@ -507,7 +518,8 @@ static ssize_t set_nservers(struct net_device *dev, unsigned int val)
507 518
508 if (adap->flags & FULL_INIT_DONE) 519 if (adap->flags & FULL_INIT_DONE)
509 return -EBUSY; 520 return -EBUSY;
510 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters) 521 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
522 MC5_MIN_TIDS)
511 return -EINVAL; 523 return -EINVAL;
512 adap->params.mc5.nservers = val; 524 adap->params.mc5.nservers = val;
513 return 0; 525 return 0;
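Taken together, the two setters above now keep MC5_MIN_TIDS TCAM entries in reserve for connection TIDs when the TCAM is carved into server and filter regions. A standalone sketch of the combined bound (illustration only, not driver code; MC5_MIN_TIDS = 16 comes from the common.h hunk above, the sizes in main() are made up):

#include <stdio.h>
#include <stdbool.h>

#define MC5_MIN_TIDS 16	/* MC5 min active region size, from this patch */

/* A server/filter split fits only if it leaves MC5_MIN_TIDS entries free,
 * which is what set_nfilters()/set_nservers() now enforce between them. */
static bool tcam_partition_ok(unsigned int tcam_size,
                              unsigned int nservers, unsigned int nfilters)
{
    return nservers + nfilters + MC5_MIN_TIDS <= tcam_size;
}

int main(void)
{
    printf("%d\n", tcam_partition_ok(65536, 512, 1024));	/* 1: fits */
    printf("%d\n", tcam_partition_ok(2048, 1024, 1024));	/* 0: no room left for TIDs */
    return 0;
}
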
@@ -707,6 +719,28 @@ static void bind_qsets(struct adapter *adap)
707 } 719 }
708} 720}
709 721
722#define FW_FNAME "t3fw-%d.%d.%d.bin"
723
724static int upgrade_fw(struct adapter *adap)
725{
726 int ret;
727 char buf[64];
728 const struct firmware *fw;
729 struct device *dev = &adap->pdev->dev;
730
731 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
732 FW_VERSION_MINOR, FW_VERSION_MICRO);
733 ret = request_firmware(&fw, buf, dev);
734 if (ret < 0) {
735 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
736 buf);
737 return ret;
738 }
739 ret = t3_load_fw(adap, fw->data, fw->size);
740 release_firmware(fw);
741 return ret;
742}
743
710/** 744/**
711 * cxgb_up - enable the adapter 745 * cxgb_up - enable the adapter
712 * @adapter: adapter being enabled 746 * @adapter: adapter being enabled
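With FW_FNAME above and the FW_VERSION_{MAJOR,MINOR,MICRO} values this patch sets in version.h (3.3.0), upgrade_fw() asks request_firmware() for a file named t3fw-3.3.0.bin; the loader then resolves that name through the usual firmware search path (typically /lib/firmware, depending on the distribution). A trivial standalone check of the name formatting (illustration only):

#include <stdio.h>

#define FW_FNAME "t3fw-%d.%d.%d.bin"	/* from cxgb3_main.c above */

int main(void)
{
    char buf[64];

    /* 3, 3, 0 = FW_VERSION_MAJOR/MINOR/MICRO after this patch (version.h) */
    snprintf(buf, sizeof(buf), FW_FNAME, 3, 3, 0);
    printf("%s\n", buf);	/* prints: t3fw-3.3.0.bin */
    return 0;
}
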
@@ -723,6 +757,8 @@ static int cxgb_up(struct adapter *adap)
723 757
724 if (!(adap->flags & FULL_INIT_DONE)) { 758 if (!(adap->flags & FULL_INIT_DONE)) {
725 err = t3_check_fw_version(adap); 759 err = t3_check_fw_version(adap);
760 if (err == -EINVAL)
761 err = upgrade_fw(adap);
726 if (err) 762 if (err)
727 goto out; 763 goto out;
728 764
@@ -734,6 +770,8 @@ static int cxgb_up(struct adapter *adap)
734 if (err) 770 if (err)
735 goto out; 771 goto out;
736 772
773 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
774
737 err = setup_sge_qsets(adap); 775 err = setup_sge_qsets(adap);
738 if (err) 776 if (err)
739 goto out; 777 goto out;
@@ -894,7 +932,7 @@ static int cxgb_open(struct net_device *dev)
894 return err; 932 return err;
895 933
896 set_bit(pi->port_id, &adapter->open_device_map); 934 set_bit(pi->port_id, &adapter->open_device_map);
897 if (!ofld_disable) { 935 if (is_offload(adapter) && !ofld_disable) {
898 err = offload_open(dev); 936 err = offload_open(dev);
899 if (err) 937 if (err)
900 printk(KERN_WARNING 938 printk(KERN_WARNING
@@ -1031,7 +1069,11 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
1031 "VLANinsertions ", 1069 "VLANinsertions ",
1032 "TxCsumOffload ", 1070 "TxCsumOffload ",
1033 "RxCsumGood ", 1071 "RxCsumGood ",
1034 "RxDrops " 1072 "RxDrops ",
1073
1074 "CheckTXEnToggled ",
1075 "CheckResets ",
1076
1035}; 1077};
1036 1078
1037static int get_stats_count(struct net_device *dev) 1079static int get_stats_count(struct net_device *dev)
@@ -1145,6 +1187,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1145 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM); 1187 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1146 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD); 1188 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1147 *data++ = s->rx_cong_drops; 1189 *data++ = s->rx_cong_drops;
1190
1191 *data++ = s->num_toggled;
1192 *data++ = s->num_resets;
1148} 1193}
1149 1194
1150static inline void reg_block_dump(struct adapter *ap, void *buf, 1195static inline void reg_block_dump(struct adapter *ap, void *buf,
@@ -1362,23 +1407,27 @@ static int set_rx_csum(struct net_device *dev, u32 data)
1362 1407
1363static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) 1408static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1364{ 1409{
1365 struct adapter *adapter = dev->priv; 1410 const struct adapter *adapter = dev->priv;
1411 const struct port_info *pi = netdev_priv(dev);
1412 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1366 1413
1367 e->rx_max_pending = MAX_RX_BUFFERS; 1414 e->rx_max_pending = MAX_RX_BUFFERS;
1368 e->rx_mini_max_pending = 0; 1415 e->rx_mini_max_pending = 0;
1369 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS; 1416 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1370 e->tx_max_pending = MAX_TXQ_ENTRIES; 1417 e->tx_max_pending = MAX_TXQ_ENTRIES;
1371 1418
1372 e->rx_pending = adapter->params.sge.qset[0].fl_size; 1419 e->rx_pending = q->fl_size;
1373 e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size; 1420 e->rx_mini_pending = q->rspq_size;
1374 e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size; 1421 e->rx_jumbo_pending = q->jumbo_size;
1375 e->tx_pending = adapter->params.sge.qset[0].txq_size[0]; 1422 e->tx_pending = q->txq_size[0];
1376} 1423}
1377 1424
1378static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) 1425static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1379{ 1426{
1380 int i; 1427 int i;
1428 struct qset_params *q;
1381 struct adapter *adapter = dev->priv; 1429 struct adapter *adapter = dev->priv;
1430 const struct port_info *pi = netdev_priv(dev);
1382 1431
1383 if (e->rx_pending > MAX_RX_BUFFERS || 1432 if (e->rx_pending > MAX_RX_BUFFERS ||
1384 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS || 1433 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
@@ -1393,9 +1442,8 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1393 if (adapter->flags & FULL_INIT_DONE) 1442 if (adapter->flags & FULL_INIT_DONE)
1394 return -EBUSY; 1443 return -EBUSY;
1395 1444
1396 for (i = 0; i < SGE_QSETS; ++i) { 1445 q = &adapter->params.sge.qset[pi->first_qset];
1397 struct qset_params *q = &adapter->params.sge.qset[i]; 1446 for (i = 0; i < pi->nqsets; ++i, ++q) {
1398
1399 q->rspq_size = e->rx_mini_pending; 1447 q->rspq_size = e->rx_mini_pending;
1400 q->fl_size = e->rx_pending; 1448 q->fl_size = e->rx_pending;
1401 q->jumbo_size = e->rx_jumbo_pending; 1449 q->jumbo_size = e->rx_jumbo_pending;
@@ -2067,6 +2115,42 @@ static void check_link_status(struct adapter *adapter)
2067 } 2115 }
2068} 2116}
2069 2117
2118static void check_t3b2_mac(struct adapter *adapter)
2119{
2120 int i;
2121
2122 if (!rtnl_trylock()) /* synchronize with ifdown */
2123 return;
2124
2125 for_each_port(adapter, i) {
2126 struct net_device *dev = adapter->port[i];
2127 struct port_info *p = netdev_priv(dev);
2128 int status;
2129
2130 if (!netif_running(dev))
2131 continue;
2132
2133 status = 0;
2134 if (netif_running(dev) && netif_carrier_ok(dev))
2135 status = t3b2_mac_watchdog_task(&p->mac);
2136 if (status == 1)
2137 p->mac.stats.num_toggled++;
2138 else if (status == 2) {
2139 struct cmac *mac = &p->mac;
2140
2141 t3_mac_set_mtu(mac, dev->mtu);
2142 t3_mac_set_address(mac, 0, dev->dev_addr);
2143 cxgb_set_rxmode(dev);
2144 t3_link_start(&p->phy, mac, &p->link_config);
2145 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2146 t3_port_intr_enable(adapter, p->port_id);
2147 p->mac.stats.num_resets++;
2148 }
2149 }
2150 rtnl_unlock();
2151}
2152
2153
2070static void t3_adap_check_task(struct work_struct *work) 2154static void t3_adap_check_task(struct work_struct *work)
2071{ 2155{
2072 struct adapter *adapter = container_of(work, struct adapter, 2156 struct adapter *adapter = container_of(work, struct adapter,
@@ -2087,6 +2171,9 @@ static void t3_adap_check_task(struct work_struct *work)
2087 adapter->check_task_cnt = 0; 2171 adapter->check_task_cnt = 0;
2088 } 2172 }
2089 2173
2174 if (p->rev == T3_REV_B2)
2175 check_t3b2_mac(adapter);
2176
2090 /* Schedule the next check update if any port is active. */ 2177 /* Schedule the next check update if any port is active. */
2091 spin_lock(&adapter->work_lock); 2178 spin_lock(&adapter->work_lock);
2092 if (adapter->open_device_map & PORT_MASK) 2179 if (adapter->open_device_map & PORT_MASK)
@@ -2195,9 +2282,9 @@ static void __devinit print_port_info(struct adapter *adap,
2195 2282
2196 if (!test_bit(i, &adap->registered_device_map)) 2283 if (!test_bit(i, &adap->registered_device_map))
2197 continue; 2284 continue;
2198 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n", 2285 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2199 dev->name, ai->desc, pi->port_type->desc, 2286 dev->name, ai->desc, pi->port_type->desc,
2200 adap->params.rev, buf, 2287 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2201 (adap->flags & USING_MSIX) ? " MSI-X" : 2288 (adap->flags & USING_MSIX) ? " MSI-X" :
2202 (adap->flags & USING_MSI) ? " MSI" : ""); 2289 (adap->flags & USING_MSI) ? " MSI" : "");
2203 if (adap->name == dev->name && adap->params.vpd.mclk) 2290 if (adap->name == dev->name && adap->params.vpd.mclk)
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index f6ed033efb5..199e5066acf 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -508,6 +508,7 @@ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
508 508
509 spin_lock_bh(&td->tid_release_lock); 509 spin_lock_bh(&td->tid_release_lock);
510 p->ctx = (void *)td->tid_release_list; 510 p->ctx = (void *)td->tid_release_list;
511 p->client = NULL;
511 td->tid_release_list = p; 512 td->tid_release_list = p;
512 if (!p->ctx) 513 if (!p->ctx)
513 schedule_work(&td->tid_release_task); 514 schedule_work(&td->tid_release_task);
@@ -553,7 +554,9 @@ int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
553 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; 554 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
554 555
555 spin_lock_bh(&t->atid_lock); 556 spin_lock_bh(&t->atid_lock);
556 if (t->afree) { 557 if (t->afree &&
558 t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
559 t->ntids) {
557 union active_open_entry *p = t->afree; 560 union active_open_entry *p = t->afree;
558 561
559 atid = (p - t->atid_tab) + t->atid_base; 562 atid = (p - t->atid_tab) + t->atid_base;
@@ -621,7 +624,8 @@ static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
621 struct t3c_tid_entry *t3c_tid; 624 struct t3c_tid_entry *t3c_tid;
622 625
623 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); 626 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
624 if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers && 627 if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
628 t3c_tid->client->handlers &&
625 t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) { 629 t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
626 return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb, 630 return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
627 t3c_tid-> 631 t3c_tid->
@@ -640,7 +644,7 @@ static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
640 struct t3c_tid_entry *t3c_tid; 644 struct t3c_tid_entry *t3c_tid;
641 645
642 t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid); 646 t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
643 if (t3c_tid->ctx && t3c_tid->client->handlers && 647 if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
644 t3c_tid->client->handlers[p->opcode]) { 648 t3c_tid->client->handlers[p->opcode]) {
645 return t3c_tid->client->handlers[p->opcode] (dev, skb, 649 return t3c_tid->client->handlers[p->opcode] (dev, skb,
646 t3c_tid->ctx); 650 t3c_tid->ctx);
@@ -658,7 +662,7 @@ static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
658 struct t3c_tid_entry *t3c_tid; 662 struct t3c_tid_entry *t3c_tid;
659 663
660 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); 664 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
661 if (t3c_tid->ctx && t3c_tid->client->handlers && 665 if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
662 t3c_tid->client->handlers[p->opcode]) { 666 t3c_tid->client->handlers[p->opcode]) {
663 return t3c_tid->client->handlers[p->opcode] 667 return t3c_tid->client->handlers[p->opcode]
664 (dev, skb, t3c_tid->ctx); 668 (dev, skb, t3c_tid->ctx);
@@ -687,6 +691,28 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
687 } 691 }
688} 692}
689 693
694/*
695 * Returns an sk_buff for a reply CPL message of size len. If the input
696 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
697 * is allocated. The input skb must be of size at least len. Note that this
698 * operation does not destroy the original skb data even if it decides to reuse
699 * the buffer.
700 */
701static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
702 int gfp)
703{
704 if (likely(!skb_cloned(skb))) {
705 BUG_ON(skb->len < len);
706 __skb_trim(skb, len);
707 skb_get(skb);
708 } else {
709 skb = alloc_skb(len, gfp);
710 if (skb)
711 __skb_put(skb, len);
712 }
713 return skb;
714}
715
690static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb) 716static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
691{ 717{
692 union opcode_tid *p = cplhdr(skb); 718 union opcode_tid *p = cplhdr(skb);
@@ -694,30 +720,39 @@ static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
694 struct t3c_tid_entry *t3c_tid; 720 struct t3c_tid_entry *t3c_tid;
695 721
696 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); 722 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
697 if (t3c_tid->ctx && t3c_tid->client->handlers && 723 if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
698 t3c_tid->client->handlers[p->opcode]) { 724 t3c_tid->client->handlers[p->opcode]) {
699 return t3c_tid->client->handlers[p->opcode] 725 return t3c_tid->client->handlers[p->opcode]
700 (dev, skb, t3c_tid->ctx); 726 (dev, skb, t3c_tid->ctx);
701 } else { 727 } else {
702 struct cpl_abort_req_rss *req = cplhdr(skb); 728 struct cpl_abort_req_rss *req = cplhdr(skb);
703 struct cpl_abort_rpl *rpl; 729 struct cpl_abort_rpl *rpl;
730 struct sk_buff *reply_skb;
731 unsigned int tid = GET_TID(req);
732 u8 cmd = req->status;
733
734 if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
735 req->status == CPL_ERR_PERSIST_NEG_ADVICE)
736 goto out;
704 737
705 struct sk_buff *skb = 738 reply_skb = cxgb3_get_cpl_reply_skb(skb,
706 alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC); 739 sizeof(struct
707 if (!skb) { 740 cpl_abort_rpl),
741 GFP_ATOMIC);
742
743 if (!reply_skb) {
708 printk("do_abort_req_rss: couldn't get skb!\n"); 744 printk("do_abort_req_rss: couldn't get skb!\n");
709 goto out; 745 goto out;
710 } 746 }
711 skb->priority = CPL_PRIORITY_DATA; 747 reply_skb->priority = CPL_PRIORITY_DATA;
712 __skb_put(skb, sizeof(struct cpl_abort_rpl)); 748 __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
713 rpl = cplhdr(skb); 749 rpl = cplhdr(reply_skb);
714 rpl->wr.wr_hi = 750 rpl->wr.wr_hi =
715 htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); 751 htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
716 rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req))); 752 rpl->wr.wr_lo = htonl(V_WR_TID(tid));
717 OPCODE_TID(rpl) = 753 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
718 htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req))); 754 rpl->cmd = cmd;
719 rpl->cmd = req->status; 755 cxgb3_ofld_send(dev, reply_skb);
720 cxgb3_ofld_send(dev, skb);
721out: 756out:
722 return CPL_RET_BUF_DONE; 757 return CPL_RET_BUF_DONE;
723 } 758 }
@@ -730,7 +765,7 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
730 struct t3c_tid_entry *t3c_tid; 765 struct t3c_tid_entry *t3c_tid;
731 766
732 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); 767 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
733 if (t3c_tid->ctx && t3c_tid->client->handlers && 768 if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
734 t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) { 769 t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
735 return t3c_tid->client->handlers[CPL_ACT_ESTABLISH] 770 return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
736 (dev, skb, t3c_tid->ctx); 771 (dev, skb, t3c_tid->ctx);
@@ -741,17 +776,6 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
741 } 776 }
742} 777}
743 778
744static int do_set_tcb_rpl(struct t3cdev *dev, struct sk_buff *skb)
745{
746 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
747
748 if (rpl->status != CPL_ERR_NONE)
749 printk(KERN_ERR
750 "Unexpected SET_TCB_RPL status %u for tid %u\n",
751 rpl->status, GET_TID(rpl));
752 return CPL_RET_BUF_DONE;
753}
754
755static int do_trace(struct t3cdev *dev, struct sk_buff *skb) 779static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
756{ 780{
757 struct cpl_trace_pkt *p = cplhdr(skb); 781 struct cpl_trace_pkt *p = cplhdr(skb);
@@ -771,7 +795,7 @@ static int do_term(struct t3cdev *dev, struct sk_buff *skb)
771 struct t3c_tid_entry *t3c_tid; 795 struct t3c_tid_entry *t3c_tid;
772 796
773 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); 797 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
774 if (t3c_tid->ctx && t3c_tid->client->handlers && 798 if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
775 t3c_tid->client->handlers[opcode]) { 799 t3c_tid->client->handlers[opcode]) {
776 return t3c_tid->client->handlers[opcode] (dev, skb, 800 return t3c_tid->client->handlers[opcode] (dev, skb,
777 t3c_tid->ctx); 801 t3c_tid->ctx);
@@ -970,7 +994,7 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
970 for (tid = 0; tid < ti->ntids; tid++) { 994 for (tid = 0; tid < ti->ntids; tid++) {
971 te = lookup_tid(ti, tid); 995 te = lookup_tid(ti, tid);
972 BUG_ON(!te); 996 BUG_ON(!te);
973 if (te->ctx && te->client && te->client->redirect) { 997 if (te && te->ctx && te->client && te->client->redirect) {
974 update_tcb = te->client->redirect(te->ctx, old, new, e); 998 update_tcb = te->client->redirect(te->ctx, old, new, e);
975 if (update_tcb) { 999 if (update_tcb) {
976 l2t_hold(L2DATA(tdev), e); 1000 l2t_hold(L2DATA(tdev), e);
@@ -1213,7 +1237,8 @@ void __init cxgb3_offload_init(void)
1213 t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl); 1237 t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
1214 t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss); 1238 t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
1215 t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish); 1239 t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
1216 t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl); 1240 t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
1241 t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
1217 t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term); 1242 t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
1218 t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl); 1243 t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
1219 t3_register_cpl_handler(CPL_TRACE_PKT, do_trace); 1244 t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
index 644d62ea86a..84c1ffa8e2d 100644
--- a/drivers/net/cxgb3/mc5.c
+++ b/drivers/net/cxgb3/mc5.c
@@ -328,6 +328,9 @@ int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
328 unsigned int tcam_size = mc5->tcam_size; 328 unsigned int tcam_size = mc5->tcam_size;
329 struct adapter *adap = mc5->adapter; 329 struct adapter *adap = mc5->adapter;
330 330
331 if (!tcam_size)
332 return 0;
333
331 if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size) 334 if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
332 return -EINVAL; 335 return -EINVAL;
333 336
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index b56c5f52bcd..e5a553410e2 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -1206,6 +1206,14 @@
1206 1206
1207#define A_TP_RX_TRC_KEY0 0x120 1207#define A_TP_RX_TRC_KEY0 0x120
1208 1208
1209#define A_TP_TX_DROP_CNT_CH0 0x12d
1210
1211#define S_TXDROPCNTCH0RCVD 0
1212#define M_TXDROPCNTCH0RCVD 0xffff
1213#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD)
1214#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \
1215 M_TXDROPCNTCH0RCVD)
1216
1209#define A_ULPRX_CTL 0x500 1217#define A_ULPRX_CTL 0x500
1210 1218
1211#define S_ROUND_ROBIN 4 1219#define S_ROUND_ROBIN 4
@@ -1226,9 +1234,15 @@
1226 1234
1227#define A_ULPRX_ISCSI_TAGMASK 0x514 1235#define A_ULPRX_ISCSI_TAGMASK 0x514
1228 1236
1237#define S_HPZ0 0
1238#define M_HPZ0 0xf
1239#define V_HPZ0(x) ((x) << S_HPZ0)
1240#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
1241
1229#define A_ULPRX_TDDP_LLIMIT 0x51c 1242#define A_ULPRX_TDDP_LLIMIT 0x51c
1230 1243
1231#define A_ULPRX_TDDP_ULIMIT 0x520 1244#define A_ULPRX_TDDP_ULIMIT 0x520
1245#define A_ULPRX_TDDP_PSZ 0x528
1232 1246
1233#define A_ULPRX_STAG_LLIMIT 0x52c 1247#define A_ULPRX_STAG_LLIMIT 0x52c
1234 1248
@@ -1834,6 +1848,8 @@
1834#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN) 1848#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
1835#define F_TXPAUSEEN V_TXPAUSEEN(1U) 1849#define F_TXPAUSEEN V_TXPAUSEEN(1U)
1836 1850
1851#define A_XGM_TX_PAUSE_QUANTA 0x808
1852
1837#define A_XGM_RX_CTRL 0x80c 1853#define A_XGM_RX_CTRL 0x80c
1838 1854
1839#define S_RXEN 0 1855#define S_RXEN 0
@@ -1920,11 +1936,20 @@
1920 1936
1921#define A_XGM_TXFIFO_CFG 0x888 1937#define A_XGM_TXFIFO_CFG 0x888
1922 1938
1939#define S_TXIPG 13
1940#define M_TXIPG 0xff
1941#define V_TXIPG(x) ((x) << S_TXIPG)
1942#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)
1943
1923#define S_TXFIFOTHRESH 4 1944#define S_TXFIFOTHRESH 4
1924#define M_TXFIFOTHRESH 0x1ff 1945#define M_TXFIFOTHRESH 0x1ff
1925 1946
1926#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH) 1947#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
1927 1948
1949#define S_ENDROPPKT 21
1950#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT)
1951#define F_ENDROPPKT V_ENDROPPKT(1U)
1952
1928#define A_XGM_SERDES_CTRL 0x890 1953#define A_XGM_SERDES_CTRL 0x890
1929#define A_XGM_SERDES_CTRL0 0x8e0 1954#define A_XGM_SERDES_CTRL0 0x8e0
1930 1955
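The new TXIPG field follows the driver's usual S_/M_/V_/G_ shift-and-mask macro pattern. A self-contained illustration using the values defined above (not driver code; the 0x100 threshold is an arbitrary example value):

#include <stdio.h>

/* Field macros copied from the regs.h additions above */
#define S_TXIPG 13
#define M_TXIPG 0xff
#define V_TXIPG(x) ((x) << S_TXIPG)
#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)

#define S_TXFIFOTHRESH 4
#define M_TXFIFOTHRESH 0x1ff
#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)

int main(void)
{
    /* Pack a Tx FIFO threshold and an inter-packet gap of 1 into one value,
     * as the t3_mac_set_mtu() change in xgmac.c does, then extract the IPG. */
    unsigned int reg = V_TXFIFOTHRESH(0x100) | V_TXIPG(1);

    printf("reg = 0x%x, ipg = %u\n", reg, G_TXIPG(reg));	/* ipg = 1 */
    return 0;
}
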
@@ -2190,6 +2215,13 @@
2190 2215
2191#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4 2216#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
2192 2217
2218#define A_XGM_TX_SPI4_SOP_EOP_CNT 0x9a8
2219
2220#define S_TXSPI4SOPCNT 16
2221#define M_TXSPI4SOPCNT 0xffff
2222#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT)
2223#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT)
2224
2193#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac 2225#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
2194 2226
2195#define XGMAC0_1_BASE_ADDR 0xa00 2227#define XGMAC0_1_BASE_ADDR 0xa00
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index c23783432e5..027ab2c3825 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2631,7 +2631,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2631 q->txq[TXQ_ETH].stop_thres = nports * 2631 q->txq[TXQ_ETH].stop_thres = nports *
2632 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); 2632 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2633 2633
2634 if (ntxq == 1) { 2634 if (!is_offload(adapter)) {
2635#ifdef USE_RX_PAGE 2635#ifdef USE_RX_PAGE
2636 q->fl[0].buf_size = RX_PAGE_SIZE; 2636 q->fl[0].buf_size = RX_PAGE_SIZE;
2637#else 2637#else
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index eaa7a2e89a3..fb485d0a43d 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -438,23 +438,23 @@ static const struct adapter_info t3_adap_info[] = {
438 {2, 0, 0, 0, 438 {2, 0, 0, 0,
439 F_GPIO2_OEN | F_GPIO4_OEN | 439 F_GPIO2_OEN | F_GPIO4_OEN |
440 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5, 440 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
441 SUPPORTED_OFFLOAD, 441 0,
442 &mi1_mdio_ops, "Chelsio PE9000"}, 442 &mi1_mdio_ops, "Chelsio PE9000"},
443 {2, 0, 0, 0, 443 {2, 0, 0, 0,
444 F_GPIO2_OEN | F_GPIO4_OEN | 444 F_GPIO2_OEN | F_GPIO4_OEN |
445 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5, 445 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
446 SUPPORTED_OFFLOAD, 446 0,
447 &mi1_mdio_ops, "Chelsio T302"}, 447 &mi1_mdio_ops, "Chelsio T302"},
448 {1, 0, 0, 0, 448 {1, 0, 0, 0,
449 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | 449 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
450 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0, 450 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
451 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD, 451 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
452 &mi1_mdio_ext_ops, "Chelsio T310"}, 452 &mi1_mdio_ext_ops, "Chelsio T310"},
453 {2, 0, 0, 0, 453 {2, 0, 0, 0,
454 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN | 454 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
455 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL | 455 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
456 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0, 456 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
457 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD, 457 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
458 &mi1_mdio_ext_ops, "Chelsio T320"}, 458 &mi1_mdio_ext_ops, "Chelsio T320"},
459}; 459};
460 460
@@ -681,7 +681,8 @@ enum {
681 SF_ERASE_SECTOR = 0xd8, /* erase sector */ 681 SF_ERASE_SECTOR = 0xd8, /* erase sector */
682 682
683 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */ 683 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
684 FW_VERS_ADDR = 0x77ffc /* flash address holding FW version */ 684 FW_VERS_ADDR = 0x77ffc, /* flash address holding FW version */
685 FW_MIN_SIZE = 8 /* at least version and csum */
685}; 686};
686 687
687/** 688/**
@@ -935,7 +936,7 @@ int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
935 const u32 *p = (const u32 *)fw_data; 936 const u32 *p = (const u32 *)fw_data;
936 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16; 937 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
937 938
938 if (size & 3) 939 if ((size & 3) || size < FW_MIN_SIZE)
939 return -EINVAL; 940 return -EINVAL;
940 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR) 941 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
941 return -EFBIG; 942 return -EFBIG;
@@ -1522,19 +1523,25 @@ static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1522 */ 1523 */
1523int t3_phy_intr_handler(struct adapter *adapter) 1524int t3_phy_intr_handler(struct adapter *adapter)
1524{ 1525{
1525 static const int intr_gpio_bits[] = { 8, 0x20 }; 1526 u32 mask, gpi = adapter_info(adapter)->gpio_intr;
1526
1527 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE); 1527 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1528 1528
1529 for_each_port(adapter, i) { 1529 for_each_port(adapter, i) {
1530 if (cause & intr_gpio_bits[i]) { 1530 struct port_info *p = adap2pinfo(adapter, i);
1531 struct cphy *phy = &adap2pinfo(adapter, i)->phy; 1531
1532 int phy_cause = phy->ops->intr_handler(phy); 1532 mask = gpi - (gpi & (gpi - 1));
1533 gpi -= mask;
1534
1535 if (!(p->port_type->caps & SUPPORTED_IRQ))
1536 continue;
1537
1538 if (cause & mask) {
1539 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1533 1540
1534 if (phy_cause & cphy_cause_link_change) 1541 if (phy_cause & cphy_cause_link_change)
1535 t3_link_changed(adapter, i); 1542 t3_link_changed(adapter, i);
1536 if (phy_cause & cphy_cause_fifo_error) 1543 if (phy_cause & cphy_cause_fifo_error)
1537 phy->fifo_errors++; 1544 p->phy.fifo_errors++;
1538 } 1545 }
1539 } 1546 }
1540 1547
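The rewritten handler walks the adapter's GPIO interrupt mask one set bit at a time, pairing the lowest remaining bit with the next port and skipping PHYs that do not raise interrupts (no SUPPORTED_IRQ in their capabilities). The bit manipulation is simply "isolate lowest set bit, then clear it"; a standalone demonstration (illustration only, the GPIO bits are made up):

#include <stdio.h>

int main(void)
{
    /* Suppose ports 0 and 1 use GPIO interrupt bits 3 and 5 (mask 0x28). */
    unsigned int gpi = 0x28;
    int port = 0;

    while (gpi) {
        unsigned int mask = gpi - (gpi & (gpi - 1));	/* lowest set bit */

        gpi -= mask;					/* consume it */
        printf("port %d -> GPIO bit mask 0x%x\n", port++, mask);
    }
    return 0;
}
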
@@ -2899,6 +2906,9 @@ static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
2899 struct adapter *adapter = mc7->adapter; 2906 struct adapter *adapter = mc7->adapter;
2900 const struct mc7_timing_params *p = &mc7_timings[mem_type]; 2907 const struct mc7_timing_params *p = &mc7_timings[mem_type];
2901 2908
2909 if (!mc7->size)
2910 return 0;
2911
2902 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); 2912 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
2903 slow = val & F_SLOW; 2913 slow = val & F_SLOW;
2904 width = G_WIDTH(val); 2914 width = G_WIDTH(val);
@@ -3099,8 +3109,10 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
3099 do { /* wait for uP to initialize */ 3109 do { /* wait for uP to initialize */
3100 msleep(20); 3110 msleep(20);
3101 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts); 3111 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3102 if (!attempts) 3112 if (!attempts) {
3113 CH_ERR(adapter, "uP initialization timed out\n");
3103 goto out_err; 3114 goto out_err;
3115 }
3104 3116
3105 err = 0; 3117 err = 0;
3106out_err: 3118out_err:
@@ -3200,7 +3212,7 @@ static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3200 mc7->name = name; 3212 mc7->name = name;
3201 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR; 3213 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3202 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); 3214 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3203 mc7->size = mc7_calc_size(cfg); 3215 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3204 mc7->width = G_WIDTH(cfg); 3216 mc7->width = G_WIDTH(cfg);
3205} 3217}
3206 3218
@@ -3227,6 +3239,7 @@ void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3227 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1)); 3239 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3228 t3_write_reg(adapter, A_T3DBG_GPIO_EN, 3240 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3229 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL); 3241 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3242 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3230 3243
3231 if (adapter->params.rev == 0 || !uses_xaui(adapter)) 3244 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3232 val |= F_ENRGMII; 3245 val |= F_ENRGMII;
@@ -3243,15 +3256,17 @@ void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3243} 3256}
3244 3257
3245/* 3258/*
3246 * Reset the adapter. PCIe cards lose their config space during reset, PCI-X 3259 * Reset the adapter.
3260 * Older PCIe cards lose their config space during reset, PCI-X
3247 * ones don't. 3261 * ones don't.
3248 */ 3262 */
3249int t3_reset_adapter(struct adapter *adapter) 3263int t3_reset_adapter(struct adapter *adapter)
3250{ 3264{
3251 int i; 3265 int i, save_and_restore_pcie =
3266 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3252 uint16_t devid = 0; 3267 uint16_t devid = 0;
3253 3268
3254 if (is_pcie(adapter)) 3269 if (save_and_restore_pcie)
3255 pci_save_state(adapter->pdev); 3270 pci_save_state(adapter->pdev);
3256 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE); 3271 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3257 3272
@@ -3269,7 +3284,7 @@ int t3_reset_adapter(struct adapter *adapter)
3269 if (devid != 0x1425) 3284 if (devid != 0x1425)
3270 return -1; 3285 return -1;
3271 3286
3272 if (is_pcie(adapter)) 3287 if (save_and_restore_pcie)
3273 pci_restore_state(adapter->pdev); 3288 pci_restore_state(adapter->pdev);
3274 return 0; 3289 return 0;
3275} 3290}
@@ -3323,7 +3338,13 @@ int __devinit t3_prep_adapter(struct adapter *adapter,
3323 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size); 3338 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3324 p->ntimer_qs = p->cm_size >= (128 << 20) || 3339 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3325 adapter->params.rev > 0 ? 12 : 6; 3340 adapter->params.rev > 0 ? 12 : 6;
3341 }
3342
3343 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3344 t3_mc7_size(&adapter->pmtx) &&
3345 t3_mc7_size(&adapter->cm);
3326 3346
3347 if (is_offload(adapter)) {
3327 adapter->params.mc5.nservers = DEFAULT_NSERVERS; 3348 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3328 adapter->params.mc5.nfilters = adapter->params.rev > 0 ? 3349 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3329 DEFAULT_NFILTERS : 0; 3350 DEFAULT_NFILTERS : 0;
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 82278f85025..042e27e291c 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -36,6 +36,9 @@
36#define DRV_NAME "cxgb3" 36#define DRV_NAME "cxgb3"
37/* Driver version */ 37/* Driver version */
38#define DRV_VERSION "1.0-ko" 38#define DRV_VERSION "1.0-ko"
39
40/* Firmware version */
39#define FW_VERSION_MAJOR 3 41#define FW_VERSION_MAJOR 3
40#define FW_VERSION_MINOR 2 42#define FW_VERSION_MINOR 3
43#define FW_VERSION_MICRO 0
41#endif /* __CHELSIO_VERSION_H */ 44#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index 907a272ae32..a506792f957 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -124,9 +124,6 @@ int t3_mac_reset(struct cmac *mac)
124 xaui_serdes_reset(mac); 124 xaui_serdes_reset(mac);
125 } 125 }
126 126
127 if (adap->params.rev > 0)
128 t3_write_reg(adap, A_XGM_PAUSE_TIMER + oft, 0xf000);
129
130 val = F_MAC_RESET_; 127 val = F_MAC_RESET_;
131 if (is_10G(adap)) 128 if (is_10G(adap))
132 val |= F_PCS_RESET_; 129 val |= F_PCS_RESET_;
@@ -145,6 +142,58 @@ int t3_mac_reset(struct cmac *mac)
145 return 0; 142 return 0;
146} 143}
147 144
145int t3b2_mac_reset(struct cmac *mac)
146{
147 struct adapter *adap = mac->adapter;
148 unsigned int oft = mac->offset;
149 u32 val;
150
151 if (!macidx(mac))
152 t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
153 else
154 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
155
156 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
157 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
158
159 msleep(10);
160
161 /* Check for xgm Rx fifo empty */
162 if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
163 0x80000000, 1, 5, 2)) {
164 CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
165 macidx(mac));
166 return -1;
167 }
168
169 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
170 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
171
172 val = F_MAC_RESET_;
173 if (is_10G(adap))
174 val |= F_PCS_RESET_;
175 else if (uses_xaui(adap))
176 val |= F_PCS_RESET_ | F_XG2G_RESET_;
177 else
178 val |= F_RGMII_RESET_ | F_XG2G_RESET_;
179 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
180 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
181 if ((val & F_PCS_RESET_) && adap->params.rev) {
182 msleep(1);
183 t3b_pcs_reset(mac);
184 }
185 t3_write_reg(adap, A_XGM_RX_CFG + oft,
186 F_DISPAUSEFRAMES | F_EN1536BFRAMES |
187 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
188
189 if (!macidx(mac))
190 t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
191 else
192 t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
193
194 return 0;
195}
196
148/* 197/*
149 * Set the exact match register 'idx' to recognize the given Ethernet address. 198 * Set the exact match register 'idx' to recognize the given Ethernet address.
150 */ 199 */
@@ -251,9 +300,11 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
251 * Adjust the PAUSE frame watermarks. We always set the LWM, and the 300 * Adjust the PAUSE frame watermarks. We always set the LWM, and the
252 * HWM only if flow-control is enabled. 301 * HWM only if flow-control is enabled.
253 */ 302 */
254 hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, MAC_RXFIFO_SIZE / 2U); 303 hwm = max_t(unsigned int, MAC_RXFIFO_SIZE - 3 * mtu,
255 hwm = min(hwm, 3 * MAC_RXFIFO_SIZE / 4 + 1024); 304 MAC_RXFIFO_SIZE * 38 / 100);
256 lwm = hwm - 1024; 305 hwm = min(hwm, MAC_RXFIFO_SIZE - 8192);
306 lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
307
257 v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset); 308 v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
258 v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM); 309 v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
259 v |= V_RXFIFOPAUSELWM(lwm / 8); 310 v |= V_RXFIFOPAUSELWM(lwm / 8);
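For a sense of the new PAUSE watermarks, the arithmetic can be evaluated standalone. MAC_RXFIFO_SIZE is not part of this diff; 32768 bytes is assumed below purely for illustration:

#include <stdio.h>

#define MAC_RXFIFO_SIZE 32768U	/* assumed Rx FIFO size, not taken from this patch */

static unsigned int umax(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int umin(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
    unsigned int mtus[] = { 1500, 9000 };
    int i;

    for (i = 0; i < 2; i++) {
        unsigned int mtu = mtus[i];
        /* Same expressions as the t3_mac_set_mtu() change above */
        unsigned int hwm = umax(MAC_RXFIFO_SIZE - 3 * mtu,
                                MAC_RXFIFO_SIZE * 38 / 100);
        unsigned int lwm = umin(3 * mtu, MAC_RXFIFO_SIZE / 4);

        hwm = umin(hwm, MAC_RXFIFO_SIZE - 8192);
        printf("mtu %u: hwm %u, lwm %u\n", mtu, hwm, lwm);
    }
    return 0;
}
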
@@ -270,7 +321,15 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
270 thres = mtu > thres ? (mtu - thres + 7) / 8 : 0; 321 thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
271 thres = max(thres, 8U); /* need at least 8 */ 322 thres = max(thres, 8U); /* need at least 8 */
272 t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset, 323 t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
273 V_TXFIFOTHRESH(M_TXFIFOTHRESH), V_TXFIFOTHRESH(thres)); 324 V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
325 V_TXFIFOTHRESH(thres) | V_TXIPG(1));
326
327 if (adap->params.rev > 0)
328 t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
329 (hwm - lwm) * 4 / 8);
330 t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
331 MAC_RXFIFO_SIZE * 4 * 8 / 512);
332
274 return 0; 333 return 0;
275} 334}
276 335
@@ -298,12 +357,6 @@ int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
298 V_PORTSPEED(M_PORTSPEED), val); 357 V_PORTSPEED(M_PORTSPEED), val);
299 } 358 }
300 359
301 val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
302 val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
303 if (fc & PAUSE_TX)
304 val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128); /* +1KB */
305 t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
306
307 t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 360 t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
308 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0); 361 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
309 return 0; 362 return 0;
@@ -314,13 +367,28 @@ int t3_mac_enable(struct cmac *mac, int which)
314 int idx = macidx(mac); 367 int idx = macidx(mac);
315 struct adapter *adap = mac->adapter; 368 struct adapter *adap = mac->adapter;
316 unsigned int oft = mac->offset; 369 unsigned int oft = mac->offset;
317 370 struct mac_stats *s = &mac->stats;
371
318 if (which & MAC_DIRECTION_TX) { 372 if (which & MAC_DIRECTION_TX) {
319 t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN); 373 t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
320 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); 374 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
321 t3_write_reg(adap, A_TP_PIO_DATA, 0xbf000001); 375 t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401);
322 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE); 376 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
323 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx); 377 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
378
379 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
380 mac->tx_mcnt = s->tx_frames;
381 mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
382 A_TP_PIO_DATA)));
383 mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
384 A_XGM_TX_SPI4_SOP_EOP_CNT +
385 oft)));
386 mac->rx_mcnt = s->rx_frames;
387 mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
388 A_XGM_RX_SPI4_SOP_EOP_CNT +
389 oft)));
390 mac->txen = F_TXEN;
391 mac->toggle_cnt = 0;
324 } 392 }
325 if (which & MAC_DIRECTION_RX) 393 if (which & MAC_DIRECTION_RX)
326 t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN); 394 t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
@@ -331,19 +399,102 @@ int t3_mac_disable(struct cmac *mac, int which)
331{ 399{
332 int idx = macidx(mac); 400 int idx = macidx(mac);
333 struct adapter *adap = mac->adapter; 401 struct adapter *adap = mac->adapter;
402 int val;
334 403
335 if (which & MAC_DIRECTION_TX) { 404 if (which & MAC_DIRECTION_TX) {
336 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0); 405 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
337 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); 406 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
338 t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f); 407 t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
339 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE); 408 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
340 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 0); 409 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
410 mac->txen = 0;
341 } 411 }
342 if (which & MAC_DIRECTION_RX) 412 if (which & MAC_DIRECTION_RX) {
413 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
414 F_PCS_RESET_, 0);
415 msleep(100);
343 t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0); 416 t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
417 val = F_MAC_RESET_;
418 if (is_10G(adap))
419 val |= F_PCS_RESET_;
420 else if (uses_xaui(adap))
421 val |= F_PCS_RESET_ | F_XG2G_RESET_;
422 else
423 val |= F_RGMII_RESET_ | F_XG2G_RESET_;
424 t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
425 }
344 return 0; 426 return 0;
345} 427}
346 428
429int t3b2_mac_watchdog_task(struct cmac *mac)
430{
431 struct adapter *adap = mac->adapter;
432 struct mac_stats *s = &mac->stats;
433 unsigned int tx_tcnt, tx_xcnt;
434 unsigned int tx_mcnt = s->tx_frames;
435 unsigned int rx_mcnt = s->rx_frames;
436 unsigned int rx_xcnt;
437 int status;
438
439 if (tx_mcnt == mac->tx_mcnt) {
440 tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
441 A_XGM_TX_SPI4_SOP_EOP_CNT +
442 mac->offset)));
443 if (tx_xcnt == 0) {
444 t3_write_reg(adap, A_TP_PIO_ADDR,
445 A_TP_TX_DROP_CNT_CH0 + macidx(mac));
446 tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
447 A_TP_PIO_DATA)));
448 } else {
449 mac->toggle_cnt = 0;
450 return 0;
451 }
452 } else {
453 mac->toggle_cnt = 0;
454 return 0;
455 }
456
457 if (((tx_tcnt != mac->tx_tcnt) &&
458 (tx_xcnt == 0) && (mac->tx_xcnt == 0)) ||
459 ((mac->tx_mcnt == tx_mcnt) &&
460 (tx_xcnt != 0) && (mac->tx_xcnt != 0))) {
461 if (mac->toggle_cnt > 4)
462 status = 2;
463 else
464 status = 1;
465 } else {
466 mac->toggle_cnt = 0;
467 return 0;
468 }
469
470 if (rx_mcnt != mac->rx_mcnt)
471 rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
472 A_XGM_RX_SPI4_SOP_EOP_CNT +
473 mac->offset)));
474 else
475 return 0;
476
477 if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 && mac->rx_xcnt == 0)
478 status = 2;
479
480 mac->tx_tcnt = tx_tcnt;
481 mac->tx_xcnt = tx_xcnt;
482 mac->tx_mcnt = s->tx_frames;
483 mac->rx_xcnt = rx_xcnt;
484 mac->rx_mcnt = s->rx_frames;
485 if (status == 1) {
486 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
487 t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
488 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
489 t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
490 mac->toggle_cnt++;
491 } else if (status == 2) {
492 t3b2_mac_reset(mac);
493 mac->toggle_cnt = 0;
494 }
495 return status;
496}
497
347/* 498/*
348 * This function is called periodically to accumulate the current values of the 499 * This function is called periodically to accumulate the current values of the
349 * RMON counters into the port statistics. Since the packet counters are only 500 * RMON counters into the port statistics. Since the packet counters are only
@@ -373,7 +524,11 @@ const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
373 RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES); 524 RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
374 525
375 RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES); 526 RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
376 mac->stats.rx_too_long += RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT); 527
528 v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
529 if (mac->adapter->params.rev == T3_REV_B2)
530 v &= 0x7fffffff;
531 mac->stats.rx_too_long += v;
377 532
378 RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES); 533 RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
379 RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES); 534 RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);