Diffstat (limited to 'drivers/net/chelsio')
 drivers/net/chelsio/common.h      |   2
 drivers/net/chelsio/cpl5_cmd.h    |  18
 drivers/net/chelsio/cxgb2.c       | 149
 drivers/net/chelsio/elmer0.h      |  40
 drivers/net/chelsio/espi.c        |  44
 drivers/net/chelsio/fpga_defs.h   |   6
 drivers/net/chelsio/gmac.h        |  11
 drivers/net/chelsio/ixf1010.c     | 100
 drivers/net/chelsio/mv88e1xxx.c   |  27
 drivers/net/chelsio/my3126.c      |  16
 drivers/net/chelsio/pm3393.c      |  91
 drivers/net/chelsio/sge.c         | 328
 drivers/net/chelsio/subr.c        |  89
 drivers/net/chelsio/tp.c          |  62
 drivers/net/chelsio/vsc7326.c     | 139
 drivers/net/chelsio/vsc7326_reg.h | 139
 drivers/net/chelsio/vsc8244.c     |  41
 17 files changed, 653 insertions(+), 649 deletions(-)
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 74758d2c7af8..787f2f2820fe 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -324,7 +324,7 @@ struct board_info {
 	unsigned char mdio_phybaseaddr;
 	struct gmac *gmac;
 	struct gphy *gphy;
 	struct mdio_ops *mdio_ops;
 	const char *desc;
 };
 
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
index 35f565be4fd3..e36d45b78cc7 100644
--- a/drivers/net/chelsio/cpl5_cmd.h
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -103,7 +103,7 @@ enum CPL_opcode {
 	CPL_MIGRATE_C2T_RPL = 0xDD,
 	CPL_ERROR = 0xD7,
 
 	/* internal: driver -> TOM */
 	CPL_MSS_CHANGE = 0xE1
 };
 
@@ -159,8 +159,8 @@ enum { // TX_PKT_LSO ethernet types
 };
 
 union opcode_tid {
 	u32 opcode_tid;
 	u8 opcode;
 };
 
 #define S_OPCODE 24
@@ -234,7 +234,7 @@ struct cpl_pass_accept_req {
 	u32 local_ip;
 	u32 peer_ip;
 	u32 tos_tid;
 	struct tcp_options tcp_options;
 	u8 dst_mac[6];
 	u16 vlan_tag;
 	u8 src_mac[6];
@@ -250,12 +250,12 @@ struct cpl_pass_accept_rpl {
 	u32 peer_ip;
 	u32 opt0h;
 	union {
 		u32 opt0l;
 		struct {
 			u8 rsvd[3];
 			u8 status;
+		};
 	};
-};
 };
 
 struct cpl_act_open_req {
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index fd5d821f3f2a..7d0f24f69777 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -69,14 +69,14 @@ static inline void cancel_mac_stats_update(struct adapter *ap)
 	cancel_delayed_work(&ap->stats_update_task);
 }
 
 #define MAX_CMDQ_ENTRIES 16384
 #define MAX_CMDQ1_ENTRIES 1024
 #define MAX_RX_BUFFERS 16384
 #define MAX_RX_JUMBO_BUFFERS 16384
 #define MAX_TX_BUFFERS_HIGH 16384U
 #define MAX_TX_BUFFERS_LOW 1536U
 #define MAX_TX_BUFFERS 1460U
 #define MIN_FL_ENTRIES 32
 
 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
 	NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
@@ -143,7 +143,7 @@ static void link_report(struct port_info *p)
 	case SPEED_100: s = "100Mbps"; break;
 	}
 
 	printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
 	       p->dev->name, s,
 	       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
 }
@@ -233,7 +233,7 @@ static int cxgb_up(struct adapter *adapter)
 
 	t1_sge_start(adapter->sge);
 	t1_interrupts_enable(adapter);
 out_err:
 	return err;
 }
 
@@ -454,51 +454,21 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
 	const struct cmac_statistics *s;
 	const struct sge_intr_counts *t;
 	struct sge_port_stats ss;
+	unsigned int len;
 
 	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
 
-	*data++ = s->TxOctetsOK;
-	*data++ = s->TxOctetsBad;
-	*data++ = s->TxUnicastFramesOK;
-	*data++ = s->TxMulticastFramesOK;
-	*data++ = s->TxBroadcastFramesOK;
-	*data++ = s->TxPauseFrames;
-	*data++ = s->TxFramesWithDeferredXmissions;
-	*data++ = s->TxLateCollisions;
-	*data++ = s->TxTotalCollisions;
-	*data++ = s->TxFramesAbortedDueToXSCollisions;
-	*data++ = s->TxUnderrun;
-	*data++ = s->TxLengthErrors;
-	*data++ = s->TxInternalMACXmitError;
-	*data++ = s->TxFramesWithExcessiveDeferral;
-	*data++ = s->TxFCSErrors;
-
-	*data++ = s->RxOctetsOK;
-	*data++ = s->RxOctetsBad;
-	*data++ = s->RxUnicastFramesOK;
-	*data++ = s->RxMulticastFramesOK;
-	*data++ = s->RxBroadcastFramesOK;
-	*data++ = s->RxPauseFrames;
-	*data++ = s->RxFCSErrors;
-	*data++ = s->RxAlignErrors;
-	*data++ = s->RxSymbolErrors;
-	*data++ = s->RxDataErrors;
-	*data++ = s->RxSequenceErrors;
-	*data++ = s->RxRuntErrors;
-	*data++ = s->RxJabberErrors;
-	*data++ = s->RxInternalMACRcvError;
-	*data++ = s->RxInRangeLengthErrors;
-	*data++ = s->RxOutOfRangeLengthField;
-	*data++ = s->RxFrameTooLongErrors;
+	len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK);
+	memcpy(data, &s->TxOctetsOK, len);
+	data += len;
+
+	len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK);
+	memcpy(data, &s->RxOctetsOK, len);
+	data += len;
 
 	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
-	*data++ = ss.rx_packets;
-	*data++ = ss.rx_cso_good;
-	*data++ = ss.tx_packets;
-	*data++ = ss.tx_cso;
-	*data++ = ss.tx_tso;
-	*data++ = ss.vlan_xtract;
-	*data++ = ss.vlan_insert;
+	memcpy(data, &ss, sizeof(ss));
+	data += sizeof(ss);
 
 	t = t1_sge_get_intr_counts(adapter->sge);
 	*data++ = t->rx_drops;
@@ -749,7 +719,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 		return -EINVAL;
 
 	if (adapter->flags & FULL_INIT_DONE)
 		return -EBUSY;
 
 	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
 	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
@@ -764,7 +734,7 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 	struct adapter *adapter = dev->priv;
 
 	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
 	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
 	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
 	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
 	return 0;
@@ -782,9 +752,9 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 
 static int get_eeprom_len(struct net_device *dev)
 {
 	struct adapter *adapter = dev->priv;
 
 	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
 }
 
 #define EEPROM_MAGIC(ap) \
@@ -848,7 +818,7 @@ static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 		u32 val;
 
 		if (!phy->mdio_read)
 			return -EOPNOTSUPP;
 		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
 			       &val);
 		data->val_out = val;
@@ -860,7 +830,7 @@ static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 		if (!phy->mdio_write)
 			return -EOPNOTSUPP;
 		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
 			       data->val_in);
 		break;
@@ -879,9 +849,9 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
 	struct cmac *mac = adapter->port[dev->if_port].mac;
 
 	if (!mac->ops->set_mtu)
 		return -EOPNOTSUPP;
 	if (new_mtu < 68)
 		return -EINVAL;
 	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
 		return ret;
 	dev->mtu = new_mtu;
@@ -1211,9 +1181,9 @@ static int __devinit init_one(struct pci_dev *pdev,
 
 	return 0;
 
 out_release_adapter_res:
 	t1_free_sw_modules(adapter);
 out_free_dev:
 	if (adapter) {
 		if (adapter->regs)
 			iounmap(adapter->regs);
@@ -1222,7 +1192,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 				free_netdev(adapter->port[i].dev);
 	}
 	pci_release_regions(pdev);
 out_disable_pdev:
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 	return err;
@@ -1273,28 +1243,27 @@ static int t1_clock(struct adapter *adapter, int mode)
 	int M_MEM_VAL;
 
 	enum {
 		M_CORE_BITS = 9,
 		T_CORE_VAL = 0,
 		T_CORE_BITS = 2,
 		N_CORE_VAL = 0,
 		N_CORE_BITS = 2,
 		M_MEM_BITS = 9,
 		T_MEM_VAL = 0,
 		T_MEM_BITS = 2,
 		N_MEM_VAL = 0,
 		N_MEM_BITS = 2,
 		NP_LOAD = 1 << 17,
 		S_LOAD_MEM = 1 << 5,
 		S_LOAD_CORE = 1 << 6,
 		S_CLOCK = 1 << 3
 	};
 
 	if (!t1_is_T1B(adapter))
 		return -ENODEV; /* Can't re-clock this chip. */
 
-	if (mode & 2) {
+	if (mode & 2)
 		return 0; /* show current mode. */
-	}
 
 	if ((adapter->t1powersave & 1) == (mode & 1))
 		return -EALREADY; /* ASIC already running in mode. */
@@ -1386,26 +1355,26 @@ static inline void t1_sw_reset(struct pci_dev *pdev)
 static void __devexit remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
+	struct adapter *adapter = dev->priv;
+	int i;
 
-	if (dev) {
-		int i;
-		struct adapter *adapter = dev->priv;
-
-		for_each_port(adapter, i)
-			if (test_bit(i, &adapter->registered_device_map))
-				unregister_netdev(adapter->port[i].dev);
+	for_each_port(adapter, i) {
+		if (test_bit(i, &adapter->registered_device_map))
+			unregister_netdev(adapter->port[i].dev);
+	}
 
 	t1_free_sw_modules(adapter);
 	iounmap(adapter->regs);
-		while (--i >= 0)
-			if (adapter->port[i].dev)
-				free_netdev(adapter->port[i].dev);
 
-		pci_release_regions(pdev);
-		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
-		t1_sw_reset(pdev);
-	}
+	while (--i >= 0) {
+		if (adapter->port[i].dev)
+			free_netdev(adapter->port[i].dev);
+	}
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	t1_sw_reset(pdev);
 }
 
 static struct pci_driver driver = {
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
index 9ebecaa97d31..eef655c827d9 100644
--- a/drivers/net/chelsio/elmer0.h
+++ b/drivers/net/chelsio/elmer0.h
@@ -46,14 +46,14 @@ enum {
 };
 
 /* ELMER0 registers */
 #define A_ELMER0_VERSION 0x100000
 #define A_ELMER0_PHY_CFG 0x100004
 #define A_ELMER0_INT_ENABLE 0x100008
 #define A_ELMER0_INT_CAUSE 0x10000c
 #define A_ELMER0_GPI_CFG 0x100010
 #define A_ELMER0_GPI_STAT 0x100014
 #define A_ELMER0_GPO 0x100018
 #define A_ELMER0_PORT0_MI1_CFG 0x400000
 
 #define S_MI1_MDI_ENABLE 0
 #define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
@@ -111,18 +111,18 @@ enum {
 #define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
 #define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U)
 
 #define A_ELMER0_PORT1_MI1_CFG 0x500000
 #define A_ELMER0_PORT1_MI1_ADDR 0x500004
 #define A_ELMER0_PORT1_MI1_DATA 0x500008
 #define A_ELMER0_PORT1_MI1_OP 0x50000c
 #define A_ELMER0_PORT2_MI1_CFG 0x600000
 #define A_ELMER0_PORT2_MI1_ADDR 0x600004
 #define A_ELMER0_PORT2_MI1_DATA 0x600008
 #define A_ELMER0_PORT2_MI1_OP 0x60000c
 #define A_ELMER0_PORT3_MI1_CFG 0x700000
 #define A_ELMER0_PORT3_MI1_ADDR 0x700004
 #define A_ELMER0_PORT3_MI1_DATA 0x700008
 #define A_ELMER0_PORT3_MI1_OP 0x70000c
 
 /* Simple bit definition for GPI and GP0 registers. */
 #define ELMER0_GP_BIT0 0x0001
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
index 4192f0f5b3ee..d7c5406a6c3f 100644
--- a/drivers/net/chelsio/espi.c
+++ b/drivers/net/chelsio/espi.c
@@ -202,9 +202,9 @@ static void espi_setup_for_pm3393(adapter_t *adapter)
 
 static void espi_setup_for_vsc7321(adapter_t *adapter)
 {
 	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
 	writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1);
 	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
 	writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
 	writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
 	writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
@@ -247,10 +247,10 @@ int t1_espi_init(struct peespi *espi, int mac_type, int nports)
 		writel(V_OUT_OF_SYNC_COUNT(4) |
 		       V_DIP2_PARITY_ERR_THRES(3) |
 		       V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
 		writel(nports == 4 ? 0x200040 : 0x1000080,
 		       adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
 	} else
 		writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
 
 	if (mac_type == CHBT_MAC_PM3393)
 		espi_setup_for_pm3393(adapter);
@@ -301,7 +301,8 @@ void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
 {
 	struct peespi *espi = adapter->espi;
 
-	if (!is_T2(adapter)) return;
+	if (!is_T2(adapter))
+		return;
 	spin_lock(&espi->lock);
 	espi->misc_ctrl = (val & ~MON_MASK) |
 			  (espi->misc_ctrl & MON_MASK);
@@ -340,32 +341,31 @@ u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
  * compare with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in
  * one shot, since there is no per port counter on the out side.
  */
-int
-t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
+int t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
 {
 	struct peespi *espi = adapter->espi;
 	u8 i, nport = (u8)adapter->params.nports;
 
 	if (!wait) {
 		if (!spin_trylock(&espi->lock))
 			return -1;
 	} else
 		spin_lock(&espi->lock);
 
-	if ( (espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION ) {
+	if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) {
 		espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) |
 				  F_MONITORED_DIRECTION;
 		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
 	}
 	for (i = 0 ; i < nport; i++, valp++) {
 		if (i) {
 			writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i),
 			       adapter->regs + A_ESPI_MISC_CONTROL);
 		}
 		*valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
 	}
 
 	writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
 	spin_unlock(&espi->lock);
 	return 0;
 }
diff --git a/drivers/net/chelsio/fpga_defs.h b/drivers/net/chelsio/fpga_defs.h
index 17a3c2ba36a3..ccdb2bc9ae98 100644
--- a/drivers/net/chelsio/fpga_defs.h
+++ b/drivers/net/chelsio/fpga_defs.h
@@ -98,9 +98,9 @@
 #define A_MI0_DATA_INT 0xb10
 
 /* GMAC registers */
 #define A_GMAC_MACID_LO 0x28
 #define A_GMAC_MACID_HI 0x2c
 #define A_GMAC_CSR 0x30
 
 #define S_INTERFACE 0
 #define M_INTERFACE 0x3
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
index a2b8ad9b5535..006a2eb2d362 100644
--- a/drivers/net/chelsio/gmac.h
+++ b/drivers/net/chelsio/gmac.h
@@ -42,8 +42,15 @@
 
 #include "common.h"
 
-enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL };
-enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 };
+enum {
+	MAC_STATS_UPDATE_FAST,
+	MAC_STATS_UPDATE_FULL
+};
+
+enum {
+	MAC_DIRECTION_RX = 1,
+	MAC_DIRECTION_TX = 2
+};
 
 struct cmac_statistics {
 	/* Transmit */
diff --git a/drivers/net/chelsio/ixf1010.c b/drivers/net/chelsio/ixf1010.c
index 5b8f144e83d4..10b2a9a19006 100644
--- a/drivers/net/chelsio/ixf1010.c
+++ b/drivers/net/chelsio/ixf1010.c
@@ -145,48 +145,61 @@ static void disable_port(struct cmac *mac)
 	t1_tpi_write(mac->adapter, REG_PORT_ENABLE, val);
 }
 
-#define RMON_UPDATE(mac, name, stat_name) \
-	t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \
-	(mac)->stats.stat_name += val;
-
 /*
  * Read the current values of the RMON counters and add them to the cumulative
  * port statistics. The HW RMON counters are cleared by this operation.
  */
 static void port_stats_update(struct cmac *mac)
 {
-	u32 val;
+	static struct {
+		unsigned int reg;
+		unsigned int offset;
+	} hw_stats[] = {
+
+#define HW_STAT(name, stat_name) \
+	{ REG_##name, \
+	  (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
+
+		/* Rx stats */
+		HW_STAT(RxOctetsTotalOK, RxOctetsOK),
+		HW_STAT(RxOctetsBad, RxOctetsBad),
+		HW_STAT(RxUCPkts, RxUnicastFramesOK),
+		HW_STAT(RxMCPkts, RxMulticastFramesOK),
+		HW_STAT(RxBCPkts, RxBroadcastFramesOK),
+		HW_STAT(RxJumboPkts, RxJumboFramesOK),
+		HW_STAT(RxFCSErrors, RxFCSErrors),
+		HW_STAT(RxAlignErrors, RxAlignErrors),
+		HW_STAT(RxLongErrors, RxFrameTooLongErrors),
+		HW_STAT(RxVeryLongErrors, RxFrameTooLongErrors),
+		HW_STAT(RxPauseMacControlCounter, RxPauseFrames),
+		HW_STAT(RxDataErrors, RxDataErrors),
+		HW_STAT(RxJabberErrors, RxJabberErrors),
+		HW_STAT(RxRuntErrors, RxRuntErrors),
+		HW_STAT(RxShortErrors, RxRuntErrors),
+		HW_STAT(RxSequenceErrors, RxSequenceErrors),
+		HW_STAT(RxSymbolErrors, RxSymbolErrors),
+
+		/* Tx stats (skip collision stats as we are full-duplex only) */
+		HW_STAT(TxOctetsTotalOK, TxOctetsOK),
+		HW_STAT(TxOctetsBad, TxOctetsBad),
+		HW_STAT(TxUCPkts, TxUnicastFramesOK),
+		HW_STAT(TxMCPkts, TxMulticastFramesOK),
+		HW_STAT(TxBCPkts, TxBroadcastFramesOK),
+		HW_STAT(TxJumboPkts, TxJumboFramesOK),
+		HW_STAT(TxPauseFrames, TxPauseFrames),
+		HW_STAT(TxExcessiveLengthDrop, TxLengthErrors),
+		HW_STAT(TxUnderrun, TxUnderrun),
+		HW_STAT(TxCRCErrors, TxFCSErrors)
+	}, *p = hw_stats;
+	u64 *stats = (u64 *) &mac->stats;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(hw_stats); i++) {
+		u32 val;
 
-	/* Rx stats */
-	RMON_UPDATE(mac, RxOctetsTotalOK, RxOctetsOK);
-	RMON_UPDATE(mac, RxOctetsBad, RxOctetsBad);
-	RMON_UPDATE(mac, RxUCPkts, RxUnicastFramesOK);
-	RMON_UPDATE(mac, RxMCPkts, RxMulticastFramesOK);
-	RMON_UPDATE(mac, RxBCPkts, RxBroadcastFramesOK);
-	RMON_UPDATE(mac, RxJumboPkts, RxJumboFramesOK);
-	RMON_UPDATE(mac, RxFCSErrors, RxFCSErrors);
-	RMON_UPDATE(mac, RxAlignErrors, RxAlignErrors);
-	RMON_UPDATE(mac, RxLongErrors, RxFrameTooLongErrors);
-	RMON_UPDATE(mac, RxVeryLongErrors, RxFrameTooLongErrors);
-	RMON_UPDATE(mac, RxPauseMacControlCounter, RxPauseFrames);
-	RMON_UPDATE(mac, RxDataErrors, RxDataErrors);
-	RMON_UPDATE(mac, RxJabberErrors, RxJabberErrors);
-	RMON_UPDATE(mac, RxRuntErrors, RxRuntErrors);
-	RMON_UPDATE(mac, RxShortErrors, RxRuntErrors);
-	RMON_UPDATE(mac, RxSequenceErrors, RxSequenceErrors);
-	RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
-
-	/* Tx stats (skip collision stats as we are full-duplex only) */
-	RMON_UPDATE(mac, TxOctetsTotalOK, TxOctetsOK);
-	RMON_UPDATE(mac, TxOctetsBad, TxOctetsBad);
-	RMON_UPDATE(mac, TxUCPkts, TxUnicastFramesOK);
-	RMON_UPDATE(mac, TxMCPkts, TxMulticastFramesOK);
-	RMON_UPDATE(mac, TxBCPkts, TxBroadcastFramesOK);
-	RMON_UPDATE(mac, TxJumboPkts, TxJumboFramesOK);
-	RMON_UPDATE(mac, TxPauseFrames, TxPauseFrames);
-	RMON_UPDATE(mac, TxExcessiveLengthDrop, TxLengthErrors);
-	RMON_UPDATE(mac, TxUnderrun, TxUnderrun);
-	RMON_UPDATE(mac, TxCRCErrors, TxFCSErrors);
+		t1_tpi_read(mac->adapter, MACREG(mac, p->reg), &val);
+		stats[p->offset] += val;
+	}
 }
 
 /* No-op interrupt operation as this MAC does not support interrupts */
@@ -273,7 +286,8 @@ static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
 static int mac_set_mtu(struct cmac *mac, int mtu)
 {
 	/* MAX_FRAME_SIZE inludes header + FCS, mtu doesn't */
-	if (mtu > (MAX_FRAME_SIZE - 14 - 4)) return -EINVAL;
+	if (mtu > (MAX_FRAME_SIZE - 14 - 4))
+		return -EINVAL;
 	t1_tpi_write(mac->adapter, MACREG(mac, REG_MAX_FRAME_SIZE),
 		     mtu + 14 + 4);
 	return 0;
@@ -357,8 +371,8 @@ static void enable_port(struct cmac *mac)
 	val |= (1 << index);
 	t1_tpi_write(adapter, REG_PORT_ENABLE, val);
 
 	index <<= 2;
 	if (is_T2(adapter)) {
 		/* T204: set the Fifo water level & threshold */
 		t1_tpi_write(adapter, RX_FIFO_HIGH_WATERMARK_BASE + index, 0x740);
 		t1_tpi_write(adapter, RX_FIFO_LOW_WATERMARK_BASE + index, 0x730);
@@ -389,6 +403,10 @@ static int mac_disable(struct cmac *mac, int which)
 	return 0;
 }
 
+#define RMON_UPDATE(mac, name, stat_name) \
+	t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \
+	(mac)->stats.stat_name += val;
+
 /*
  * This function is called periodically to accumulate the current values of the
  * RMON counters into the port statistics. Since the counters are only 32 bits
@@ -460,10 +478,12 @@ static struct cmac *ixf1010_mac_create(adapter_t *adapter, int index)
 	struct cmac *mac;
 	u32 val;
 
-	if (index > 9) return NULL;
+	if (index > 9)
+		return NULL;
 
 	mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
-	if (!mac) return NULL;
+	if (!mac)
+		return NULL;
 
 	mac->ops = &ixf1010_ops;
 	mac->instance = (cmac_instance *)(mac + 1);
diff --git a/drivers/net/chelsio/mv88e1xxx.c b/drivers/net/chelsio/mv88e1xxx.c
index 28ac93ff7c4f..5867e3b0a887 100644
--- a/drivers/net/chelsio/mv88e1xxx.c
+++ b/drivers/net/chelsio/mv88e1xxx.c
@@ -73,9 +73,8 @@ static int mv88e1xxx_interrupt_enable(struct cphy *cphy)
 
 		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
 		elmer |= ELMER0_GP_BIT1;
-		if (is_T2(cphy->adapter)) {
-		    elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
-		}
+		if (is_T2(cphy->adapter))
+			elmer |= ELMER0_GP_BIT2 | ELMER0_GP_BIT3 | ELMER0_GP_BIT4;
 		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
 	}
 	return 0;
@@ -92,9 +91,8 @@ static int mv88e1xxx_interrupt_disable(struct cphy *cphy)
 
 		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
 		elmer &= ~ELMER0_GP_BIT1;
-		if (is_T2(cphy->adapter)) {
+		if (is_T2(cphy->adapter))
 			elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
-		}
 		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
 	}
 	return 0;
@@ -112,9 +110,8 @@ static int mv88e1xxx_interrupt_clear(struct cphy *cphy)
 	if (t1_is_asic(cphy->adapter)) {
 		t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
 		elmer |= ELMER0_GP_BIT1;
-		if (is_T2(cphy->adapter)) {
+		if (is_T2(cphy->adapter))
 			elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
-		}
 		t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
 	}
 	return 0;
@@ -300,7 +297,7 @@ static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
 
 	/*
 	 * Loop until cause reads zero. Need to handle bouncing interrupts.
 	 */
 	while (1) {
 		u32 cause;
 
@@ -308,15 +305,16 @@ static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
 			       MV88E1XXX_INTERRUPT_STATUS_REGISTER,
 			       &cause);
 		cause &= INTR_ENABLE_MASK;
-		if (!cause) break;
+		if (!cause)
+			break;
 
 		if (cause & MV88E1XXX_INTR_LINK_CHNG) {
 			(void) simple_mdio_read(cphy,
 				MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
 
-			if (status & MV88E1XXX_INTR_LINK_CHNG) {
+			if (status & MV88E1XXX_INTR_LINK_CHNG)
 				cphy->state |= PHY_LINK_UP;
-			} else {
+			else {
 				cphy->state &= ~PHY_LINK_UP;
 				if (cphy->state & PHY_AUTONEG_EN)
 					cphy->state &= ~PHY_AUTONEG_RDY;
@@ -360,7 +358,8 @@ static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr,
 {
 	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
 
-	if (!cphy) return NULL;
+	if (!cphy)
+		return NULL;
 
 	cphy_init(cphy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops);
 
@@ -377,11 +376,11 @@ static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr,
 	}
 	(void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */
 
 	/* LED */
 	if (is_T2(adapter)) {
 		(void) simple_mdio_write(cphy,
 			MV88E1XXX_LED_CONTROL_REGISTER, 0x1);
 	}
 
 	return cphy;
 }
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
index 82fed1dd5005..87dde3e60046 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/chelsio/my3126.c
@@ -10,25 +10,25 @@ static int my3126_reset(struct cphy *cphy, int wait)
 	 * This can be done through registers. It is not required since
 	 * a full chip reset is used.
 	 */
-	return (0);
+	return 0;
 }
 
 static int my3126_interrupt_enable(struct cphy *cphy)
 {
 	schedule_delayed_work(&cphy->phy_update, HZ/30);
 	t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo);
-	return (0);
+	return 0;
 }
 
 static int my3126_interrupt_disable(struct cphy *cphy)
 {
 	cancel_rearming_delayed_work(&cphy->phy_update);
-	return (0);
+	return 0;
 }
 
 static int my3126_interrupt_clear(struct cphy *cphy)
 {
-	return (0);
+	return 0;
 }
 
 #define OFFSET(REG_ADDR) (REG_ADDR << 2)
@@ -102,7 +102,7 @@ static void my3216_poll(struct work_struct *work)
 
 static int my3126_set_loopback(struct cphy *cphy, int on)
 {
-	return (0);
+	return 0;
 }
 
 /* To check the activity LED */
@@ -146,7 +146,7 @@ static int my3126_get_link_status(struct cphy *cphy,
 	if (fc)
 		*fc = PAUSE_RX | PAUSE_TX;
 
-	return (0);
+	return 0;
 }
 
 static void my3126_destroy(struct cphy *cphy)
@@ -177,7 +177,7 @@ static struct cphy *my3126_phy_create(adapter_t *adapter,
 	INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
 	cphy->bmsr = 0;
 
-	return (cphy);
+	return cphy;
 }
 
 /* Chip Reset */
@@ -198,7 +198,7 @@ static int my3126_phy_reset(adapter_t * adapter)
 	val |= 0x8000;
 	t1_tpi_write(adapter, A_ELMER0_GPO, val);
 	udelay(100);
-	return (0);
+	return 0;
 }
 
 struct gphy t1_my3126_ops = {
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 63cabeb98afe..69129edeefd6 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -446,17 +446,51 @@ static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val,
 		*val += 1ull << 40;
 }
 
-#define RMON_UPDATE(mac, name, stat_name) \
-	pm3393_rmon_update((mac)->adapter, OFFSET(name), \
-		&(mac)->stats.stat_name, \
-		(ro &((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)))
-
-
 static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
 							      int flag)
 {
-	u64 ro;
-	u32 val0, val1, val2, val3;
+	static struct {
+		unsigned int reg;
+		unsigned int offset;
+	} hw_stats [] = {
+
+#define HW_STAT(name, stat_name) \
+	{ name, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
+
+		/* Rx stats */
+		HW_STAT(RxOctetsReceivedOK, RxOctetsOK),
+		HW_STAT(RxUnicastFramesReceivedOK, RxUnicastFramesOK),
+		HW_STAT(RxMulticastFramesReceivedOK, RxMulticastFramesOK),
+		HW_STAT(RxBroadcastFramesReceivedOK, RxBroadcastFramesOK),
+		HW_STAT(RxPAUSEMACCtrlFramesReceived, RxPauseFrames),
+		HW_STAT(RxFrameCheckSequenceErrors, RxFCSErrors),
+		HW_STAT(RxFramesLostDueToInternalMACErrors,
+			RxInternalMACRcvError),
+		HW_STAT(RxSymbolErrors, RxSymbolErrors),
+		HW_STAT(RxInRangeLengthErrors, RxInRangeLengthErrors),
+		HW_STAT(RxFramesTooLongErrors , RxFrameTooLongErrors),
+		HW_STAT(RxJabbers, RxJabberErrors),
+		HW_STAT(RxFragments, RxRuntErrors),
+		HW_STAT(RxUndersizedFrames, RxRuntErrors),
+		HW_STAT(RxJumboFramesReceivedOK, RxJumboFramesOK),
+		HW_STAT(RxJumboOctetsReceivedOK, RxJumboOctetsOK),
+
+		/* Tx stats */
+		HW_STAT(TxOctetsTransmittedOK, TxOctetsOK),
+		HW_STAT(TxFramesLostDueToInternalMACTransmissionError,
+			TxInternalMACXmitError),
+		HW_STAT(TxTransmitSystemError, TxFCSErrors),
+		HW_STAT(TxUnicastFramesTransmittedOK, TxUnicastFramesOK),
+		HW_STAT(TxMulticastFramesTransmittedOK, TxMulticastFramesOK),
+		HW_STAT(TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK),
+		HW_STAT(TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames),
+		HW_STAT(TxJumboFramesReceivedOK, TxJumboFramesOK),
+		HW_STAT(TxJumboOctetsReceivedOK, TxJumboOctetsOK)
+	}, *p = hw_stats;
+	u64 ro;
+	u32 val0, val1, val2, val3;
+	u64 *stats = (u64 *) &mac->stats;
+	unsigned int i;
 
 	/* Snap the counters */
 	pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
@@ -470,35 +504,14 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
 	ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
 	     (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
 
-	/* Rx stats */
-	RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
-	RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
-	RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
-	RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
-	RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
-	RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
-	RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
-		RxInternalMACRcvError);
-	RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
-	RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
-	RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
-	RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
-	RMON_UPDATE(mac, RxFragments, RxRuntErrors);
-	RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
-	RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
-	RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
-
-	/* Tx stats */
-	RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
-	RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
-		TxInternalMACXmitError);
-	RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
-	RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
-	RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
-	RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
-	RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
-	RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
-	RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
+	for (i = 0; i < ARRAY_SIZE(hw_stats); i++) {
+		unsigned reg = p->reg - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW;
+
+		pm3393_rmon_update((mac)->adapter, OFFSET(p->reg),
+				   stats + p->offset, ro & (reg >> 2));
+	}
+
+
 
 	return &mac->stats;
 }
@@ -534,9 +547,9 @@ static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
 	/* Store local copy */
 	memcpy(cmac->instance->mac_addr, ma, 6);
 
 	lo = ((u32) ma[1] << 8) | (u32) ma[0];
 	mid = ((u32) ma[3] << 8) | (u32) ma[2];
 	hi = ((u32) ma[5] << 8) | (u32) ma[4];
 
 	/* Disable Rx/Tx MAC before configuring it. */
 	if (enabled)
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 659cb2252e44..89a682702fa9 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -71,12 +71,9 @@
 #define SGE_FREEL_REFILL_THRESH 16
 #define SGE_RESPQ_E_N 1024
 #define SGE_INTRTIMER_NRES 1000
-#define SGE_RX_COPY_THRES 256
 #define SGE_RX_SM_BUF_SIZE 1536
 #define SGE_TX_DESC_MAX_PLEN 16384
 
-# define SGE_RX_DROP_THRES 2
-
 #define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
 
 /*
@@ -85,10 +82,6 @@
  */
 #define TX_RECLAIM_PERIOD (HZ / 4)
 
-#ifndef NET_IP_ALIGN
-# define NET_IP_ALIGN 2
-#endif
-
 #define M_CMD_LEN 0x7fffffff
 #define V_CMD_LEN(v) (v)
 #define G_CMD_LEN(v) ((v) & M_CMD_LEN)
@@ -195,7 +188,7 @@ struct cmdQ {
 	struct cmdQ_e *entries;         /* HW command descriptor Q */
 	struct cmdQ_ce *centries;       /* SW command context descriptor Q */
 	dma_addr_t dma_addr;            /* DMA addr HW command descriptor Q */
 	spinlock_t lock;                /* Lock to protect cmdQ enqueuing */
 };
 
 struct freelQ {
@@ -241,9 +234,9 @@ struct sched_port {
 /* Per T204 device */
 struct sched {
 	ktime_t last_updated;           /* last time quotas were computed */
 	unsigned int max_avail;         /* max bits to be sent to any port */
 	unsigned int port;              /* port index (round robin ports) */
 	unsigned int num;               /* num skbs in per port queues */
 	struct sched_port p[MAX_NPORTS];
 	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
 };
@@ -259,10 +252,10 @@ static void restart_sched(unsigned long);
  * contention.
  */
 struct sge {
 	struct adapter *adapter;        /* adapter backpointer */
 	struct net_device *netdev;      /* netdevice backpointer */
 	struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
 	struct respQ respQ;             /* response Q */
 	unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
 	unsigned int rx_pkt_pad;        /* RX padding for L2 packets */
 	unsigned int jumbo_fl;          /* jumbo freelist Q index */
@@ -460,7 +453,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
 	if (credits < MAX_SKB_FRAGS + 1)
 		goto out;
 
 again:
 	for (i = 0; i < MAX_NPORTS; i++) {
 		s->port = ++s->port & (MAX_NPORTS - 1);
 		skbq = &s->p[s->port].skbq;
@@ -483,8 +476,8 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
 	if (update-- && sched_update_avail(sge))
 		goto again;
 
 out:
 	/* If there are more pending skbs, we use the hardware to schedule us
 	 * again.
 	 */
 	if (s->num && !skb) {
@@ -575,11 +568,10 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
 		q->size = p->freelQ_size[i];
 		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
 		size = sizeof(struct freelQ_e) * q->size;
-		q->entries = (struct freelQ_e *)
-			pci_alloc_consistent(pdev, size, &q->dma_addr);
+		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
 		if (!q->entries)
 			goto err_no_mem;
-		memset(q->entries, 0, size);
+
 		size = sizeof(struct freelQ_ce) * q->size;
 		q->centries = kzalloc(size, GFP_KERNEL);
 		if (!q->centries)
@@ -613,11 +605,10 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
 	sge->respQ.size = SGE_RESPQ_E_N;
 	sge->respQ.credits = 0;
 	size = sizeof(struct respQ_e) * sge->respQ.size;
-	sge->respQ.entries = (struct respQ_e *)
+	sge->respQ.entries =
 		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
 	if (!sge->respQ.entries)
 		goto err_no_mem;
-	memset(sge->respQ.entries, 0, size);
 	return 0;
 
 err_no_mem:
@@ -637,20 +628,12 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
 	q->in_use -= n;
 	ce = &q->centries[cidx];
 	while (n--) {
-		if (q->sop) {
-			if (likely(pci_unmap_len(ce, dma_len))) {
-				pci_unmap_single(pdev,
-					pci_unmap_addr(ce, dma_addr),
-					pci_unmap_len(ce, dma_len),
-					PCI_DMA_TODEVICE);
+		if (likely(pci_unmap_len(ce, dma_len))) {
+			pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
+					 pci_unmap_len(ce, dma_len),
+					 PCI_DMA_TODEVICE);
+			if (q->sop)
 				q->sop = 0;
-			}
-		} else {
-			if (likely(pci_unmap_len(ce, dma_len))) {
-				pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
-					       pci_unmap_len(ce, dma_len),
-					       PCI_DMA_TODEVICE);
-			}
 		}
 		if (ce->skb) {
 			dev_kfree_skb_any(ce->skb);
@@ -711,11 +694,10 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
 		q->stop_thres = 0;
 		spin_lock_init(&q->lock);
 		size = sizeof(struct cmdQ_e) * q->size;
-		q->entries = (struct cmdQ_e *)
-			pci_alloc_consistent(pdev, size, &q->dma_addr);
+		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
 		if (!q->entries)
 			goto err_no_mem;
-		memset(q->entries, 0, size);
+
 		size = sizeof(struct cmdQ_ce) * q->size;
 		q->centries = kzalloc(size, GFP_KERNEL);
 		if (!q->centries)
@@ -770,7 +752,7 @@ void t1_set_vlan_accel(struct adapter *adapter, int on_off)
 static void configure_sge(struct sge *sge, struct sge_params *p)
 {
 	struct adapter *ap = sge->adapter;
 
 	writel(0, ap->regs + A_SG_CONTROL);
 	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
 			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
@@ -850,7 +832,6 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
 	struct freelQ_e *e = &q->entries[q->pidx];
 	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
 
-
 	while (q->credits < q->size) {
 		struct sk_buff *skb;
 		dma_addr_t mapping;
@@ -862,6 +843,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
 		skb_reserve(skb, q->dma_offset);
 		mapping = pci_map_single(pdev, skb->data, dma_len,
 					 PCI_DMA_FROMDEVICE);
+		skb_reserve(skb, sge->rx_pkt_pad);
+
 		ce->skb = skb;
 		pci_unmap_addr_set(ce, dma_addr, mapping);
 		pci_unmap_len_set(ce, dma_len, dma_len);
@@ -881,7 +864,6 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
 		}
 		q->credits++;
 	}
-
 }
 
 /*
@@ -1041,6 +1023,10 @@ static void recycle_fl_buf(struct freelQ *fl, int idx)
 	}
 }
 
+static int copybreak __read_mostly = 256;
+module_param(copybreak, int, 0);
+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+
 /**
  *	get_packet - return the next ingress packet buffer
  *	@pdev: the PCI device that received the packet
@@ -1060,45 +1046,42 @@ static void recycle_fl_buf(struct freelQ *fl, int idx)
  *	be copied but there is no memory for the copy.
  */
 static inline struct sk_buff *get_packet(struct pci_dev *pdev,
-					 struct freelQ *fl, unsigned int len,
-					 int dma_pad, int skb_pad,
-					 unsigned int copy_thres,
-					 unsigned int drop_thres)
+					 struct freelQ *fl, unsigned int len)
 {
 	struct sk_buff *skb;
-	struct freelQ_ce *ce = &fl->centries[fl->cidx];
+	const struct freelQ_ce *ce = &fl->centries[fl->cidx];
 
-	if (len < copy_thres) {
-		skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
-		if (likely(skb != NULL)) {
-			skb_reserve(skb, skb_pad);
-			skb_put(skb, len);
+	if (len < copybreak) {
+		skb = alloc_skb(len + 2, GFP_ATOMIC);
+		if (!skb)
1076 | pci_dma_sync_single_for_cpu(pdev, | ||
1077 | pci_unmap_addr(ce, dma_addr), | ||
1078 | pci_unmap_len(ce, dma_len), | ||
1079 | PCI_DMA_FROMDEVICE); | ||
1080 | memcpy(skb->data, ce->skb->data + dma_pad, len); | ||
1081 | pci_dma_sync_single_for_device(pdev, | ||
1082 | pci_unmap_addr(ce, dma_addr), | ||
1083 | pci_unmap_len(ce, dma_len), | ||
1084 | PCI_DMA_FROMDEVICE); | ||
1085 | } else if (!drop_thres) | ||
1086 | goto use_orig_buf; | 1057 | goto use_orig_buf; |
1087 | 1058 | ||
1059 | skb_reserve(skb, 2); /* align IP header */ | ||
1060 | skb_put(skb, len); | ||
1061 | pci_dma_sync_single_for_cpu(pdev, | ||
1062 | pci_unmap_addr(ce, dma_addr), | ||
1063 | pci_unmap_len(ce, dma_len), | ||
1064 | PCI_DMA_FROMDEVICE); | ||
1065 | memcpy(skb->data, ce->skb->data, len); | ||
1066 | pci_dma_sync_single_for_device(pdev, | ||
1067 | pci_unmap_addr(ce, dma_addr), | ||
1068 | pci_unmap_len(ce, dma_len), | ||
1069 | PCI_DMA_FROMDEVICE); | ||
1088 | recycle_fl_buf(fl, fl->cidx); | 1070 | recycle_fl_buf(fl, fl->cidx); |
1089 | return skb; | 1071 | return skb; |
1090 | } | 1072 | } |
1091 | 1073 | ||
1092 | if (fl->credits < drop_thres) { | 1074 | use_orig_buf: |
1075 | if (fl->credits < 2) { | ||
1093 | recycle_fl_buf(fl, fl->cidx); | 1076 | recycle_fl_buf(fl, fl->cidx); |
1094 | return NULL; | 1077 | return NULL; |
1095 | } | 1078 | } |
1096 | 1079 | ||
1097 | use_orig_buf: | ||
1098 | pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), | 1080 | pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), |
1099 | pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); | 1081 | pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); |
1100 | skb = ce->skb; | 1082 | skb = ce->skb; |
1101 | skb_reserve(skb, dma_pad); | 1083 | prefetch(skb->data); |
1084 | |||
1102 | skb_put(skb, len); | 1085 | skb_put(skb, len); |
1103 | return skb; | 1086 | return skb; |
1104 | } | 1087 | } |
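
Note on the hunk above: the rewritten get_packet() replaces its caller-supplied copy/drop thresholds with the new copybreak module parameter. Frames shorter than the threshold are copied into a small freshly allocated skb so the mapped free-list buffer can be recycled in place; larger frames hand over the original DMA buffer. A minimal user-space sketch of that decision follows; the threshold value mirrors the parameter's default, and the types and names are illustrative only, not the driver's.

        #include <stdlib.h>
        #include <string.h>

        #define COPYBREAK 256   /* mirrors the new module parameter's default */

        struct rx_buf { unsigned char *data; size_t size; };

        /*
         * Return a buffer holding 'len' bytes of packet data.  *recycled is set
         * when the original DMA buffer was left untouched and can be reposted
         * to the free list; otherwise the caller owns (and must unmap) buf.
         */
        unsigned char *rx_copybreak(struct rx_buf *buf, size_t len, int *recycled)
        {
                if (len < COPYBREAK) {
                        unsigned char *copy = malloc(len);
                        if (copy) {
                                memcpy(copy, buf->data, len); /* small frame: copy out */
                                *recycled = 1;                /* DMA buffer stays posted */
                                return copy;
                        }
                        /* no memory for the copy: fall back to the original buffer */
                }
                *recycled = 0;                                /* hand over the DMA buffer */
                return buf->data;
        }
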
@@ -1137,6 +1120,7 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl) | |||
1137 | static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) | 1120 | static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) |
1138 | { | 1121 | { |
1139 | unsigned int count = 0; | 1122 | unsigned int count = 0; |
1123 | |||
1140 | if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { | 1124 | if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { |
1141 | unsigned int nfrags = skb_shinfo(skb)->nr_frags; | 1125 | unsigned int nfrags = skb_shinfo(skb)->nr_frags; |
1142 | unsigned int i, len = skb->len - skb->data_len; | 1126 | unsigned int i, len = skb->len - skb->data_len; |
@@ -1343,7 +1327,7 @@ static void restart_sched(unsigned long arg) | |||
1343 | while ((skb = sched_skb(sge, NULL, credits)) != NULL) { | 1327 | while ((skb = sched_skb(sge, NULL, credits)) != NULL) { |
1344 | unsigned int genbit, pidx, count; | 1328 | unsigned int genbit, pidx, count; |
1345 | count = 1 + skb_shinfo(skb)->nr_frags; | 1329 | count = 1 + skb_shinfo(skb)->nr_frags; |
1346 | count += compute_large_page_tx_descs(skb); | 1330 | count += compute_large_page_tx_descs(skb); |
1347 | q->in_use += count; | 1331 | q->in_use += count; |
1348 | genbit = q->genbit; | 1332 | genbit = q->genbit; |
1349 | pidx = q->pidx; | 1333 | pidx = q->pidx; |
@@ -1375,27 +1359,25 @@ static void restart_sched(unsigned long arg) | |||
1375 | * | 1359 | * |
1376 | * Process an ingress ethernet packet and deliver it to the stack. | 1360 | * Process an ingress ethernet packet and deliver it to the stack. |
1377 | */ | 1361 | */ |
1378 | static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) | 1362 | static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) |
1379 | { | 1363 | { |
1380 | struct sk_buff *skb; | 1364 | struct sk_buff *skb; |
1381 | struct cpl_rx_pkt *p; | 1365 | const struct cpl_rx_pkt *p; |
1382 | struct adapter *adapter = sge->adapter; | 1366 | struct adapter *adapter = sge->adapter; |
1383 | struct sge_port_stats *st; | 1367 | struct sge_port_stats *st; |
1384 | 1368 | ||
1385 | skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad, | 1369 | skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad); |
1386 | sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES, | ||
1387 | SGE_RX_DROP_THRES); | ||
1388 | if (unlikely(!skb)) { | 1370 | if (unlikely(!skb)) { |
1389 | sge->stats.rx_drops++; | 1371 | sge->stats.rx_drops++; |
1390 | return 0; | 1372 | return; |
1391 | } | 1373 | } |
1392 | 1374 | ||
1393 | p = (struct cpl_rx_pkt *)skb->data; | 1375 | p = (const struct cpl_rx_pkt *) skb->data; |
1394 | skb_pull(skb, sizeof(*p)); | ||
1395 | if (p->iff >= adapter->params.nports) { | 1376 | if (p->iff >= adapter->params.nports) { |
1396 | kfree_skb(skb); | 1377 | kfree_skb(skb); |
1397 | return 0; | 1378 | return; |
1398 | } | 1379 | } |
1380 | __skb_pull(skb, sizeof(*p)); | ||
1399 | 1381 | ||
1400 | skb->dev = adapter->port[p->iff].dev; | 1382 | skb->dev = adapter->port[p->iff].dev; |
1401 | skb->dev->last_rx = jiffies; | 1383 | skb->dev->last_rx = jiffies; |
@@ -1427,7 +1409,6 @@ static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) | |||
1427 | netif_rx(skb); | 1409 | netif_rx(skb); |
1428 | #endif | 1410 | #endif |
1429 | } | 1411 | } |
1430 | return 0; | ||
1431 | } | 1412 | } |
1432 | 1413 | ||
1433 | /* | 1414 | /* |
@@ -1448,29 +1429,28 @@ static inline int enough_free_Tx_descs(const struct cmdQ *q) | |||
1448 | static void restart_tx_queues(struct sge *sge) | 1429 | static void restart_tx_queues(struct sge *sge) |
1449 | { | 1430 | { |
1450 | struct adapter *adap = sge->adapter; | 1431 | struct adapter *adap = sge->adapter; |
1432 | int i; | ||
1451 | 1433 | ||
1452 | if (enough_free_Tx_descs(&sge->cmdQ[0])) { | 1434 | if (!enough_free_Tx_descs(&sge->cmdQ[0])) |
1453 | int i; | 1435 | return; |
1454 | 1436 | ||
1455 | for_each_port(adap, i) { | 1437 | for_each_port(adap, i) { |
1456 | struct net_device *nd = adap->port[i].dev; | 1438 | struct net_device *nd = adap->port[i].dev; |
1457 | 1439 | ||
1458 | if (test_and_clear_bit(nd->if_port, | 1440 | if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) && |
1459 | &sge->stopped_tx_queues) && | 1441 | netif_running(nd)) { |
1460 | netif_running(nd)) { | 1442 | sge->stats.cmdQ_restarted[2]++; |
1461 | sge->stats.cmdQ_restarted[2]++; | 1443 | netif_wake_queue(nd); |
1462 | netif_wake_queue(nd); | ||
1463 | } | ||
1464 | } | 1444 | } |
1465 | } | 1445 | } |
1466 | } | 1446 | } |
1467 | 1447 | ||
1468 | /* | 1448 | /* |
1469 | * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 | 1449 | * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 |
1470 | * information. | 1450 | * information. |
1471 | */ | 1451 | */ |
1472 | static unsigned int update_tx_info(struct adapter *adapter, | 1452 | static unsigned int update_tx_info(struct adapter *adapter, |
1473 | unsigned int flags, | 1453 | unsigned int flags, |
1474 | unsigned int pr0) | 1454 | unsigned int pr0) |
1475 | { | 1455 | { |
1476 | struct sge *sge = adapter->sge; | 1456 | struct sge *sge = adapter->sge; |
@@ -1510,29 +1490,30 @@ static int process_responses(struct adapter *adapter, int budget) | |||
1510 | struct sge *sge = adapter->sge; | 1490 | struct sge *sge = adapter->sge; |
1511 | struct respQ *q = &sge->respQ; | 1491 | struct respQ *q = &sge->respQ; |
1512 | struct respQ_e *e = &q->entries[q->cidx]; | 1492 | struct respQ_e *e = &q->entries[q->cidx]; |
1513 | int budget_left = budget; | 1493 | int done = 0; |
1514 | unsigned int flags = 0; | 1494 | unsigned int flags = 0; |
1515 | unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; | 1495 | unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; |
1516 | |||
1517 | 1496 | ||
1518 | while (likely(budget_left && e->GenerationBit == q->genbit)) { | 1497 | while (done < budget && e->GenerationBit == q->genbit) { |
1519 | flags |= e->Qsleeping; | 1498 | flags |= e->Qsleeping; |
1520 | 1499 | ||
1521 | cmdq_processed[0] += e->Cmdq0CreditReturn; | 1500 | cmdq_processed[0] += e->Cmdq0CreditReturn; |
1522 | cmdq_processed[1] += e->Cmdq1CreditReturn; | 1501 | cmdq_processed[1] += e->Cmdq1CreditReturn; |
1523 | 1502 | ||
1524 | /* We batch updates to the TX side to avoid cacheline | 1503 | /* We batch updates to the TX side to avoid cacheline |
1525 | * ping-pong of TX state information on MP where the sender | 1504 | * ping-pong of TX state information on MP where the sender |
1526 | * might run on a different CPU than this function... | 1505 | * might run on a different CPU than this function... |
1527 | */ | 1506 | */ |
1528 | if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) { | 1507 | if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) { |
1529 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); | 1508 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); |
1530 | cmdq_processed[0] = 0; | 1509 | cmdq_processed[0] = 0; |
1531 | } | 1510 | } |
1511 | |||
1532 | if (unlikely(cmdq_processed[1] > 16)) { | 1512 | if (unlikely(cmdq_processed[1] > 16)) { |
1533 | sge->cmdQ[1].processed += cmdq_processed[1]; | 1513 | sge->cmdQ[1].processed += cmdq_processed[1]; |
1534 | cmdq_processed[1] = 0; | 1514 | cmdq_processed[1] = 0; |
1535 | } | 1515 | } |
1516 | |||
1536 | if (likely(e->DataValid)) { | 1517 | if (likely(e->DataValid)) { |
1537 | struct freelQ *fl = &sge->freelQ[e->FreelistQid]; | 1518 | struct freelQ *fl = &sge->freelQ[e->FreelistQid]; |
1538 | 1519 | ||
@@ -1542,12 +1523,16 @@ static int process_responses(struct adapter *adapter, int budget) | |||
1542 | else | 1523 | else |
1543 | sge_rx(sge, fl, e->BufferLength); | 1524 | sge_rx(sge, fl, e->BufferLength); |
1544 | 1525 | ||
1526 | ++done; | ||
1527 | |||
1545 | /* | 1528 | /* |
1546 | * Note: this depends on each packet consuming a | 1529 | * Note: this depends on each packet consuming a |
1547 | * single free-list buffer; cf. the BUG above. | 1530 | * single free-list buffer; cf. the BUG above. |
1548 | */ | 1531 | */ |
1549 | if (++fl->cidx == fl->size) | 1532 | if (++fl->cidx == fl->size) |
1550 | fl->cidx = 0; | 1533 | fl->cidx = 0; |
1534 | prefetch(fl->centries[fl->cidx].skb); | ||
1535 | |||
1551 | if (unlikely(--fl->credits < | 1536 | if (unlikely(--fl->credits < |
1552 | fl->size - SGE_FREEL_REFILL_THRESH)) | 1537 | fl->size - SGE_FREEL_REFILL_THRESH)) |
1553 | refill_free_list(sge, fl); | 1538 | refill_free_list(sge, fl); |
@@ -1566,14 +1551,20 @@ static int process_responses(struct adapter *adapter, int budget) | |||
1566 | writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); | 1551 | writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); |
1567 | q->credits = 0; | 1552 | q->credits = 0; |
1568 | } | 1553 | } |
1569 | --budget_left; | ||
1570 | } | 1554 | } |
1571 | 1555 | ||
1572 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); | 1556 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); |
1573 | sge->cmdQ[1].processed += cmdq_processed[1]; | 1557 | sge->cmdQ[1].processed += cmdq_processed[1]; |
1574 | 1558 | ||
1575 | budget -= budget_left; | 1559 | return done; |
1576 | return budget; | 1560 | } |
1561 | |||
1562 | static inline int responses_pending(const struct adapter *adapter) | ||
1563 | { | ||
1564 | const struct respQ *Q = &adapter->sge->respQ; | ||
1565 | const struct respQ_e *e = &Q->entries[Q->cidx]; | ||
1566 | |||
1567 | return (e->GenerationBit == Q->genbit); | ||
1577 | } | 1568 | } |
1578 | 1569 | ||
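
Note on the hunk above: the reworked process_responses() and the new responses_pending() helper both key off the response ring's generation bit. An entry belongs to software only while its GenerationBit matches the queue's genbit, and the bit flips each time the consumer index wraps, so stale entries from the previous pass are never re-read. A stripped-down sketch of that convention, with illustrative types rather than the driver's:

        struct resp_entry {
                unsigned int gen;          /* generation bit written by hardware */
                unsigned int data_valid;   /* carries a packet vs. pure credit return */
        };

        struct resp_queue {
                struct resp_entry *entries;
                unsigned int cidx;         /* consumer index */
                unsigned int size;
                unsigned int genbit;       /* generation software expects next */
        };

        /* An entry is ours only while its generation matches the queue's. */
        int resp_pending(const struct resp_queue *q)
        {
                return q->entries[q->cidx].gen == q->genbit;
        }

        /* Consume up to 'budget' entries, flipping genbit on every wrap-around. */
        int resp_process(struct resp_queue *q, int budget)
        {
                int done = 0;

                while (done < budget && resp_pending(q)) {
                        /* ... handle q->entries[q->cidx] here ... */
                        if (++q->cidx == q->size) {
                                q->cidx = 0;
                                q->genbit ^= 1;
                        }
                        done++;
                }
                return done;
        }
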
1579 | #ifdef CONFIG_CHELSIO_T1_NAPI | 1570 | #ifdef CONFIG_CHELSIO_T1_NAPI |
@@ -1585,19 +1576,25 @@ static int process_responses(struct adapter *adapter, int budget) | |||
1585 | * which the caller must ensure is a valid pure response. Returns 1 if it | 1576 | * which the caller must ensure is a valid pure response. Returns 1 if it |
1586 | * encounters a valid data-carrying response, 0 otherwise. | 1577 | * encounters a valid data-carrying response, 0 otherwise. |
1587 | */ | 1578 | */ |
1588 | static int process_pure_responses(struct adapter *adapter, struct respQ_e *e) | 1579 | static int process_pure_responses(struct adapter *adapter) |
1589 | { | 1580 | { |
1590 | struct sge *sge = adapter->sge; | 1581 | struct sge *sge = adapter->sge; |
1591 | struct respQ *q = &sge->respQ; | 1582 | struct respQ *q = &sge->respQ; |
1583 | struct respQ_e *e = &q->entries[q->cidx]; | ||
1584 | const struct freelQ *fl = &sge->freelQ[e->FreelistQid]; | ||
1592 | unsigned int flags = 0; | 1585 | unsigned int flags = 0; |
1593 | unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; | 1586 | unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; |
1594 | 1587 | ||
1588 | prefetch(fl->centries[fl->cidx].skb); | ||
1589 | if (e->DataValid) | ||
1590 | return 1; | ||
1591 | |||
1595 | do { | 1592 | do { |
1596 | flags |= e->Qsleeping; | 1593 | flags |= e->Qsleeping; |
1597 | 1594 | ||
1598 | cmdq_processed[0] += e->Cmdq0CreditReturn; | 1595 | cmdq_processed[0] += e->Cmdq0CreditReturn; |
1599 | cmdq_processed[1] += e->Cmdq1CreditReturn; | 1596 | cmdq_processed[1] += e->Cmdq1CreditReturn; |
1600 | 1597 | ||
1601 | e++; | 1598 | e++; |
1602 | if (unlikely(++q->cidx == q->size)) { | 1599 | if (unlikely(++q->cidx == q->size)) { |
1603 | q->cidx = 0; | 1600 | q->cidx = 0; |
@@ -1613,7 +1610,7 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e) | |||
1613 | sge->stats.pure_rsps++; | 1610 | sge->stats.pure_rsps++; |
1614 | } while (e->GenerationBit == q->genbit && !e->DataValid); | 1611 | } while (e->GenerationBit == q->genbit && !e->DataValid); |
1615 | 1612 | ||
1616 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); | 1613 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); |
1617 | sge->cmdQ[1].processed += cmdq_processed[1]; | 1614 | sge->cmdQ[1].processed += cmdq_processed[1]; |
1618 | 1615 | ||
1619 | return e->GenerationBit == q->genbit; | 1616 | return e->GenerationBit == q->genbit; |
@@ -1627,23 +1624,20 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e) | |||
1627 | int t1_poll(struct net_device *dev, int *budget) | 1624 | int t1_poll(struct net_device *dev, int *budget) |
1628 | { | 1625 | { |
1629 | struct adapter *adapter = dev->priv; | 1626 | struct adapter *adapter = dev->priv; |
1630 | int effective_budget = min(*budget, dev->quota); | 1627 | int work_done; |
1631 | int work_done = process_responses(adapter, effective_budget); | ||
1632 | 1628 | ||
1629 | work_done = process_responses(adapter, min(*budget, dev->quota)); | ||
1633 | *budget -= work_done; | 1630 | *budget -= work_done; |
1634 | dev->quota -= work_done; | 1631 | dev->quota -= work_done; |
1635 | 1632 | ||
1636 | if (work_done >= effective_budget) | 1633 | if (unlikely(responses_pending(adapter))) |
1637 | return 1; | 1634 | return 1; |
1638 | 1635 | ||
1639 | spin_lock_irq(&adapter->async_lock); | 1636 | netif_rx_complete(dev); |
1640 | __netif_rx_complete(dev); | ||
1641 | writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); | 1637 | writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); |
1642 | writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, | ||
1643 | adapter->regs + A_PL_ENABLE); | ||
1644 | spin_unlock_irq(&adapter->async_lock); | ||
1645 | 1638 | ||
1646 | return 0; | 1639 | return 0; |
1640 | |||
1647 | } | 1641 | } |
1648 | 1642 | ||
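
Note on the t1_poll() hunk above: after processing, the poll routine now re-checks the response ring and only leaves polling mode when nothing new is pending; otherwise it asks to be polled again instead of re-enabling the data interrupt. A schematic version of that control flow, using hypothetical callback names rather than the kernel's NAPI API:

        struct poll_ctx {
                int  (*process)(void *dev, int budget);   /* consume responses, return count */
                int  (*pending)(void *dev);               /* anything new on the response ring? */
                void (*complete)(void *dev);              /* leave poll mode, unmask the data IRQ */
        };

        /*
         * Process up to 'budget' responses, then decide whether to stay in
         * polling mode.  Re-checking pending() after processing is what closes
         * the race with responses that land just before completion.
         */
        int napi_style_poll(const struct poll_ctx *ctx, void *dev, int budget, int *work_done)
        {
                *work_done = ctx->process(dev, budget);

                if (ctx->pending(dev))
                        return 1;            /* more arrived: ask to be polled again */

                ctx->complete(dev);          /* quiescent: exit polling, re-arm interrupt */
                return 0;
        }
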
1649 | /* | 1643 | /* |
@@ -1652,44 +1646,33 @@ int t1_poll(struct net_device *dev, int *budget) | |||
1652 | irqreturn_t t1_interrupt(int irq, void *data) | 1646 | irqreturn_t t1_interrupt(int irq, void *data) |
1653 | { | 1647 | { |
1654 | struct adapter *adapter = data; | 1648 | struct adapter *adapter = data; |
1655 | struct net_device *dev = adapter->sge->netdev; | ||
1656 | struct sge *sge = adapter->sge; | 1649 | struct sge *sge = adapter->sge; |
1657 | u32 cause; | 1650 | int handled; |
1658 | int handled = 0; | ||
1659 | 1651 | ||
1660 | cause = readl(adapter->regs + A_PL_CAUSE); | 1652 | if (likely(responses_pending(adapter))) { |
1661 | if (cause == 0 || cause == ~0) | 1653 | struct net_device *dev = sge->netdev; |
1662 | return IRQ_NONE; | ||
1663 | 1654 | ||
1664 | spin_lock(&adapter->async_lock); | 1655 | writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); |
1665 | if (cause & F_PL_INTR_SGE_DATA) { | ||
1666 | struct respQ *q = &adapter->sge->respQ; | ||
1667 | struct respQ_e *e = &q->entries[q->cidx]; | ||
1668 | |||
1669 | handled = 1; | ||
1670 | writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); | ||
1671 | |||
1672 | if (e->GenerationBit == q->genbit && | ||
1673 | __netif_rx_schedule_prep(dev)) { | ||
1674 | if (e->DataValid || process_pure_responses(adapter, e)) { | ||
1675 | /* mask off data IRQ */ | ||
1676 | writel(adapter->slow_intr_mask, | ||
1677 | adapter->regs + A_PL_ENABLE); | ||
1678 | __netif_rx_schedule(sge->netdev); | ||
1679 | goto unlock; | ||
1680 | } | ||
1681 | /* no data, no NAPI needed */ | ||
1682 | netif_poll_enable(dev); | ||
1683 | 1656 | ||
1657 | if (__netif_rx_schedule_prep(dev)) { | ||
1658 | if (process_pure_responses(adapter)) | ||
1659 | __netif_rx_schedule(dev); | ||
1660 | else { | ||
1661 | /* no data, no NAPI needed */ | ||
1662 | writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); | ||
1663 | netif_poll_enable(dev); /* undo schedule_prep */ | ||
1664 | } | ||
1684 | } | 1665 | } |
1685 | writel(q->cidx, adapter->regs + A_SG_SLEEPING); | 1666 | return IRQ_HANDLED; |
1686 | } else | 1667 | } |
1687 | handled = t1_slow_intr_handler(adapter); | 1668 | |
1669 | spin_lock(&adapter->async_lock); | ||
1670 | handled = t1_slow_intr_handler(adapter); | ||
1671 | spin_unlock(&adapter->async_lock); | ||
1688 | 1672 | ||
1689 | if (!handled) | 1673 | if (!handled) |
1690 | sge->stats.unhandled_irqs++; | 1674 | sge->stats.unhandled_irqs++; |
1691 | unlock: | 1675 | |
1692 | spin_unlock(&adapter->async_lock); | ||
1693 | return IRQ_RETVAL(handled != 0); | 1676 | return IRQ_RETVAL(handled != 0); |
1694 | } | 1677 | } |
1695 | 1678 | ||
@@ -1712,17 +1695,13 @@ unlock: | |||
1712 | irqreturn_t t1_interrupt(int irq, void *cookie) | 1695 | irqreturn_t t1_interrupt(int irq, void *cookie) |
1713 | { | 1696 | { |
1714 | int work_done; | 1697 | int work_done; |
1715 | struct respQ_e *e; | ||
1716 | struct adapter *adapter = cookie; | 1698 | struct adapter *adapter = cookie; |
1717 | struct respQ *Q = &adapter->sge->respQ; | ||
1718 | 1699 | ||
1719 | spin_lock(&adapter->async_lock); | 1700 | spin_lock(&adapter->async_lock); |
1720 | e = &Q->entries[Q->cidx]; | ||
1721 | prefetch(e); | ||
1722 | 1701 | ||
1723 | writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); | 1702 | writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); |
1724 | 1703 | ||
1725 | if (likely(e->GenerationBit == Q->genbit)) | 1704 | if (likely(responses_pending(adapter))) |
1726 | work_done = process_responses(adapter, -1); | 1705 | work_done = process_responses(adapter, -1); |
1727 | else | 1706 | else |
1728 | work_done = t1_slow_intr_handler(adapter); | 1707 | work_done = t1_slow_intr_handler(adapter); |
@@ -1796,7 +1775,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, | |||
1796 | * through the scheduler. | 1775 | * through the scheduler. |
1797 | */ | 1776 | */ |
1798 | if (sge->tx_sched && !qid && skb->dev) { | 1777 | if (sge->tx_sched && !qid && skb->dev) { |
1799 | use_sched: | 1778 | use_sched: |
1800 | use_sched_skb = 1; | 1779 | use_sched_skb = 1; |
1801 | /* Note that the scheduler might return a different skb than | 1780 | /* Note that the scheduler might return a different skb than |
1802 | * the one passed in. | 1781 | * the one passed in. |
@@ -1900,7 +1879,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1900 | cpl = (struct cpl_tx_pkt *)hdr; | 1879 | cpl = (struct cpl_tx_pkt *)hdr; |
1901 | } else { | 1880 | } else { |
1902 | /* | 1881 | /* |
1903 | * Packets shorter than ETH_HLEN can break the MAC, drop them | 1882 | * Packets shorter than ETH_HLEN can break the MAC, drop them |
1904 | * early. Also, we may get oversized packets because some | 1883 | * early. Also, we may get oversized packets because some |
1905 | * parts of the kernel don't handle our unusual hard_header_len | 1884 | * parts of the kernel don't handle our unusual hard_header_len |
1906 | * right, drop those too. | 1885 | * right, drop those too. |
@@ -1984,9 +1963,9 @@ send: | |||
1984 | * then silently discard to avoid leak. | 1963 | * then silently discard to avoid leak. |
1985 | */ | 1964 | */ |
1986 | if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) { | 1965 | if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) { |
1987 | dev_kfree_skb_any(skb); | 1966 | dev_kfree_skb_any(skb); |
1988 | ret = NETDEV_TX_OK; | 1967 | ret = NETDEV_TX_OK; |
1989 | } | 1968 | } |
1990 | return ret; | 1969 | return ret; |
1991 | } | 1970 | } |
1992 | 1971 | ||
@@ -2099,31 +2078,35 @@ static void espibug_workaround_t204(unsigned long data) | |||
2099 | 2078 | ||
2100 | if (adapter->open_device_map & PORT_MASK) { | 2079 | if (adapter->open_device_map & PORT_MASK) { |
2101 | int i; | 2080 | int i; |
2102 | if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) { | 2081 | |
2082 | if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) | ||
2103 | return; | 2083 | return; |
2104 | } | 2084 | |
2105 | for (i = 0; i < nports; i++) { | 2085 | for (i = 0; i < nports; i++) { |
2106 | struct sk_buff *skb = sge->espibug_skb[i]; | 2086 | struct sk_buff *skb = sge->espibug_skb[i]; |
2107 | if ( (netif_running(adapter->port[i].dev)) && | 2087 | |
2108 | !(netif_queue_stopped(adapter->port[i].dev)) && | 2088 | if (!netif_running(adapter->port[i].dev) || |
2109 | (seop[i] && ((seop[i] & 0xfff) == 0)) && | 2089 | netif_queue_stopped(adapter->port[i].dev) || |
2110 | skb ) { | 2090 | !seop[i] || ((seop[i] & 0xfff) != 0) || !skb) |
2111 | if (!skb->cb[0]) { | 2091 | continue; |
2112 | u8 ch_mac_addr[ETH_ALEN] = | 2092 | |
2113 | {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; | 2093 | if (!skb->cb[0]) { |
2114 | memcpy(skb->data + sizeof(struct cpl_tx_pkt), | 2094 | u8 ch_mac_addr[ETH_ALEN] = { |
2115 | ch_mac_addr, ETH_ALEN); | 2095 | 0x0, 0x7, 0x43, 0x0, 0x0, 0x0 |
2116 | memcpy(skb->data + skb->len - 10, | 2096 | }; |
2117 | ch_mac_addr, ETH_ALEN); | 2097 | |
2118 | skb->cb[0] = 0xff; | 2098 | memcpy(skb->data + sizeof(struct cpl_tx_pkt), |
2119 | } | 2099 | ch_mac_addr, ETH_ALEN); |
2120 | 2100 | memcpy(skb->data + skb->len - 10, | |
2121 | /* bump the reference count to avoid freeing of | 2101 | ch_mac_addr, ETH_ALEN); |
2122 | * the skb once the DMA has completed. | 2102 | skb->cb[0] = 0xff; |
2123 | */ | ||
2124 | skb = skb_get(skb); | ||
2125 | t1_sge_tx(skb, adapter, 0, adapter->port[i].dev); | ||
2126 | } | 2103 | } |
2104 | |||
2105 | /* bump the reference count to avoid freeing of | ||
2106 | * the skb once the DMA has completed. | ||
2107 | */ | ||
2108 | skb = skb_get(skb); | ||
2109 | t1_sge_tx(skb, adapter, 0, adapter->port[i].dev); | ||
2127 | } | 2110 | } |
2128 | } | 2111 | } |
2129 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); | 2112 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); |
@@ -2192,9 +2175,8 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter, | |||
2192 | if (adapter->params.nports > 1) { | 2175 | if (adapter->params.nports > 1) { |
2193 | tx_sched_init(sge); | 2176 | tx_sched_init(sge); |
2194 | sge->espibug_timer.function = espibug_workaround_t204; | 2177 | sge->espibug_timer.function = espibug_workaround_t204; |
2195 | } else { | 2178 | } else |
2196 | sge->espibug_timer.function = espibug_workaround; | 2179 | sge->espibug_timer.function = espibug_workaround; |
2197 | } | ||
2198 | sge->espibug_timer.data = (unsigned long)sge->adapter; | 2180 | sge->espibug_timer.data = (unsigned long)sge->adapter; |
2199 | 2181 | ||
2200 | sge->espibug_timeout = 1; | 2182 | sge->espibug_timeout = 1; |
@@ -2202,7 +2184,7 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter, | |||
2202 | if (adapter->params.nports > 1) | 2184 | if (adapter->params.nports > 1) |
2203 | sge->espibug_timeout = HZ/100; | 2185 | sge->espibug_timeout = HZ/100; |
2204 | } | 2186 | } |
2205 | 2187 | ||
2206 | 2188 | ||
2207 | p->cmdQ_size[0] = SGE_CMDQ0_E_N; | 2189 | p->cmdQ_size[0] = SGE_CMDQ0_E_N; |
2208 | p->cmdQ_size[1] = SGE_CMDQ1_E_N; | 2190 | p->cmdQ_size[1] = SGE_CMDQ1_E_N; |
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c index 22ed9a383c08..c2522cdfab37 100644 --- a/drivers/net/chelsio/subr.c +++ b/drivers/net/chelsio/subr.c | |||
@@ -223,13 +223,13 @@ static int fpga_slow_intr(adapter_t *adapter) | |||
223 | t1_sge_intr_error_handler(adapter->sge); | 223 | t1_sge_intr_error_handler(adapter->sge); |
224 | 224 | ||
225 | if (cause & FPGA_PCIX_INTERRUPT_GMAC) | 225 | if (cause & FPGA_PCIX_INTERRUPT_GMAC) |
226 | fpga_phy_intr_handler(adapter); | 226 | fpga_phy_intr_handler(adapter); |
227 | 227 | ||
228 | if (cause & FPGA_PCIX_INTERRUPT_TP) { | 228 | if (cause & FPGA_PCIX_INTERRUPT_TP) { |
229 | /* | 229 | /* |
230 | * FPGA doesn't support MC4 interrupts and it requires | 230 | * FPGA doesn't support MC4 interrupts and it requires |
231 | * this odd layer of indirection for MC5. | 231 | * this odd layer of indirection for MC5. |
232 | */ | 232 | */ |
233 | u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); | 233 | u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); |
234 | 234 | ||
235 | /* Clear TP interrupt */ | 235 | /* Clear TP interrupt */ |
@@ -262,8 +262,7 @@ static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg) | |||
262 | udelay(10); | 262 | udelay(10); |
263 | } while (busy && --attempts); | 263 | } while (busy && --attempts); |
264 | if (busy) | 264 | if (busy) |
265 | CH_ALERT("%s: MDIO operation timed out\n", | 265 | CH_ALERT("%s: MDIO operation timed out\n", adapter->name); |
266 | adapter->name); | ||
267 | return busy; | 266 | return busy; |
268 | } | 267 | } |
269 | 268 | ||
@@ -605,22 +604,23 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter) | |||
605 | 604 | ||
606 | switch (board_info(adapter)->board) { | 605 | switch (board_info(adapter)->board) { |
607 | #ifdef CONFIG_CHELSIO_T1_1G | 606 | #ifdef CONFIG_CHELSIO_T1_1G |
608 | case CHBT_BOARD_CHT204: | 607 | case CHBT_BOARD_CHT204: |
609 | case CHBT_BOARD_CHT204E: | 608 | case CHBT_BOARD_CHT204E: |
610 | case CHBT_BOARD_CHN204: | 609 | case CHBT_BOARD_CHN204: |
611 | case CHBT_BOARD_CHT204V: { | 610 | case CHBT_BOARD_CHT204V: { |
612 | int i, port_bit; | 611 | int i, port_bit; |
613 | for_each_port(adapter, i) { | 612 | for_each_port(adapter, i) { |
614 | port_bit = i + 1; | 613 | port_bit = i + 1; |
615 | if (!(cause & (1 << port_bit))) continue; | 614 | if (!(cause & (1 << port_bit))) |
615 | continue; | ||
616 | 616 | ||
617 | phy = adapter->port[i].phy; | 617 | phy = adapter->port[i].phy; |
618 | phy_cause = phy->ops->interrupt_handler(phy); | 618 | phy_cause = phy->ops->interrupt_handler(phy); |
619 | if (phy_cause & cphy_cause_link_change) | 619 | if (phy_cause & cphy_cause_link_change) |
620 | t1_link_changed(adapter, i); | 620 | t1_link_changed(adapter, i); |
621 | } | 621 | } |
622 | break; | 622 | break; |
623 | } | 623 | } |
624 | case CHBT_BOARD_CHT101: | 624 | case CHBT_BOARD_CHT101: |
625 | if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */ | 625 | if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */ |
626 | phy = adapter->port[0].phy; | 626 | phy = adapter->port[0].phy; |
@@ -631,13 +631,13 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter) | |||
631 | break; | 631 | break; |
632 | case CHBT_BOARD_7500: { | 632 | case CHBT_BOARD_7500: { |
633 | int p; | 633 | int p; |
634 | /* | 634 | /* |
635 | * Elmer0's interrupt cause isn't useful here because there is | 635 | * Elmer0's interrupt cause isn't useful here because there is |
636 | * only one bit that can be set for all 4 ports. This means | 636 | * only one bit that can be set for all 4 ports. This means |
637 | * we are forced to check every PHY's interrupt status | 637 | * we are forced to check every PHY's interrupt status |
638 | * register to see who initiated the interrupt. | 638 | * register to see who initiated the interrupt. |
639 | */ | 639 | */ |
640 | for_each_port(adapter, p) { | 640 | for_each_port(adapter, p) { |
641 | phy = adapter->port[p].phy; | 641 | phy = adapter->port[p].phy; |
642 | phy_cause = phy->ops->interrupt_handler(phy); | 642 | phy_cause = phy->ops->interrupt_handler(phy); |
643 | if (phy_cause & cphy_cause_link_change) | 643 | if (phy_cause & cphy_cause_link_change) |
@@ -658,7 +658,7 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter) | |||
658 | break; | 658 | break; |
659 | case CHBT_BOARD_8000: | 659 | case CHBT_BOARD_8000: |
660 | case CHBT_BOARD_CHT110: | 660 | case CHBT_BOARD_CHT110: |
661 | CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n", | 661 | CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n", |
662 | cause); | 662 | cause); |
663 | if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */ | 663 | if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */ |
664 | struct cmac *mac = adapter->port[0].mac; | 664 | struct cmac *mac = adapter->port[0].mac; |
@@ -670,9 +670,9 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter) | |||
670 | 670 | ||
671 | t1_tpi_read(adapter, | 671 | t1_tpi_read(adapter, |
672 | A_ELMER0_GPI_STAT, &mod_detect); | 672 | A_ELMER0_GPI_STAT, &mod_detect); |
673 | CH_MSG(adapter, INFO, LINK, "XPAK %s\n", | 673 | CH_MSG(adapter, INFO, LINK, "XPAK %s\n", |
674 | mod_detect ? "removed" : "inserted"); | 674 | mod_detect ? "removed" : "inserted"); |
675 | } | 675 | } |
676 | break; | 676 | break; |
677 | #ifdef CONFIG_CHELSIO_T1_COUGAR | 677 | #ifdef CONFIG_CHELSIO_T1_COUGAR |
678 | case CHBT_BOARD_COUGAR: | 678 | case CHBT_BOARD_COUGAR: |
@@ -688,7 +688,8 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter) | |||
688 | 688 | ||
689 | for_each_port(adapter, i) { | 689 | for_each_port(adapter, i) { |
690 | port_bit = i ? i + 1 : 0; | 690 | port_bit = i ? i + 1 : 0; |
691 | if (!(cause & (1 << port_bit))) continue; | 691 | if (!(cause & (1 << port_bit))) |
692 | continue; | ||
692 | 693 | ||
693 | phy = adapter->port[i].phy; | 694 | phy = adapter->port[i].phy; |
694 | phy_cause = phy->ops->interrupt_handler(phy); | 695 | phy_cause = phy->ops->interrupt_handler(phy); |
@@ -755,7 +756,7 @@ void t1_interrupts_disable(adapter_t* adapter) | |||
755 | 756 | ||
756 | /* Disable PCIX & external chip interrupts. */ | 757 | /* Disable PCIX & external chip interrupts. */ |
757 | if (t1_is_asic(adapter)) | 758 | if (t1_is_asic(adapter)) |
758 | writel(0, adapter->regs + A_PL_ENABLE); | 759 | writel(0, adapter->regs + A_PL_ENABLE); |
759 | 760 | ||
760 | /* PCI-X interrupts */ | 761 | /* PCI-X interrupts */ |
761 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); | 762 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); |
@@ -830,11 +831,11 @@ int t1_slow_intr_handler(adapter_t *adapter) | |||
830 | /* Power sequencing is a work-around for Intel's XPAKs. */ | 831 | /* Power sequencing is a work-around for Intel's XPAKs. */ |
831 | static void power_sequence_xpak(adapter_t* adapter) | 832 | static void power_sequence_xpak(adapter_t* adapter) |
832 | { | 833 | { |
833 | u32 mod_detect; | 834 | u32 mod_detect; |
834 | u32 gpo; | 835 | u32 gpo; |
835 | 836 | ||
836 | /* Check for XPAK */ | 837 | /* Check for XPAK */ |
837 | t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); | 838 | t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); |
838 | if (!(ELMER0_GP_BIT5 & mod_detect)) { | 839 | if (!(ELMER0_GP_BIT5 & mod_detect)) { |
839 | /* XPAK is present */ | 840 | /* XPAK is present */ |
840 | t1_tpi_read(adapter, A_ELMER0_GPO, &gpo); | 841 | t1_tpi_read(adapter, A_ELMER0_GPO, &gpo); |
@@ -877,31 +878,31 @@ static int board_init(adapter_t *adapter, const struct board_info *bi) | |||
877 | case CHBT_BOARD_N210: | 878 | case CHBT_BOARD_N210: |
878 | case CHBT_BOARD_CHT210: | 879 | case CHBT_BOARD_CHT210: |
879 | case CHBT_BOARD_COUGAR: | 880 | case CHBT_BOARD_COUGAR: |
880 | t1_tpi_par(adapter, 0xf); | 881 | t1_tpi_par(adapter, 0xf); |
881 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); | 882 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); |
882 | break; | 883 | break; |
883 | case CHBT_BOARD_CHT110: | 884 | case CHBT_BOARD_CHT110: |
884 | t1_tpi_par(adapter, 0xf); | 885 | t1_tpi_par(adapter, 0xf); |
885 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800); | 886 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800); |
886 | 887 | ||
887 | /* TBD XXX Might not need. This fixes a problem | 888 | /* TBD XXX Might not need. This fixes a problem |
888 | * described in the Intel SR XPAK errata. | 889 | * described in the Intel SR XPAK errata. |
889 | */ | 890 | */ |
890 | power_sequence_xpak(adapter); | 891 | power_sequence_xpak(adapter); |
891 | break; | 892 | break; |
892 | #ifdef CONFIG_CHELSIO_T1_1G | 893 | #ifdef CONFIG_CHELSIO_T1_1G |
893 | case CHBT_BOARD_CHT204E: | 894 | case CHBT_BOARD_CHT204E: |
894 | /* add config space write here */ | 895 | /* add config space write here */ |
895 | case CHBT_BOARD_CHT204: | 896 | case CHBT_BOARD_CHT204: |
896 | case CHBT_BOARD_CHT204V: | 897 | case CHBT_BOARD_CHT204V: |
897 | case CHBT_BOARD_CHN204: | 898 | case CHBT_BOARD_CHN204: |
898 | t1_tpi_par(adapter, 0xf); | 899 | t1_tpi_par(adapter, 0xf); |
899 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x804); | 900 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x804); |
900 | break; | 901 | break; |
901 | case CHBT_BOARD_CHT101: | 902 | case CHBT_BOARD_CHT101: |
902 | case CHBT_BOARD_7500: | 903 | case CHBT_BOARD_7500: |
903 | t1_tpi_par(adapter, 0xf); | 904 | t1_tpi_par(adapter, 0xf); |
904 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804); | 905 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804); |
905 | break; | 906 | break; |
906 | #endif | 907 | #endif |
907 | } | 908 | } |
@@ -941,7 +942,7 @@ int t1_init_hw_modules(adapter_t *adapter) | |||
941 | goto out_err; | 942 | goto out_err; |
942 | 943 | ||
943 | err = 0; | 944 | err = 0; |
944 | out_err: | 945 | out_err: |
945 | return err; | 946 | return err; |
946 | } | 947 | } |
947 | 948 | ||
@@ -983,7 +984,7 @@ void t1_free_sw_modules(adapter_t *adapter) | |||
983 | if (adapter->espi) | 984 | if (adapter->espi) |
984 | t1_espi_destroy(adapter->espi); | 985 | t1_espi_destroy(adapter->espi); |
985 | #ifdef CONFIG_CHELSIO_T1_COUGAR | 986 | #ifdef CONFIG_CHELSIO_T1_COUGAR |
986 | if (adapter->cspi) | 987 | if (adapter->cspi) |
987 | t1_cspi_destroy(adapter->cspi); | 988 | t1_cspi_destroy(adapter->cspi); |
988 | #endif | 989 | #endif |
989 | } | 990 | } |
@@ -1010,7 +1011,7 @@ static void __devinit init_link_config(struct link_config *lc, | |||
1010 | CH_ERR("%s: CSPI initialization failed\n", | 1011 | CH_ERR("%s: CSPI initialization failed\n", |
1011 | adapter->name); | 1012 | adapter->name); |
1012 | goto error; | 1013 | goto error; |
1013 | } | 1014 | } |
1014 | #endif | 1015 | #endif |
1015 | 1016 | ||
1016 | /* | 1017 | /* |
diff --git a/drivers/net/chelsio/tp.c b/drivers/net/chelsio/tp.c index 0ca0b6e19e43..6222d585e447 100644 --- a/drivers/net/chelsio/tp.c +++ b/drivers/net/chelsio/tp.c | |||
@@ -17,39 +17,36 @@ struct petp { | |||
17 | static void tp_init(adapter_t * ap, const struct tp_params *p, | 17 | static void tp_init(adapter_t * ap, const struct tp_params *p, |
18 | unsigned int tp_clk) | 18 | unsigned int tp_clk) |
19 | { | 19 | { |
20 | if (t1_is_asic(ap)) { | 20 | u32 val; |
21 | u32 val; | ||
22 | |||
23 | val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM | | ||
24 | F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET; | ||
25 | if (!p->pm_size) | ||
26 | val |= F_OFFLOAD_DISABLE; | ||
27 | else | ||
28 | val |= F_TP_IN_ESPI_CHECK_IP_CSUM | | ||
29 | F_TP_IN_ESPI_CHECK_TCP_CSUM; | ||
30 | writel(val, ap->regs + A_TP_IN_CONFIG); | ||
31 | writel(F_TP_OUT_CSPI_CPL | | ||
32 | F_TP_OUT_ESPI_ETHERNET | | ||
33 | F_TP_OUT_ESPI_GENERATE_IP_CSUM | | ||
34 | F_TP_OUT_ESPI_GENERATE_TCP_CSUM, | ||
35 | ap->regs + A_TP_OUT_CONFIG); | ||
36 | writel(V_IP_TTL(64) | | ||
37 | F_PATH_MTU /* IP DF bit */ | | ||
38 | V_5TUPLE_LOOKUP(p->use_5tuple_mode) | | ||
39 | V_SYN_COOKIE_PARAMETER(29), | ||
40 | ap->regs + A_TP_GLOBAL_CONFIG); | ||
41 | /* | ||
42 | * Enable pause frame deadlock prevention. | ||
43 | */ | ||
44 | if (is_T2(ap) && ap->params.nports > 1) { | ||
45 | u32 drop_ticks = DROP_MSEC * (tp_clk / 1000); | ||
46 | |||
47 | writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR | | ||
48 | V_DROP_TICKS_CNT(drop_ticks) | | ||
49 | V_NUM_PKTS_DROPPED(DROP_PKTS_CNT), | ||
50 | ap->regs + A_TP_TX_DROP_CONFIG); | ||
51 | } | ||
52 | 21 | ||
22 | if (!t1_is_asic(ap)) | ||
23 | return; | ||
24 | |||
25 | val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM | | ||
26 | F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET; | ||
27 | if (!p->pm_size) | ||
28 | val |= F_OFFLOAD_DISABLE; | ||
29 | else | ||
30 | val |= F_TP_IN_ESPI_CHECK_IP_CSUM | F_TP_IN_ESPI_CHECK_TCP_CSUM; | ||
31 | writel(val, ap->regs + A_TP_IN_CONFIG); | ||
32 | writel(F_TP_OUT_CSPI_CPL | | ||
33 | F_TP_OUT_ESPI_ETHERNET | | ||
34 | F_TP_OUT_ESPI_GENERATE_IP_CSUM | | ||
35 | F_TP_OUT_ESPI_GENERATE_TCP_CSUM, ap->regs + A_TP_OUT_CONFIG); | ||
36 | writel(V_IP_TTL(64) | | ||
37 | F_PATH_MTU /* IP DF bit */ | | ||
38 | V_5TUPLE_LOOKUP(p->use_5tuple_mode) | | ||
39 | V_SYN_COOKIE_PARAMETER(29), ap->regs + A_TP_GLOBAL_CONFIG); | ||
40 | /* | ||
41 | * Enable pause frame deadlock prevention. | ||
42 | */ | ||
43 | if (is_T2(ap) && ap->params.nports > 1) { | ||
44 | u32 drop_ticks = DROP_MSEC * (tp_clk / 1000); | ||
45 | |||
46 | writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR | | ||
47 | V_DROP_TICKS_CNT(drop_ticks) | | ||
48 | V_NUM_PKTS_DROPPED(DROP_PKTS_CNT), | ||
49 | ap->regs + A_TP_TX_DROP_CONFIG); | ||
53 | } | 50 | } |
54 | } | 51 | } |
55 | 52 | ||
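
Note on the tp_init() hunk above: the change is a pure de-nesting. The register programming that used to sit inside if (t1_is_asic(ap)) { ... } now follows an early return, so the sequence reads at a single indent level. The shape of that refactor, reduced to a sketch with generic names rather than the driver's registers:

        struct chip { int is_asic; int reg_a; int reg_b; };

        /* Before: the whole setup nested under one condition. */
        void setup_nested(struct chip *c)
        {
                if (c->is_asic) {
                        c->reg_a = 1;
                        c->reg_b = 2;
                }
        }

        /* After: guard clause first, then the straight-line programming sequence. */
        void setup_early_return(struct chip *c)
        {
                if (!c->is_asic)
                        return;

                c->reg_a = 1;
                c->reg_b = 2;
        }
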
@@ -61,6 +58,7 @@ void t1_tp_destroy(struct petp *tp) | |||
61 | struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p) | 58 | struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p) |
62 | { | 59 | { |
63 | struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL); | 60 | struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL); |
61 | |||
64 | if (!tp) | 62 | if (!tp) |
65 | return NULL; | 63 | return NULL; |
66 | 64 | ||
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c index 85dc3b1dc309..534ffa0f616e 100644 --- a/drivers/net/chelsio/vsc7326.c +++ b/drivers/net/chelsio/vsc7326.c | |||
@@ -226,22 +226,21 @@ static void run_table(adapter_t *adapter, struct init_table *ib, int len) | |||
226 | if (ib[i].addr == INITBLOCK_SLEEP) { | 226 | if (ib[i].addr == INITBLOCK_SLEEP) { |
227 | udelay( ib[i].data ); | 227 | udelay( ib[i].data ); |
228 | CH_ERR("sleep %d us\n",ib[i].data); | 228 | CH_ERR("sleep %d us\n",ib[i].data); |
229 | } else { | 229 | } else |
230 | vsc_write( adapter, ib[i].addr, ib[i].data ); | 230 | vsc_write( adapter, ib[i].addr, ib[i].data ); |
231 | } | ||
232 | } | 231 | } |
233 | } | 232 | } |
234 | 233 | ||
235 | static int bist_rd(adapter_t *adapter, int moduleid, int address) | 234 | static int bist_rd(adapter_t *adapter, int moduleid, int address) |
236 | { | 235 | { |
237 | int data=0; | 236 | int data = 0; |
238 | u32 result=0; | 237 | u32 result = 0; |
239 | 238 | ||
240 | if( (address != 0x0) && | 239 | if ((address != 0x0) && |
241 | (address != 0x1) && | 240 | (address != 0x1) && |
242 | (address != 0x2) && | 241 | (address != 0x2) && |
243 | (address != 0xd) && | 242 | (address != 0xd) && |
244 | (address != 0xe)) | 243 | (address != 0xe)) |
245 | CH_ERR("No bist address: 0x%x\n", address); | 244 | CH_ERR("No bist address: 0x%x\n", address); |
246 | 245 | ||
247 | data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) | | 246 | data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) | |
@@ -251,27 +250,27 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address) | |||
251 | udelay(10); | 250 | udelay(10); |
252 | 251 | ||
253 | vsc_read(adapter, REG_RAM_BIST_RESULT, &result); | 252 | vsc_read(adapter, REG_RAM_BIST_RESULT, &result); |
254 | if((result & (1<<9)) != 0x0) | 253 | if ((result & (1 << 9)) != 0x0) |
255 | CH_ERR("Still in bist read: 0x%x\n", result); | 254 | CH_ERR("Still in bist read: 0x%x\n", result); |
256 | else if((result & (1<<8)) != 0x0) | 255 | else if ((result & (1 << 8)) != 0x0) |
257 | CH_ERR("bist read error: 0x%x\n", result); | 256 | CH_ERR("bist read error: 0x%x\n", result); |
258 | 257 | ||
259 | return(result & 0xff); | 258 | return (result & 0xff); |
260 | } | 259 | } |
261 | 260 | ||
262 | static int bist_wr(adapter_t *adapter, int moduleid, int address, int value) | 261 | static int bist_wr(adapter_t *adapter, int moduleid, int address, int value) |
263 | { | 262 | { |
264 | int data=0; | 263 | int data = 0; |
265 | u32 result=0; | 264 | u32 result = 0; |
266 | 265 | ||
267 | if( (address != 0x0) && | 266 | if ((address != 0x0) && |
268 | (address != 0x1) && | 267 | (address != 0x1) && |
269 | (address != 0x2) && | 268 | (address != 0x2) && |
270 | (address != 0xd) && | 269 | (address != 0xd) && |
271 | (address != 0xe)) | 270 | (address != 0xe)) |
272 | CH_ERR("No bist address: 0x%x\n", address); | 271 | CH_ERR("No bist address: 0x%x\n", address); |
273 | 272 | ||
274 | if( value>255 ) | 273 | if (value > 255) |
275 | CH_ERR("Suspicious write out of range value: 0x%x\n", value); | 274 | CH_ERR("Suspicious write out of range value: 0x%x\n", value); |
276 | 275 | ||
277 | data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) | | 276 | data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) | |
@@ -281,12 +280,12 @@ static int bist_wr(adapter_t *adapter, int moduleid, int address, int value) | |||
281 | udelay(5); | 280 | udelay(5); |
282 | 281 | ||
283 | vsc_read(adapter, REG_RAM_BIST_CMD, &result); | 282 | vsc_read(adapter, REG_RAM_BIST_CMD, &result); |
284 | if((result & (1<<27)) != 0x0) | 283 | if ((result & (1 << 27)) != 0x0) |
285 | CH_ERR("Still in bist write: 0x%x\n", result); | 284 | CH_ERR("Still in bist write: 0x%x\n", result); |
286 | else if((result & (1<<26)) != 0x0) | 285 | else if ((result & (1 << 26)) != 0x0) |
287 | CH_ERR("bist write error: 0x%x\n", result); | 286 | CH_ERR("bist write error: 0x%x\n", result); |
288 | 287 | ||
289 | return(0); | 288 | return 0; |
290 | } | 289 | } |
291 | 290 | ||
292 | static int run_bist(adapter_t *adapter, int moduleid) | 291 | static int run_bist(adapter_t *adapter, int moduleid) |
@@ -295,7 +294,7 @@ static int run_bist(adapter_t *adapter, int moduleid) | |||
295 | (void) bist_wr(adapter,moduleid, 0x00, 0x02); | 294 | (void) bist_wr(adapter,moduleid, 0x00, 0x02); |
296 | (void) bist_wr(adapter,moduleid, 0x01, 0x01); | 295 | (void) bist_wr(adapter,moduleid, 0x01, 0x01); |
297 | 296 | ||
298 | return(0); | 297 | return 0; |
299 | } | 298 | } |
300 | 299 | ||
301 | static int check_bist(adapter_t *adapter, int moduleid) | 300 | static int check_bist(adapter_t *adapter, int moduleid) |
@@ -309,27 +308,26 @@ static int check_bist(adapter_t *adapter, int moduleid) | |||
309 | if ((result & 3) != 0x3) | 308 | if ((result & 3) != 0x3) |
310 | CH_ERR("Result: 0x%x BIST error in ram %d, column: 0x%04x\n", | 309 | CH_ERR("Result: 0x%x BIST error in ram %d, column: 0x%04x\n", |
311 | result, moduleid, column); | 310 | result, moduleid, column); |
312 | return(0); | 311 | return 0; |
313 | } | 312 | } |
314 | 313 | ||
315 | static int enable_mem(adapter_t *adapter, int moduleid) | 314 | static int enable_mem(adapter_t *adapter, int moduleid) |
316 | { | 315 | { |
317 | /*enable mem*/ | 316 | /*enable mem*/ |
318 | (void) bist_wr(adapter,moduleid, 0x00, 0x00); | 317 | (void) bist_wr(adapter,moduleid, 0x00, 0x00); |
319 | return(0); | 318 | return 0; |
320 | } | 319 | } |
321 | 320 | ||
322 | static int run_bist_all(adapter_t *adapter) | 321 | static int run_bist_all(adapter_t *adapter) |
323 | { | 322 | { |
324 | int port=0; | 323 | int port = 0; |
325 | u32 val=0; | 324 | u32 val = 0; |
326 | 325 | ||
327 | vsc_write(adapter, REG_MEM_BIST, 0x5); | 326 | vsc_write(adapter, REG_MEM_BIST, 0x5); |
328 | vsc_read(adapter, REG_MEM_BIST, &val); | 327 | vsc_read(adapter, REG_MEM_BIST, &val); |
329 | 328 | ||
330 | for(port=0; port<12; port++){ | 329 | for (port = 0; port < 12; port++) |
331 | vsc_write(adapter, REG_DEV_SETUP(port), 0x0); | 330 | vsc_write(adapter, REG_DEV_SETUP(port), 0x0); |
332 | } | ||
333 | 331 | ||
334 | udelay(300); | 332 | udelay(300); |
335 | vsc_write(adapter, REG_SPI4_MISC, 0x00040409); | 333 | vsc_write(adapter, REG_SPI4_MISC, 0x00040409); |
@@ -352,13 +350,13 @@ static int run_bist_all(adapter_t *adapter) | |||
352 | udelay(300); | 350 | udelay(300); |
353 | vsc_write(adapter, REG_SPI4_MISC, 0x60040400); | 351 | vsc_write(adapter, REG_SPI4_MISC, 0x60040400); |
354 | udelay(300); | 352 | udelay(300); |
355 | for(port=0; port<12; port++){ | 353 | for (port = 0; port < 12; port++) |
356 | vsc_write(adapter, REG_DEV_SETUP(port), 0x1); | 354 | vsc_write(adapter, REG_DEV_SETUP(port), 0x1); |
357 | } | 355 | |
358 | udelay(300); | 356 | udelay(300); |
359 | vsc_write(adapter, REG_MEM_BIST, 0x0); | 357 | vsc_write(adapter, REG_MEM_BIST, 0x0); |
360 | mdelay(10); | 358 | mdelay(10); |
361 | return(0); | 359 | return 0; |
362 | } | 360 | } |
363 | 361 | ||
364 | static int mac_intr_handler(struct cmac *mac) | 362 | static int mac_intr_handler(struct cmac *mac) |
@@ -591,40 +589,46 @@ static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat) | |||
591 | 589 | ||
592 | static void port_stats_update(struct cmac *mac) | 590 | static void port_stats_update(struct cmac *mac) |
593 | { | 591 | { |
594 | int port = mac->instance->index; | 592 | struct { |
593 | unsigned int reg; | ||
594 | unsigned int offset; | ||
595 | } hw_stats[] = { | ||
596 | |||
597 | #define HW_STAT(reg, stat_name) \ | ||
598 | { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL } | ||
599 | |||
600 | /* Rx stats */ | ||
601 | HW_STAT(RxUnicast, RxUnicastFramesOK), | ||
602 | HW_STAT(RxMulticast, RxMulticastFramesOK), | ||
603 | HW_STAT(RxBroadcast, RxBroadcastFramesOK), | ||
604 | HW_STAT(Crc, RxFCSErrors), | ||
605 | HW_STAT(RxAlignment, RxAlignErrors), | ||
606 | HW_STAT(RxOversize, RxFrameTooLongErrors), | ||
607 | HW_STAT(RxPause, RxPauseFrames), | ||
608 | HW_STAT(RxJabbers, RxJabberErrors), | ||
609 | HW_STAT(RxFragments, RxRuntErrors), | ||
610 | HW_STAT(RxUndersize, RxRuntErrors), | ||
611 | HW_STAT(RxSymbolCarrier, RxSymbolErrors), | ||
612 | HW_STAT(RxSize1519ToMax, RxJumboFramesOK), | ||
613 | |||
614 | /* Tx stats (skip collision stats as we are full-duplex only) */ | ||
615 | HW_STAT(TxUnicast, TxUnicastFramesOK), | ||
616 | HW_STAT(TxMulticast, TxMulticastFramesOK), | ||
617 | HW_STAT(TxBroadcast, TxBroadcastFramesOK), | ||
618 | HW_STAT(TxPause, TxPauseFrames), | ||
619 | HW_STAT(TxUnderrun, TxUnderrun), | ||
620 | HW_STAT(TxSize1519ToMax, TxJumboFramesOK), | ||
621 | }, *p = hw_stats; | ||
622 | unsigned int port = mac->instance->index; | ||
623 | u64 *stats = (u64 *)&mac->stats; | ||
624 | unsigned int i; | ||
625 | |||
626 | for (i = 0; i < ARRAY_SIZE(hw_stats); i++) | ||
627 | rmon_update(mac, CRA(0x4, port, p->reg), stats + p->offset); | ||
595 | 628 | ||
596 | /* Rx stats */ | 629 | rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK); |
597 | rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK); | 630 | rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK); |
598 | rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad); | 631 | rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad); |
599 | rmon_update(mac, REG_RX_UNICAST(port), &mac->stats.RxUnicastFramesOK); | ||
600 | rmon_update(mac, REG_RX_MULTICAST(port), | ||
601 | &mac->stats.RxMulticastFramesOK); | ||
602 | rmon_update(mac, REG_RX_BROADCAST(port), | ||
603 | &mac->stats.RxBroadcastFramesOK); | ||
604 | rmon_update(mac, REG_CRC(port), &mac->stats.RxFCSErrors); | ||
605 | rmon_update(mac, REG_RX_ALIGNMENT(port), &mac->stats.RxAlignErrors); | ||
606 | rmon_update(mac, REG_RX_OVERSIZE(port), | ||
607 | &mac->stats.RxFrameTooLongErrors); | ||
608 | rmon_update(mac, REG_RX_PAUSE(port), &mac->stats.RxPauseFrames); | ||
609 | rmon_update(mac, REG_RX_JABBERS(port), &mac->stats.RxJabberErrors); | ||
610 | rmon_update(mac, REG_RX_FRAGMENTS(port), &mac->stats.RxRuntErrors); | ||
611 | rmon_update(mac, REG_RX_UNDERSIZE(port), &mac->stats.RxRuntErrors); | ||
612 | rmon_update(mac, REG_RX_SYMBOL_CARRIER(port), | ||
613 | &mac->stats.RxSymbolErrors); | ||
614 | rmon_update(mac, REG_RX_SIZE_1519_TO_MAX(port), | ||
615 | &mac->stats.RxJumboFramesOK); | ||
616 | |||
617 | /* Tx stats (skip collision stats as we are full-duplex only) */ | ||
618 | rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK); | ||
619 | rmon_update(mac, REG_TX_UNICAST(port), &mac->stats.TxUnicastFramesOK); | ||
620 | rmon_update(mac, REG_TX_MULTICAST(port), | ||
621 | &mac->stats.TxMulticastFramesOK); | ||
622 | rmon_update(mac, REG_TX_BROADCAST(port), | ||
623 | &mac->stats.TxBroadcastFramesOK); | ||
624 | rmon_update(mac, REG_TX_PAUSE(port), &mac->stats.TxPauseFrames); | ||
625 | rmon_update(mac, REG_TX_UNDERRUN(port), &mac->stats.TxUnderrun); | ||
626 | rmon_update(mac, REG_TX_SIZE_1519_TO_MAX(port), | ||
627 | &mac->stats.TxJumboFramesOK); | ||
628 | } | 632 | } |
629 | 633 | ||
630 | /* | 634 | /* |
@@ -686,7 +690,8 @@ static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index) | |||
686 | int i; | 690 | int i; |
687 | 691 | ||
688 | mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); | 692 | mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); |
689 | if (!mac) return NULL; | 693 | if (!mac) |
694 | return NULL; | ||
690 | 695 | ||
691 | mac->ops = &vsc7326_ops; | 696 | mac->ops = &vsc7326_ops; |
692 | mac->instance = (cmac_instance *)(mac + 1); | 697 | mac->instance = (cmac_instance *)(mac + 1); |
diff --git a/drivers/net/chelsio/vsc7326_reg.h b/drivers/net/chelsio/vsc7326_reg.h index 491bcf75c4fb..479edbcabe68 100644 --- a/drivers/net/chelsio/vsc7326_reg.h +++ b/drivers/net/chelsio/vsc7326_reg.h | |||
@@ -192,73 +192,84 @@ | |||
192 | #define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */ | 192 | #define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */ |
193 | 193 | ||
194 | /* Statistics */ | 194 | /* Statistics */ |
195 | /* CRA(0x4,pn,reg) */ | ||
196 | /* reg below */ | ||
195 | /* pn = port number, 0-a, a = 10GbE */ | 197 | /* pn = port number, 0-a, a = 10GbE */ |
196 | #define REG_RX_IN_BYTES(pn) CRA(0x4,pn,0x00) /* # Rx in octets */ | ||
197 | #define REG_RX_SYMBOL_CARRIER(pn) CRA(0x4,pn,0x01) /* Frames w/ symbol errors */ | ||
198 | #define REG_RX_PAUSE(pn) CRA(0x4,pn,0x02) /* # pause frames received */ | ||
199 | #define REG_RX_UNSUP_OPCODE(pn) CRA(0x4,pn,0x03) /* # control frames with unsupported opcode */ | ||
200 | #define REG_RX_OK_BYTES(pn) CRA(0x4,pn,0x04) /* # octets in good frames */ | ||
201 | #define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,0x05) /* # octets in bad frames */ | ||
202 | #define REG_RX_UNICAST(pn) CRA(0x4,pn,0x06) /* # good unicast frames */ | ||
203 | #define REG_RX_MULTICAST(pn) CRA(0x4,pn,0x07) /* # good multicast frames */ | ||
204 | #define REG_RX_BROADCAST(pn) CRA(0x4,pn,0x08) /* # good broadcast frames */ | ||
205 | #define REG_CRC(pn) CRA(0x4,pn,0x09) /* # frames w/ bad CRC only */ | ||
206 | #define REG_RX_ALIGNMENT(pn) CRA(0x4,pn,0x0a) /* # frames w/ alignment err */ | ||
207 | #define REG_RX_UNDERSIZE(pn) CRA(0x4,pn,0x0b) /* # frames undersize */ | ||
208 | #define REG_RX_FRAGMENTS(pn) CRA(0x4,pn,0x0c) /* # frames undersize w/ crc err */ | ||
209 | #define REG_RX_IN_RANGE_LENGTH_ERROR(pn) CRA(0x4,pn,0x0d) /* # frames with length error */ | ||
210 | #define REG_RX_OUT_OF_RANGE_ERROR(pn) CRA(0x4,pn,0x0e) /* # frames with illegal length field */ | ||
211 | #define REG_RX_OVERSIZE(pn) CRA(0x4,pn,0x0f) /* # frames oversize */ | ||
212 | #define REG_RX_JABBERS(pn) CRA(0x4,pn,0x10) /* # frames oversize w/ crc err */ | ||
213 | #define REG_RX_SIZE_64(pn) CRA(0x4,pn,0x11) /* # frames 64 octets long */ | ||
214 | #define REG_RX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x12) /* # frames 65-127 octets */ | ||
215 | #define REG_RX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x13) /* # frames 128-255 */ | ||
216 | #define REG_RX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x14) /* # frames 256-511 */ | ||
217 | #define REG_RX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x15) /* # frames 512-1023 */ | ||
218 | #define REG_RX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x16) /* # frames 1024-1518 */ | ||
219 | #define REG_RX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x17) /* # frames 1519-max */ | ||
220 | 198 | ||
221 | #define REG_TX_OUT_BYTES(pn) CRA(0x4,pn,0x18) /* # octets tx */ | 199 | enum { |
222 | #define REG_TX_PAUSE(pn) CRA(0x4,pn,0x19) /* # pause frames sent */ | 200 | RxInBytes = 0x00, // # Rx in octets |
223 | #define REG_TX_OK_BYTES(pn) CRA(0x4,pn,0x1a) /* # octets tx OK */ | 201 | RxSymbolCarrier = 0x01, // Frames w/ symbol errors |
224 | #define REG_TX_UNICAST(pn) CRA(0x4,pn,0x1b) /* # frames unicast */ | 202 | RxPause = 0x02, // # pause frames received |
225 | #define REG_TX_MULTICAST(pn) CRA(0x4,pn,0x1c) /* # frames multicast */ | 203 | RxUnsupOpcode = 0x03, // # control frames with unsupported opcode |
226 | #define REG_TX_BROADCAST(pn) CRA(0x4,pn,0x1d) /* # frames broadcast */ | 204 | RxOkBytes = 0x04, // # octets in good frames |
227 | #define REG_TX_MULTIPLE_COLL(pn) CRA(0x4,pn,0x1e) /* # frames tx after multiple collisions */ | 205 | RxBadBytes = 0x05, // # octets in bad frames |
228 | #define REG_TX_LATE_COLL(pn) CRA(0x4,pn,0x1f) /* # late collisions detected */ | 206 | RxUnicast = 0x06, // # good unicast frames |
229 | #define REG_TX_XCOLL(pn) CRA(0x4,pn,0x20) /* # frames lost, excessive collisions */ | 207 | RxMulticast = 0x07, // # good multicast frames |
230 | #define REG_TX_DEFER(pn) CRA(0x4,pn,0x21) /* # frames deferred on first tx attempt */ | 208 | RxBroadcast = 0x08, // # good broadcast frames |
231 | #define REG_TX_XDEFER(pn) CRA(0x4,pn,0x22) /* # frames excessively deferred */ | 209 | Crc = 0x09, // # frames w/ bad CRC only |
232 | #define REG_TX_CSENSE(pn) CRA(0x4,pn,0x23) /* carrier sense errors at frame end */ | 210 | RxAlignment = 0x0a, // # frames w/ alignment err |
233 | #define REG_TX_SIZE_64(pn) CRA(0x4,pn,0x24) /* # frames 64 octets long */ | 211 | RxUndersize = 0x0b, // # frames undersize |
234 | #define REG_TX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x25) /* # frames 65-127 octets */ | 212 | RxFragments = 0x0c, // # frames undersize w/ crc err |
235 | #define REG_TX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x26) /* # frames 128-255 */ | 213 | RxInRangeLengthError = 0x0d, // # frames with length error |
236 | #define REG_TX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x27) /* # frames 256-511 */ | 214 | RxOutOfRangeError = 0x0e, // # frames with illegal length field |
237 | #define REG_TX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x28) /* # frames 512-1023 */ | 215 | RxOversize = 0x0f, // # frames oversize |
238 | #define REG_TX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x29) /* # frames 1024-1518 */ | 216 | RxJabbers = 0x10, // # frames oversize w/ crc err |
239 | #define REG_TX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x2a) /* # frames 1519-max */ | 217 | RxSize64 = 0x11, // # frames 64 octets long |
240 | #define REG_TX_SINGLE_COLL(pn) CRA(0x4,pn,0x2b) /* # frames tx after single collision */ | 218 | RxSize65To127 = 0x12, // # frames 65-127 octets |
241 | #define REG_TX_BACKOFF2(pn) CRA(0x4,pn,0x2c) /* # frames tx ok after 2 backoffs/collisions */ | 219 | RxSize128To255 = 0x13, // # frames 128-255 |
242 | #define REG_TX_BACKOFF3(pn) CRA(0x4,pn,0x2d) /* after 3 backoffs/collisions */ | 220 | RxSize256To511 = 0x14, // # frames 256-511 |
243 | #define REG_TX_BACKOFF4(pn) CRA(0x4,pn,0x2e) /* after 4 */ | 221 | RxSize512To1023 = 0x15, // # frames 512-1023 |
244 | #define REG_TX_BACKOFF5(pn) CRA(0x4,pn,0x2f) /* after 5 */ | 222 | RxSize1024To1518 = 0x16, // # frames 1024-1518 |
245 | #define REG_TX_BACKOFF6(pn) CRA(0x4,pn,0x30) /* after 6 */ | 223 | RxSize1519ToMax = 0x17, // # frames 1519-max |
246 | #define REG_TX_BACKOFF7(pn) CRA(0x4,pn,0x31) /* after 7 */ | ||
247 | #define REG_TX_BACKOFF8(pn) CRA(0x4,pn,0x32) /* after 8 */ | ||
248 | #define REG_TX_BACKOFF9(pn) CRA(0x4,pn,0x33) /* after 9 */ | ||
249 | #define REG_TX_BACKOFF10(pn) CRA(0x4,pn,0x34) /* after 10 */ | ||
250 | #define REG_TX_BACKOFF11(pn) CRA(0x4,pn,0x35) /* after 11 */ | ||
251 | #define REG_TX_BACKOFF12(pn) CRA(0x4,pn,0x36) /* after 12 */ | ||
252 | #define REG_TX_BACKOFF13(pn) CRA(0x4,pn,0x37) /* after 13 */ | ||
253 | #define REG_TX_BACKOFF14(pn) CRA(0x4,pn,0x38) /* after 14 */ | ||
254 | #define REG_TX_BACKOFF15(pn) CRA(0x4,pn,0x39) /* after 15 */ | ||
255 | #define REG_TX_UNDERRUN(pn) CRA(0x4,pn,0x3a) /* # frames dropped from underrun */ | ||
256 | #define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */ | ||
257 | #define REG_RX_IPG_SHRINK(pn) CRA(0x4,pn,0x3c) /* # of IPG shrinks detected */ | ||
258 | 224 | ||
259 | #define REG_STAT_STICKY1G(pn) CRA(0x4,pn,0x3e) /* tri-speed sticky bits */ | 225 | TxOutBytes = 0x18, // # octets tx |
260 | #define REG_STAT_STICKY10G CRA(0x4,0xa,0x3e) /* 10GbE sticky bits */ | 226 | TxPause = 0x19, // # pause frames sent |
261 | #define REG_STAT_INIT(pn) CRA(0x4,pn,0x3f) /* Clear all statistics */ | 227 | TxOkBytes = 0x1a, // # octets tx OK |
228 | TxUnicast = 0x1b, // # frames unicast | ||
229 | TxMulticast = 0x1c, // # frames multicast | ||
230 | TxBroadcast = 0x1d, // # frames broadcast | ||
231 | TxMultipleColl = 0x1e, // # frames tx after multiple collisions | ||
232 | TxLateColl = 0x1f, // # late collisions detected | ||
233 | TxXcoll = 0x20, // # frames lost, excessive collisions | ||
234 | TxDefer = 0x21, // # frames deferred on first tx attempt | ||
235 | TxXdefer = 0x22, // # frames excessively deferred | ||
236 | TxCsense = 0x23, // carrier sense errors at frame end | ||
237 | TxSize64 = 0x24, // # frames 64 octets long | ||
238 | TxSize65To127 = 0x25, // # frames 65-127 octets | ||
239 | TxSize128To255 = 0x26, // # frames 128-255 | ||
240 | TxSize256To511 = 0x27, // # frames 256-511 | ||
241 | TxSize512To1023 = 0x28, // # frames 512-1023 | ||
242 | TxSize1024To1518 = 0x29, // # frames 1024-1518 | ||
243 | TxSize1519ToMax = 0x2a, // # frames 1519-max | ||
244 | TxSingleColl = 0x2b, // # frames tx after single collision | ||
245 | TxBackoff2 = 0x2c, // # frames tx ok after 2 backoffs/collisions | ||
246 | TxBackoff3 = 0x2d, // after 3 backoffs/collisions | ||
247 | TxBackoff4 = 0x2e, // after 4 | ||
248 | TxBackoff5 = 0x2f, // after 5 | ||
249 | TxBackoff6 = 0x30, // after 6 | ||
250 | TxBackoff7 = 0x31, // after 7 | ||
251 | TxBackoff8 = 0x32, // after 8 | ||
252 | TxBackoff9 = 0x33, // after 9 | ||
253 | TxBackoff10 = 0x34, // after 10 | ||
254 | TxBackoff11 = 0x35, // after 11 | ||
255 | TxBackoff12 = 0x36, // after 12 | ||
256 | TxBackoff13 = 0x37, // after 13 | ||
257 | TxBackoff14 = 0x38, // after 14 | ||
258 | TxBackoff15 = 0x39, // after 15 | ||
259 | TxUnderrun = 0x3a, // # frames dropped from underrun | ||
260 | // Hole. See REG_RX_XGMII_PROT_ERR below. | ||
261 | RxIpgShrink = 0x3c, // # of IPG shrinks detected | ||
262 | // Duplicate. See REG_STAT_STICKY10G below. | ||
263 | StatSticky1G = 0x3e, // tri-speed sticky bits | ||
264 | StatInit = 0x3f // Clear all statistics | ||
265 | }; | ||
266 | |||
267 | #define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */ | ||
268 | #define REG_STAT_STICKY10G CRA(0x4,0xa,StatSticky1G) /* 10GbE sticky bits */ | ||
269 | |||
270 | #define REG_RX_OK_BYTES(pn) CRA(0x4,pn,RxOkBytes) | ||
271 | #define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,RxBadBytes) | ||
272 | #define REG_TX_OK_BYTES(pn) CRA(0x4,pn,TxOkBytes) | ||
262 | 273 | ||
263 | /* MII-Management Block registers */ | 274 | /* MII-Management Block registers */ |
264 | /* These are for MII-M interface 0, which is the bidirectional LVTTL one. If | 275 | /* These are for MII-M interface 0, which is the bidirectional LVTTL one. If |
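For reference, every statistics counter in the block above is addressed as CRA(0x4, pn, reg), where pn is the port number (0-9, with 0xa selecting the 10GbE port) and reg is the offset now carried by the enum; REG_RX_OK_BYTES(pn), REG_RX_BAD_BYTES(pn) and REG_TX_OK_BYTES(pn) are simply that composition spelled out. Below is a minimal sketch of the idea; the CRA() bit packing shown is an illustrative assumption, not the macro's actual definition elsewhere in vsc7326_reg.h.

/* Sketch only: the CRA() bit layout below is an assumption made for
 * illustration, not the driver's actual definition. */
#include <stdio.h>

#define CRA(blk, sub, adr) (((blk) << 12) | ((sub) << 8) | (adr))   /* assumed packing */

enum { RxOkBytes = 0x04, RxBadBytes = 0x05, TxOkBytes = 0x1a };     /* offsets from the enum above */

int main(void)
{
	int pn = 2;   /* port number, 0-9; 0xa selects the 10GbE port */

	/* These are the addresses REG_RX_OK_BYTES(pn) and REG_TX_OK_BYTES(pn) compose to. */
	unsigned int rx_ok_addr = CRA(0x4, pn, RxOkBytes);
	unsigned int tx_ok_addr = CRA(0x4, pn, TxOkBytes);

	printf("port %d: RX_OK_BYTES at %#x, TX_OK_BYTES at %#x\n", pn, rx_ok_addr, tx_ok_addr);
	return 0;
}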
diff --git a/drivers/net/chelsio/vsc8244.c b/drivers/net/chelsio/vsc8244.c index c493e783d459..251d4859c91d 100644 --- a/drivers/net/chelsio/vsc8244.c +++ b/drivers/net/chelsio/vsc8244.c | |||
@@ -54,7 +54,7 @@ enum { | |||
54 | }; | 54 | }; |
55 | 55 | ||
56 | #define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \ | 56 | #define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \ |
57 | VSC_INTR_NEG_DONE) | 57 | VSC_INTR_NEG_DONE) |
58 | #define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \ | 58 | #define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \ |
59 | VSC_INTR_ENABLE) | 59 | VSC_INTR_ENABLE) |
60 | 60 | ||
@@ -94,19 +94,18 @@ static int vsc8244_intr_enable(struct cphy *cphy) | |||
94 | { | 94 | { |
95 | simple_mdio_write(cphy, VSC8244_INTR_ENABLE, INTR_MASK); | 95 | simple_mdio_write(cphy, VSC8244_INTR_ENABLE, INTR_MASK); |
96 | 96 | ||
97 | /* Enable interrupts through Elmer */ | 97 | /* Enable interrupts through Elmer */ |
98 | if (t1_is_asic(cphy->adapter)) { | 98 | if (t1_is_asic(cphy->adapter)) { |
99 | u32 elmer; | 99 | u32 elmer; |
100 | 100 | ||
101 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); | 101 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); |
102 | elmer |= ELMER0_GP_BIT1; | 102 | elmer |= ELMER0_GP_BIT1; |
103 | if (is_T2(cphy->adapter)) { | 103 | if (is_T2(cphy->adapter)) |
104 | elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; | 104 | elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; |
105 | } | ||
106 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); | 105 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); |
107 | } | 106 | } |
108 | 107 | ||
109 | return 0; | 108 | return 0; |
110 | } | 109 | } |
111 | 110 | ||
112 | static int vsc8244_intr_disable(struct cphy *cphy) | 111 | static int vsc8244_intr_disable(struct cphy *cphy) |
@@ -118,19 +117,18 @@ static int vsc8244_intr_disable(struct cphy *cphy) | |||
118 | 117 | ||
119 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); | 118 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); |
120 | elmer &= ~ELMER0_GP_BIT1; | 119 | elmer &= ~ELMER0_GP_BIT1; |
121 | if (is_T2(cphy->adapter)) { | 120 | if (is_T2(cphy->adapter)) |
122 | elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4); | 121 | elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4); |
123 | } | ||
124 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); | 122 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); |
125 | } | 123 | } |
126 | 124 | ||
127 | return 0; | 125 | return 0; |
128 | } | 126 | } |
129 | 127 | ||
130 | static int vsc8244_intr_clear(struct cphy *cphy) | 128 | static int vsc8244_intr_clear(struct cphy *cphy) |
131 | { | 129 | { |
132 | u32 val; | 130 | u32 val; |
133 | u32 elmer; | 131 | u32 elmer; |
134 | 132 | ||
135 | /* Clear PHY interrupts by reading the register. */ | 133 | /* Clear PHY interrupts by reading the register. */ |
136 | simple_mdio_read(cphy, VSC8244_INTR_ENABLE, &val); | 134 | simple_mdio_read(cphy, VSC8244_INTR_ENABLE, &val); |
@@ -138,13 +136,12 @@ static int vsc8244_intr_clear(struct cphy *cphy) | |||
138 | if (t1_is_asic(cphy->adapter)) { | 136 | if (t1_is_asic(cphy->adapter)) { |
139 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); | 137 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); |
140 | elmer |= ELMER0_GP_BIT1; | 138 | elmer |= ELMER0_GP_BIT1; |
141 | if (is_T2(cphy->adapter)) { | 139 | if (is_T2(cphy->adapter)) |
142 | elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; | 140 | elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; |
143 | } | ||
144 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); | 141 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); |
145 | } | 142 | } |
146 | 143 | ||
147 | return 0; | 144 | return 0; |
148 | } | 145 | } |
149 | 146 | ||
150 | /* | 147 | /* |
@@ -179,13 +176,13 @@ static int vsc8244_set_speed_duplex(struct cphy *phy, int speed, int duplex) | |||
179 | 176 | ||
180 | int t1_mdio_set_bits(struct cphy *phy, int mmd, int reg, unsigned int bits) | 177 | int t1_mdio_set_bits(struct cphy *phy, int mmd, int reg, unsigned int bits) |
181 | { | 178 | { |
182 | int ret; | 179 | int ret; |
183 | unsigned int val; | 180 | unsigned int val; |
184 | 181 | ||
185 | ret = mdio_read(phy, mmd, reg, &val); | 182 | ret = mdio_read(phy, mmd, reg, &val); |
186 | if (!ret) | 183 | if (!ret) |
187 | ret = mdio_write(phy, mmd, reg, val | bits); | 184 | ret = mdio_write(phy, mmd, reg, val | bits); |
188 | return ret; | 185 | return ret; |
189 | } | 186 | } |
190 | 187 | ||
191 | static int vsc8244_autoneg_enable(struct cphy *cphy) | 188 | static int vsc8244_autoneg_enable(struct cphy *cphy) |
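t1_mdio_set_bits above is a plain read-modify-write helper: it reads the MDIO register and, only if the read succeeded, writes the value back with the requested bits OR'd in. A self-contained sketch of that pattern follows; the mdio_read()/mdio_write() stubs, the dropped cphy argument, and the clear-bits counterpart are illustrative stand-ins, not part of the driver.

/* Read-modify-write sketch modelled on t1_mdio_set_bits. The stubs below
 * stand in for the driver's MDIO accessors and omit the cphy argument. */
#include <stdio.h>

static unsigned int fake_reg = 0x1040;                 /* pretend register contents */

static int mdio_read(int mmd, int reg, unsigned int *val)
{
	(void)mmd; (void)reg;
	*val = fake_reg;
	return 0;                                      /* 0 means success, as in the driver */
}

static int mdio_write(int mmd, int reg, unsigned int val)
{
	(void)mmd; (void)reg;
	fake_reg = val;
	return 0;
}

static int mdio_set_bits(int mmd, int reg, unsigned int bits)
{
	unsigned int val;
	int ret = mdio_read(mmd, reg, &val);

	if (!ret)                                      /* write back only if the read worked */
		ret = mdio_write(mmd, reg, val | bits);
	return ret;
}

/* Hypothetical counterpart for illustration; the driver does not define this. */
static int mdio_clear_bits(int mmd, int reg, unsigned int bits)
{
	unsigned int val;
	int ret = mdio_read(mmd, reg, &val);

	if (!ret)
		ret = mdio_write(mmd, reg, val & ~bits);
	return ret;
}

int main(void)
{
	mdio_set_bits(0, 0, 0x0200);
	mdio_clear_bits(0, 0, 0x1000);
	printf("register now %#x\n", fake_reg);        /* 0x0240 with the values above */
	return 0;
}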
@@ -235,7 +232,7 @@ static int vsc8244_advertise(struct cphy *phy, unsigned int advertise_map) | |||
235 | } | 232 | } |
236 | 233 | ||
237 | static int vsc8244_get_link_status(struct cphy *cphy, int *link_ok, | 234 | static int vsc8244_get_link_status(struct cphy *cphy, int *link_ok, |
238 | int *speed, int *duplex, int *fc) | 235 | int *speed, int *duplex, int *fc) |
239 | { | 236 | { |
240 | unsigned int bmcr, status, lpa, adv; | 237 | unsigned int bmcr, status, lpa, adv; |
241 | int err, sp = -1, dplx = -1, pause = 0; | 238 | int err, sp = -1, dplx = -1, pause = 0; |
@@ -343,11 +340,13 @@ static struct cphy_ops vsc8244_ops = { | |||
343 | .get_link_status = vsc8244_get_link_status | 340 | .get_link_status = vsc8244_get_link_status |
344 | }; | 341 | }; |
345 | 342 | ||
346 | static struct cphy* vsc8244_phy_create(adapter_t *adapter, int phy_addr, struct mdio_ops *mdio_ops) | 343 | static struct cphy* vsc8244_phy_create(adapter_t *adapter, int phy_addr, |
344 | struct mdio_ops *mdio_ops) | ||
347 | { | 345 | { |
348 | struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); | 346 | struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); |
349 | 347 | ||
350 | if (!cphy) return NULL; | 348 | if (!cphy) |
349 | return NULL; | ||
351 | 350 | ||
352 | cphy_init(cphy, adapter, phy_addr, &vsc8244_ops, mdio_ops); | 351 | cphy_init(cphy, adapter, phy_addr, &vsc8244_ops, mdio_ops); |
353 | 352 | ||