Diffstat (limited to 'drivers/net')
-rw-r--r--   drivers/net/benet/be.h           11
-rw-r--r--   drivers/net/benet/be_cmds.c     116
-rw-r--r--   drivers/net/benet/be_cmds.h      53
-rw-r--r--   drivers/net/benet/be_ethtool.c   37
-rw-r--r--   drivers/net/benet/be_hw.h        47
-rw-r--r--   drivers/net/benet/be_main.c      12
-rw-r--r--   drivers/net/fec.c                 2
-rw-r--r--   drivers/net/sfc/efx.c            49
-rw-r--r--   drivers/net/sfc/efx.h            15
-rw-r--r--   drivers/net/sfc/filter.c        117
-rw-r--r--   drivers/net/sfc/net_driver.h      3
-rw-r--r--   drivers/net/sh_eth.c            208
12 files changed, 627 insertions, 43 deletions
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 3a800e2bc94b..ed709a5d07d7 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -225,6 +225,10 @@ struct be_rx_obj {
 	u32 cache_line_barrier[15];
 };
 
+struct be_drv_stats {
+	u8 be_on_die_temperature;
+};
+
 struct be_vf_cfg {
 	unsigned char vf_mac_addr[ETH_ALEN];
 	u32 vf_if_handle;
@@ -234,6 +238,7 @@ struct be_vf_cfg {
 };
 
 #define BE_INVALID_PMAC_ID		0xffffffff
+
 struct be_adapter {
 	struct pci_dev *pdev;
 	struct net_device *netdev;
@@ -269,6 +274,7 @@ struct be_adapter {
 	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
 
 	u8 msix_vec_next_idx;
+	struct be_drv_stats drv_stats;
 
 	struct vlan_group *vlan_grp;
 	u16 vlans_added;
@@ -281,6 +287,7 @@ struct be_adapter {
 	struct be_dma_mem stats_cmd;
 	/* Work queue used to perform periodic tasks like getting statistics */
 	struct delayed_work work;
+	u16 work_counter;
 
 	/* Ethtool knobs and info */
 	bool rx_csum;		/* BE card must perform rx-checksumming */
@@ -298,7 +305,7 @@ struct be_adapter {
 	u32 rx_fc;		/* Rx flow control */
 	u32 tx_fc;		/* Tx flow control */
 	bool ue_detected;
-	bool stats_ioctl_sent;
+	bool stats_cmd_sent;
 	int link_speed;
 	u8 port_type;
 	u8 transceiver;
@@ -311,6 +318,8 @@ struct be_adapter {
 	struct be_vf_cfg vf_cfg[BE_MAX_VF];
 	u8 is_virtfn;
 	u32 sli_family;
+	u8 hba_port_num;
+	u16 pvid;
 };
 
 #define be_physfn(adapter) (!adapter->is_virtfn)
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 619ebc24602e..1822ecdadc7e 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -18,6 +18,9 @@
 #include "be.h"
 #include "be_cmds.h"
 
+/* Must be a power of 2 or else MODULO will BUG_ON */
+static int be_get_temp_freq = 32;
+
 static void be_mcc_notify(struct be_adapter *adapter)
 {
 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
@@ -81,7 +84,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 			be_dws_le_to_cpu(&resp->hw_stats,
 						sizeof(resp->hw_stats));
 			netdev_stats_update(adapter);
-			adapter->stats_ioctl_sent = false;
+			adapter->stats_cmd_sent = false;
 		}
 	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
 		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
@@ -124,6 +127,16 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
 	}
 }
 
+/*Grp5 PVID evt*/
+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
+		struct be_async_event_grp5_pvid_state *evt)
+{
+	if (evt->enabled)
+		adapter->pvid = evt->tag;
+	else
+		adapter->pvid = 0;
+}
+
 static void be_async_grp5_evt_process(struct be_adapter *adapter,
 		u32 trailer, struct be_mcc_compl *evt)
 {
@@ -141,6 +154,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
 		be_async_grp5_qos_speed_process(adapter,
 		(struct be_async_event_grp5_qos_link_speed *)evt);
 	break;
+	case ASYNC_EVENT_PVID_STATE:
+		be_async_grp5_pvid_state_process(adapter,
+		(struct be_async_event_grp5_pvid_state *)evt);
+		break;
 	default:
 		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
 		break;
@@ -1055,6 +1072,9 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
 	struct be_sge *sge;
 	int status = 0;
 
+	if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
+		be_cmd_get_die_temperature(adapter);
+
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
@@ -1075,7 +1095,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
 	sge->len = cpu_to_le32(nonemb_cmd->size);
 
 	be_mcc_notify(adapter);
-	adapter->stats_ioctl_sent = true;
+	adapter->stats_cmd_sent = true;
 
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
@@ -1122,6 +1142,44 @@ err:
 	return status;
 }
 
+/* Uses synchronous mcc */
+int be_cmd_get_die_temperature(struct be_adapter *adapter)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_cntl_addnl_attribs *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
+
+	status = be_mcc_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_get_cntl_addnl_attribs *resp =
+						embedded_payload(wrb);
+		adapter->drv_stats.be_on_die_temperature =
+						resp->on_die_temperature;
+	}
+	/* If IOCTL fails once, do not bother issuing it again */
+	else
+		be_get_temp_freq = 0;
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
 /* Uses Mbox */
 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
 {
@@ -1896,3 +1954,57 @@ err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
+
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_cntl_attribs *req;
+	struct be_cmd_resp_cntl_attribs *resp;
+	struct be_sge *sge;
+	int status;
+	int payload_len = max(sizeof(*req), sizeof(*resp));
+	struct mgmt_controller_attrib *attribs;
+	struct be_dma_mem attribs_cmd;
+
+	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
+	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
+	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
+						&attribs_cmd.dma);
+	if (!attribs_cmd.va) {
+		dev_err(&adapter->pdev->dev,
+				"Memory allocation failure\n");
+		return -ENOMEM;
+	}
+
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;
+
+	wrb = wrb_from_mbox(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = attribs_cmd.va;
+	sge = nonembedded_sgl(wrb);
+
+	be_wrb_hdr_prepare(wrb, payload_len, false, 1,
+			OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
+	sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
+	sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
+	sge->len = cpu_to_le32(attribs_cmd.size);
+
+	status = be_mbox_notify_wait(adapter);
+	if (!status) {
+		attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va +
+					sizeof(struct be_cmd_resp_hdr));
+		adapter->hba_port_num = attribs->hba_attribs.phy_port;
+	}
+
+err:
+	mutex_unlock(&adapter->mbox_lock);
+	pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
+					attribs_cmd.dma);
+	return status;
+}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 91c5d2b09aa1..93e5768fc705 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -88,6 +88,7 @@ struct be_mcc_compl {
 #define ASYNC_EVENT_CODE_GRP_5		0x5
 #define ASYNC_EVENT_QOS_SPEED		0x1
 #define ASYNC_EVENT_COS_PRIORITY	0x2
+#define ASYNC_EVENT_PVID_STATE		0x3
 struct be_async_event_trailer {
 	u32 code;
 };
@@ -134,6 +135,18 @@ struct be_async_event_grp5_cos_priority {
 	struct be_async_event_trailer trailer;
 } __packed;
 
+/* When the event code of an async trailer is GRP5 and event type is
+ * PVID state, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_pvid_state {
+	u8 enabled;
+	u8 rsvd0;
+	u16 tag;
+	u32 event_tag;
+	u32 rsvd1;
+	struct be_async_event_trailer trailer;
+} __packed;
+
 struct be_mcc_mailbox {
 	struct be_mcc_wrb wrb;
 	struct be_mcc_compl compl;
@@ -156,6 +169,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_SET_QOS				28
 #define OPCODE_COMMON_MCC_CREATE_EXT			90
 #define OPCODE_COMMON_SEEPROM_READ			30
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES		32
 #define OPCODE_COMMON_NTWK_RX_FILTER			34
 #define OPCODE_COMMON_GET_FW_VERSION			35
 #define OPCODE_COMMON_SET_FLOW_CONTROL			36
@@ -176,6 +190,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_GET_BEACON_STATE			70
 #define OPCODE_COMMON_READ_TRANSRECV_DATA		73
 #define OPCODE_COMMON_GET_PHY_DETAILS			102
+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES	121
 
 #define OPCODE_ETH_RSS_CONFIG				1
 #define OPCODE_ETH_ACPI_CONFIG				2
@@ -619,7 +634,10 @@ struct be_rxf_stats {
 	u32 rx_drops_invalid_ring;	/* dword 145*/
 	u32 forwarded_packets;	/* dword 146*/
 	u32 rx_drops_mtu;	/* dword 147*/
-	u32 rsvd0[15];
+	u32 rsvd0[7];
+	u32 port0_jabber_events;
+	u32 port1_jabber_events;
+	u32 rsvd1[6];
 };
 
 struct be_erx_stats {
@@ -630,11 +648,16 @@ struct be_erx_stats {
 	u32 debug_pmem_pbuf_dealloc;	/* dword 47*/
 };
 
+struct be_pmem_stats {
+	u32 eth_red_drops;
+	u32 rsvd[4];
+};
+
 struct be_hw_stats {
 	struct be_rxf_stats rxf;
 	u32 rsvd[48];
 	struct be_erx_stats erx;
-	u32 rsvd1[6];
+	struct be_pmem_stats pmem;
 };
 
 struct be_cmd_req_get_stats {
@@ -647,6 +670,20 @@ struct be_cmd_resp_get_stats {
 	struct be_hw_stats hw_stats;
 };
 
+struct be_cmd_req_get_cntl_addnl_attribs {
+	struct be_cmd_req_hdr hdr;
+	u8 rsvd[8];
+};
+
+struct be_cmd_resp_get_cntl_addnl_attribs {
+	struct be_cmd_resp_hdr hdr;
+	u16 ipl_file_number;
+	u8 ipl_file_version;
+	u8 rsvd0;
+	u8 on_die_temperature; /* in degrees centigrade*/
+	u8 rsvd1[3];
+};
+
 struct be_cmd_req_vlan_config {
 	struct be_cmd_req_hdr hdr;
 	u8 interface_id;
@@ -994,6 +1031,16 @@ struct be_cmd_resp_set_qos {
 	u32 rsvd;
 };
 
+/*********************** Controller Attributes ***********************/
+struct be_cmd_req_cntl_attribs {
+	struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_cntl_attribs {
+	struct be_cmd_resp_hdr hdr;
+	struct mgmt_controller_attrib attribs;
+};
+
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -1078,4 +1125,6 @@ extern int be_cmd_get_phy_info(struct be_adapter *adapter,
 			struct be_dma_mem *cmd);
 extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
 extern void be_detect_dump_ue(struct be_adapter *adapter);
+extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
+extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
 
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 07b4ab902b17..6e5e43380c2a 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -26,7 +26,8 @@ struct be_ethtool_stat {
 	int offset;
 };
 
-enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
+enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
+	PMEMSTAT, DRVSTAT};
 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
 					offsetof(_struct, field)
 #define NETSTAT_INFO(field)	#field, NETSTAT,\
@@ -43,6 +44,11 @@ enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
 					field)
 #define ERXSTAT_INFO(field)	#field, ERXSTAT,\
 					FIELDINFO(struct be_erx_stats, field)
+#define PMEMSTAT_INFO(field)	#field, PMEMSTAT,\
+					FIELDINFO(struct be_pmem_stats, field)
+#define DRVSTAT_INFO(field)	#field, DRVSTAT,\
+					FIELDINFO(struct be_drv_stats, \
+						field)
 
 static const struct be_ethtool_stat et_stats[] = {
 	{NETSTAT_INFO(rx_packets)},
@@ -99,7 +105,11 @@ static const struct be_ethtool_stat et_stats[] = {
 	{MISCSTAT_INFO(rx_drops_too_many_frags)},
 	{MISCSTAT_INFO(rx_drops_invalid_ring)},
 	{MISCSTAT_INFO(forwarded_packets)},
-	{MISCSTAT_INFO(rx_drops_mtu)}
+	{MISCSTAT_INFO(rx_drops_mtu)},
+	{MISCSTAT_INFO(port0_jabber_events)},
+	{MISCSTAT_INFO(port1_jabber_events)},
+	{PMEMSTAT_INFO(eth_red_drops)},
+	{DRVSTAT_INFO(be_on_die_temperature)}
 };
 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
 
@@ -121,7 +131,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
 	"MAC Loopback test",
 	"PHY Loopback test",
 	"External Loopback test",
-	"DDR DMA test"
+	"DDR DMA test",
 	"Link test"
 };
 
@@ -276,6 +286,12 @@ be_get_ethtool_stats(struct net_device *netdev,
 		case MISCSTAT:
 			p = &hw_stats->rxf;
 			break;
+		case PMEMSTAT:
+			p = &hw_stats->pmem;
+			break;
+		case DRVSTAT:
+			p = &adapter->drv_stats;
+			break;
 		}
 
 		p = (u8 *)p + et_stats[i].offset;
@@ -497,7 +513,7 @@ be_phys_id(struct net_device *netdev, u32 data)
 	int status;
 	u32 cur;
 
-	be_cmd_get_beacon_state(adapter, adapter->port_num, &cur);
+	be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
 
 	if (cur == BEACON_STATE_ENABLED)
 		return 0;
@@ -505,12 +521,12 @@ be_phys_id(struct net_device *netdev, u32 data)
 	if (data < 2)
 		data = 2;
 
-	status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+	status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
 			BEACON_STATE_ENABLED);
 	set_current_state(TASK_INTERRUPTIBLE);
 	schedule_timeout(data*HZ);
 
-	status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+	status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
 			BEACON_STATE_DISABLED);
 
 	return status;
@@ -589,12 +605,12 @@ err:
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
 				u64 *status)
 {
-	be_cmd_set_loopback(adapter, adapter->port_num,
+	be_cmd_set_loopback(adapter, adapter->hba_port_num,
 				loopback_type, 1);
-	*status = be_cmd_loopback_test(adapter, adapter->port_num,
+	*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
 				loopback_type, 1500,
 				2, 0xabc);
-	be_cmd_set_loopback(adapter, adapter->port_num,
+	be_cmd_set_loopback(adapter, adapter->hba_port_num,
 				BE_NO_LOOPBACK, 1);
 	return *status;
 }
@@ -633,7 +649,8 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
 			&qos_link_speed) != 0) {
 		test->flags |= ETH_TEST_FL_FAILED;
 		data[4] = -1;
-	} else if (mac_speed) {
+	} else if (!mac_speed) {
+		test->flags |= ETH_TEST_FL_FAILED;
 		data[4] = 1;
 	}
 }
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 4096d9778234..3f459f76cd1d 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -327,6 +327,53 @@ struct be_eth_rx_compl {
 	u32 dw[4];
 };
 
+struct mgmt_hba_attribs {
+	u8 flashrom_version_string[32];
+	u8 manufacturer_name[32];
+	u32 supported_modes;
+	u32 rsvd0[3];
+	u8 ncsi_ver_string[12];
+	u32 default_extended_timeout;
+	u8 controller_model_number[32];
+	u8 controller_description[64];
+	u8 controller_serial_number[32];
+	u8 ip_version_string[32];
+	u8 firmware_version_string[32];
+	u8 bios_version_string[32];
+	u8 redboot_version_string[32];
+	u8 driver_version_string[32];
+	u8 fw_on_flash_version_string[32];
+	u32 functionalities_supported;
+	u16 max_cdblength;
+	u8 asic_revision;
+	u8 generational_guid[16];
+	u8 hba_port_count;
+	u16 default_link_down_timeout;
+	u8 iscsi_ver_min_max;
+	u8 multifunction_device;
+	u8 cache_valid;
+	u8 hba_status;
+	u8 max_domains_supported;
+	u8 phy_port;
+	u32 firmware_post_status;
+	u32 hba_mtu[8];
+	u32 rsvd1[4];
+};
+
+struct mgmt_controller_attrib {
+	struct mgmt_hba_attribs hba_attribs;
+	u16 pci_vendor_id;
+	u16 pci_device_id;
+	u16 pci_sub_vendor_id;
+	u16 pci_sub_system_id;
+	u8 pci_bus_number;
+	u8 pci_device_number;
+	u8 pci_function_number;
+	u8 interface_type;
+	u64 unique_identifier;
+	u32 rsvd0[5];
+};
+
 struct controller_id {
 	u32 vendor;
 	u32 device;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index aad7ea37d589..0bdccb10aac5 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1047,6 +1047,9 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 	if ((adapter->function_mode & 0x400) && !vtm)
 		vlanf = 0;
 
+	if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
+		vlanf = 0;
+
 	if (unlikely(vlanf)) {
 		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
 			kfree_skb(skb);
@@ -1087,6 +1090,9 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
 	if ((adapter->function_mode & 0x400) && !vtm)
 		vlanf = 0;
 
+	if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
+		vlanf = 0;
+
 	skb = napi_get_frags(&eq_obj->napi);
 	if (!skb) {
 		be_rx_compl_discard(adapter, rxo, rxcp);
@@ -1873,7 +1879,7 @@ static void be_worker(struct work_struct *work)
 		goto reschedule;
 	}
 
-	if (!adapter->stats_ioctl_sent)
+	if (!adapter->stats_cmd_sent)
 		be_cmd_get_stats(adapter, &adapter->stats_cmd);
 
 	be_tx_rate_update(adapter);
@@ -2862,6 +2868,10 @@ static int be_get_config(struct be_adapter *adapter)
 	else
 		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
 
+	status = be_cmd_get_cntl_attributes(adapter);
+	if (status)
+		return status;
+
 	return 0;
 }
 
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 74798bee672e..634c0daeecec 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -983,8 +983,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
 	for (i = 0; i < PHY_MAX_ADDR; i++)
 		fep->mii_bus->irq[i] = PHY_POLL;
 
-	platform_set_drvdata(ndev, fep->mii_bus);
-
 	if (mdiobus_register(fep->mii_bus))
 		goto err_out_free_mdio_irq;
 
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index d4e04256730b..35b7bc52a2d1 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -21,6 +21,7 @@
 #include <linux/ethtool.h>
 #include <linux/topology.h>
 #include <linux/gfp.h>
+#include <linux/cpu_rmap.h>
 #include "net_driver.h"
 #include "efx.h"
 #include "nic.h"
@@ -307,6 +308,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
 			channel->irq_mod_score = 0;
 		}
 
+		efx_filter_rfs_expire(channel);
+
 		/* There is no race here; although napi_disable() will
 		 * only wait for napi_complete(), this isn't a problem
 		 * since efx_channel_processed() will have no effect if
@@ -1175,10 +1178,32 @@ static int efx_wanted_channels(void)
 	return count;
 }
 
+static int
+efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
+{
+#ifdef CONFIG_RFS_ACCEL
+	int i, rc;
+
+	efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
+	if (!efx->net_dev->rx_cpu_rmap)
+		return -ENOMEM;
+	for (i = 0; i < efx->n_rx_channels; i++) {
+		rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+				      xentries[i].vector);
+		if (rc) {
+			free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+			efx->net_dev->rx_cpu_rmap = NULL;
+			return rc;
+		}
+	}
+#endif
+	return 0;
+}
+
 /* Probe the number and type of interrupts we are able to obtain, and
  * the resulting numbers of channels and RX queues.
  */
-static void efx_probe_interrupts(struct efx_nic *efx)
+static int efx_probe_interrupts(struct efx_nic *efx)
 {
 	int max_channels =
 		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
@@ -1220,6 +1245,11 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 				efx->n_tx_channels = efx->n_channels;
 				efx->n_rx_channels = efx->n_channels;
 			}
+			rc = efx_init_rx_cpu_rmap(efx, xentries);
+			if (rc) {
+				pci_disable_msix(efx->pci_dev);
+				return rc;
+			}
 			for (i = 0; i < n_channels; i++)
 				efx_get_channel(efx, i)->irq =
 					xentries[i].vector;
@@ -1253,6 +1283,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
 		efx->n_tx_channels = 1;
 		efx->legacy_irq = efx->pci_dev->irq;
 	}
+
+	return 0;
 }
 
 static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1289,7 +1321,9 @@ static int efx_probe_nic(struct efx_nic *efx)
 
 	/* Determine the number of channels and queues by trying to hook
 	 * in MSI-X interrupts. */
-	efx_probe_interrupts(efx);
+	rc = efx_probe_interrupts(efx);
+	if (rc)
+		goto fail;
 
 	if (efx->n_channels > 1)
 		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1304,6 +1338,10 @@ static int efx_probe_nic(struct efx_nic *efx)
 	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
 
 	return 0;
+
+fail:
+	efx->type->remove(efx);
+	return rc;
 }
 
 static void efx_remove_nic(struct efx_nic *efx)
@@ -1837,6 +1875,9 @@ static const struct net_device_ops efx_netdev_ops = {
 	.ndo_poll_controller = efx_netpoll,
 #endif
 	.ndo_setup_tc		= efx_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	= efx_filter_rfs,
+#endif
 };
 
 static void efx_update_name(struct efx_nic *efx)
@@ -2274,6 +2315,10 @@ static void efx_fini_struct(struct efx_nic *efx)
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
+#ifdef CONFIG_RFS_ACCEL
+	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+	efx->net_dev->rx_cpu_rmap = NULL;
+#endif
 	efx_nic_fini_interrupt(efx);
 	efx_fini_channels(efx);
 	efx_fini_port(efx);
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 0cb198a64a63..cbce62b9c996 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -76,6 +76,21 @@ extern int efx_filter_remove_filter(struct efx_nic *efx,
 				    struct efx_filter_spec *spec);
 extern void efx_filter_clear_rx(struct efx_nic *efx,
 				enum efx_filter_priority priority);
+#ifdef CONFIG_RFS_ACCEL
+extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+			  u16 rxq_index, u32 flow_id);
+extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+static inline void efx_filter_rfs_expire(struct efx_channel *channel)
+{
+	if (channel->rfs_filters_added >= 60 &&
+	    __efx_filter_rfs_expire(channel->efx, 100))
+		channel->rfs_filters_added -= 60;
+}
+#define efx_filter_rfs_enabled() 1
+#else
+static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
+#define efx_filter_rfs_enabled() 0
+#endif
 
 /* Channels */
 extern void efx_process_channel_now(struct efx_channel *channel);
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index d4722c41c4ce..95a980fd63d5 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/in.h>
+#include <net/ip.h>
 #include "efx.h"
 #include "filter.h"
 #include "io.h"
@@ -27,6 +28,10 @@
  */
 #define FILTER_CTL_SRCH_MAX 200
 
+/* Don't try very hard to find space for performance hints, as this is
+ * counter-productive. */
+#define FILTER_CTL_SRCH_HINT_MAX 5
+
 enum efx_filter_table_id {
 	EFX_FILTER_TABLE_RX_IP = 0,
 	EFX_FILTER_TABLE_RX_MAC,
@@ -47,6 +52,10 @@ struct efx_filter_table {
 struct efx_filter_state {
 	spinlock_t	lock;
 	struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
+#ifdef CONFIG_RFS_ACCEL
+	u32 *rps_flow_id;
+	unsigned rps_expire_index;
+#endif
 };
 
 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
@@ -325,15 +334,16 @@ static int efx_filter_search(struct efx_filter_table *table,
 			     struct efx_filter_spec *spec, u32 key,
 			     bool for_insert, int *depth_required)
 {
-	unsigned hash, incr, filter_idx, depth;
+	unsigned hash, incr, filter_idx, depth, depth_max;
 	struct efx_filter_spec *cmp;
 
 	hash = efx_filter_hash(key);
 	incr = efx_filter_increment(key);
+	depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
+		     FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
 
 	for (depth = 1, filter_idx = hash & (table->size - 1);
-	     depth <= FILTER_CTL_SRCH_MAX &&
-	     test_bit(filter_idx, table->used_bitmap);
+	     depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
 	     ++depth) {
 		cmp = &table->spec[filter_idx];
 		if (efx_filter_equal(spec, cmp))
@@ -342,7 +352,7 @@ static int efx_filter_search(struct efx_filter_table *table,
 	}
 	if (!for_insert)
 		return -ENOENT;
-	if (depth > FILTER_CTL_SRCH_MAX)
+	if (depth > depth_max)
 		return -EBUSY;
 found:
 	*depth_required = depth;
@@ -562,6 +572,13 @@ int efx_probe_filters(struct efx_nic *efx)
 	spin_lock_init(&state->lock);
 
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+#ifdef CONFIG_RFS_ACCEL
+		state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
+					     sizeof(*state->rps_flow_id),
+					     GFP_KERNEL);
+		if (!state->rps_flow_id)
+			goto fail;
+#endif
 		table = &state->table[EFX_FILTER_TABLE_RX_IP];
 		table->id = EFX_FILTER_TABLE_RX_IP;
 		table->offset = FR_BZ_RX_FILTER_TBL0;
@@ -607,5 +624,97 @@ void efx_remove_filters(struct efx_nic *efx)
 		kfree(state->table[table_id].used_bitmap);
 		vfree(state->table[table_id].spec);
 	}
+#ifdef CONFIG_RFS_ACCEL
+	kfree(state->rps_flow_id);
+#endif
 	kfree(state);
 }
+
+#ifdef CONFIG_RFS_ACCEL
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+		   u16 rxq_index, u32 flow_id)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_channel *channel;
+	struct efx_filter_state *state = efx->filter_state;
+	struct efx_filter_spec spec;
+	const struct iphdr *ip;
+	const __be16 *ports;
+	int nhoff;
+	int rc;
+
+	nhoff = skb_network_offset(skb);
+
+	if (skb->protocol != htons(ETH_P_IP))
+		return -EPROTONOSUPPORT;
+
+	/* RFS must validate the IP header length before calling us */
+	EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip)));
+	ip = (const struct iphdr *)(skb->data + nhoff);
+	if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+		return -EPROTONOSUPPORT;
+	EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4));
+	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
+	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
+				      ip->daddr, ports[1], ip->saddr, ports[0]);
+	if (rc)
+		return rc;
+
+	rc = efx_filter_insert_filter(efx, &spec, true);
+	if (rc < 0)
+		return rc;
+
+	/* Remember this so we can check whether to expire the filter later */
+	state->rps_flow_id[rc] = flow_id;
+	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+	++channel->rfs_filters_added;
+
+	netif_info(efx, rx_status, efx->net_dev,
+		   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+		   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
+		   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
+		   rxq_index, flow_id, rc);
+
+	return rc;
+}
+
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
+{
+	struct efx_filter_state *state = efx->filter_state;
+	struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
+	unsigned mask = table->size - 1;
+	unsigned index;
+	unsigned stop;
+
+	if (!spin_trylock_bh(&state->lock))
+		return false;
+
+	index = state->rps_expire_index;
+	stop = (index + quota) & mask;
+
+	while (index != stop) {
+		if (test_bit(index, table->used_bitmap) &&
+		    table->spec[index].priority == EFX_FILTER_PRI_HINT &&
+		    rps_may_expire_flow(efx->net_dev,
+					table->spec[index].dmaq_id,
+					state->rps_flow_id[index], index)) {
+			netif_info(efx, rx_status, efx->net_dev,
+				   "expiring filter %d [flow %u]\n",
+				   index, state->rps_flow_id[index]);
+			efx_filter_table_clear_entry(efx, table, index);
+		}
+		index = (index + 1) & mask;
+	}
+
+	state->rps_expire_index = stop;
+	if (table->used == 0)
+		efx_filter_table_reset_search_depth(table);
+
+	spin_unlock_bh(&state->lock);
+	return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 96e22ad34970..15b9068e5b87 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -362,6 +362,9 @@ struct efx_channel {
 
 	unsigned int irq_count;
 	unsigned int irq_mod_score;
+#ifdef CONFIG_RFS_ACCEL
+	unsigned int rfs_filters_added;
+#endif
 
 	int rx_alloc_level;
 	int rx_alloc_push_pages;
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 819c1750e2ab..095e52580884 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -32,10 +32,17 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
+#include <linux/ethtool.h>
 #include <asm/cacheflush.h>
 
 #include "sh_eth.h"
 
+#define SH_ETH_DEF_MSG_ENABLE \
+		(NETIF_MSG_LINK	| \
+		NETIF_MSG_TIMER	| \
+		NETIF_MSG_RX_ERR| \
+		NETIF_MSG_TX_ERR)
+
 /* There is CPU dependent code */
 #if defined(CONFIG_CPU_SUBTYPE_SH7724)
 #define SH_ETH_RESET_DEFAULT	1
@@ -817,6 +824,20 @@ static int sh_eth_rx(struct net_device *ndev)
 	return 0;
 }
 
+static void sh_eth_rcv_snd_disable(u32 ioaddr)
+{
+	/* disable tx and rx */
+	writel(readl(ioaddr + ECMR) &
+		~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
+}
+
+static void sh_eth_rcv_snd_enable(u32 ioaddr)
+{
+	/* enable tx and rx */
+	writel(readl(ioaddr + ECMR) |
+		(ECMR_RE | ECMR_TE), ioaddr + ECMR);
+}
+
 /* error control function */
 static void sh_eth_error(struct net_device *ndev, int intr_status)
 {
@@ -843,11 +864,9 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 			if (mdp->ether_link_active_low)
 				link_stat = ~link_stat;
 		}
-		if (!(link_stat & PHY_ST_LINK)) {
-			/* Link Down : disable tx and rx */
-			writel(readl(ioaddr + ECMR) &
-				~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
-		} else {
+		if (!(link_stat & PHY_ST_LINK))
+			sh_eth_rcv_snd_disable(ioaddr);
+		else {
 			/* Link Up */
 			writel(readl(ioaddr + EESIPR) &
 				~DMAC_M_ECI, ioaddr + EESIPR);
@@ -857,8 +876,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 			writel(readl(ioaddr + EESIPR) |
 				DMAC_M_ECI, ioaddr + EESIPR);
 			/* enable tx and rx */
-			writel(readl(ioaddr + ECMR) |
-				(ECMR_RE | ECMR_TE), ioaddr + ECMR);
+			sh_eth_rcv_snd_enable(ioaddr);
 		}
 	}
 	}
@@ -867,6 +885,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 		/* Write buck end. unused write back interrupt */
 		if (intr_status & EESR_TABT)	/* Transmit Abort int */
 			mdp->stats.tx_aborted_errors++;
+			if (netif_msg_tx_err(mdp))
+				dev_err(&ndev->dev, "Transmit Abort\n");
 	}
 
 	if (intr_status & EESR_RABT) {
@@ -874,14 +894,23 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 		if (intr_status & EESR_RFRMER) {
 			/* Receive Frame Overflow int */
 			mdp->stats.rx_frame_errors++;
-			dev_err(&ndev->dev, "Receive Frame Overflow\n");
+			if (netif_msg_rx_err(mdp))
+				dev_err(&ndev->dev, "Receive Abort\n");
 		}
 	}
 
-	if (!mdp->cd->no_ade) {
-		if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
-			intr_status & EESR_TFE)
-			mdp->stats.tx_fifo_errors++;
+	if (intr_status & EESR_TDE) {
+		/* Transmit Descriptor Empty int */
+		mdp->stats.tx_fifo_errors++;
+		if (netif_msg_tx_err(mdp))
+			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
+	}
+
+	if (intr_status & EESR_TFE) {
+		/* FIFO under flow */
+		mdp->stats.tx_fifo_errors++;
+		if (netif_msg_tx_err(mdp))
+			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
 	}
 
 	if (intr_status & EESR_RDE) {
@@ -890,12 +919,22 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 
 		if (readl(ioaddr + EDRRR) ^ EDRRR_R)
 			writel(EDRRR_R, ioaddr + EDRRR);
-		dev_err(&ndev->dev, "Receive Descriptor Empty\n");
+		if (netif_msg_rx_err(mdp))
+			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
 	}
+
 	if (intr_status & EESR_RFE) {
 		/* Receive FIFO Overflow int */
 		mdp->stats.rx_fifo_errors++;
-		dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+		if (netif_msg_rx_err(mdp))
+			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+	}
+
+	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
+		/* Address Error */
+		mdp->stats.tx_fifo_errors++;
+		if (netif_msg_tx_err(mdp))
+			dev_err(&ndev->dev, "Address Error\n");
 	}
 
 	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -1012,7 +1051,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
 		mdp->duplex = -1;
 	}
 
-	if (new_state)
+	if (new_state && netif_msg_link(mdp))
 		phy_print_status(phydev);
 }
 
@@ -1063,6 +1102,132 @@ static int sh_eth_phy_start(struct net_device *ndev)
 	return 0;
 }
 
+static int sh_eth_get_settings(struct net_device *ndev,
+			struct ethtool_cmd *ecmd)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&mdp->lock, flags);
+	ret = phy_ethtool_gset(mdp->phydev, ecmd);
+	spin_unlock_irqrestore(&mdp->lock, flags);
+
+	return ret;
+}
+
+static int sh_eth_set_settings(struct net_device *ndev,
+		struct ethtool_cmd *ecmd)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	unsigned long flags;
+	int ret;
+	u32 ioaddr = ndev->base_addr;
+
+	spin_lock_irqsave(&mdp->lock, flags);
+
+	/* disable tx and rx */
+	sh_eth_rcv_snd_disable(ioaddr);
+
+	ret = phy_ethtool_sset(mdp->phydev, ecmd);
+	if (ret)
+		goto error_exit;
+
+	if (ecmd->duplex == DUPLEX_FULL)
+		mdp->duplex = 1;
+	else
+		mdp->duplex = 0;
+
+	if (mdp->cd->set_duplex)
+		mdp->cd->set_duplex(ndev);
+
+error_exit:
+	mdelay(1);
+
+	/* enable tx and rx */
+	sh_eth_rcv_snd_enable(ioaddr);
+
+	spin_unlock_irqrestore(&mdp->lock, flags);
+
+	return ret;
+}
+
+static int sh_eth_nway_reset(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&mdp->lock, flags);
+	ret = phy_start_aneg(mdp->phydev);
+	spin_unlock_irqrestore(&mdp->lock, flags);
+
+	return ret;
+}
+
+static u32 sh_eth_get_msglevel(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	return mdp->msg_enable;
+}
+
+static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	mdp->msg_enable = value;
+}
+
+static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"rx_current", "tx_current",
+	"rx_dirty", "tx_dirty",
+};
+#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
+
+static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return SH_ETH_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void sh_eth_get_ethtool_stats(struct net_device *ndev,
+			struct ethtool_stats *stats, u64 *data)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int i = 0;
+
+	/* device-specific stats */
+	data[i++] = mdp->cur_rx;
+	data[i++] = mdp->cur_tx;
+	data[i++] = mdp->dirty_rx;
+	data[i++] = mdp->dirty_tx;
+}
+
+static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(data, *sh_eth_gstrings_stats,
+					sizeof(sh_eth_gstrings_stats));
+		break;
+	}
+}
+
+static struct ethtool_ops sh_eth_ethtool_ops = {
+	.get_settings	= sh_eth_get_settings,
+	.set_settings	= sh_eth_set_settings,
+	.nway_reset	= sh_eth_nway_reset,
+	.get_msglevel	= sh_eth_get_msglevel,
+	.set_msglevel	= sh_eth_set_msglevel,
+	.get_link	= ethtool_op_get_link,
+	.get_strings	= sh_eth_get_strings,
+	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
+	.get_sset_count     = sh_eth_get_sset_count,
+};
+
 /* network device open function */
 static int sh_eth_open(struct net_device *ndev)
 {
@@ -1073,8 +1238,8 @@ static int sh_eth_open(struct net_device *ndev)
 
 	ret = request_irq(ndev->irq, sh_eth_interrupt,
 #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
-		defined(CONFIG_CPU_SUBTYPE_SH7764) || \
-		defined(CONFIG_CPU_SUBTYPE_SH7757)
+	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
+	defined(CONFIG_CPU_SUBTYPE_SH7757)
 		IRQF_SHARED,
 #else
 		0,
@@ -1123,8 +1288,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
 
 	netif_stop_queue(ndev);
 
-	/* worning message out. */
-	printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
+	if (netif_msg_timer(mdp))
+		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
 		" resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
 
 	/* tx_errors count up */
@@ -1167,6 +1332,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	spin_lock_irqsave(&mdp->lock, flags);
 	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
 		if (!sh_eth_txfree(ndev)) {
+			if (netif_msg_tx_queued(mdp))
+				dev_warn(&ndev->dev, "TxFD exhausted.\n");
 			netif_stop_queue(ndev);
 			spin_unlock_irqrestore(&mdp->lock, flags);
 			return NETDEV_TX_BUSY;
@@ -1497,8 +1664,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 
 	/* set function */
 	ndev->netdev_ops = &sh_eth_netdev_ops;
+	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
 	ndev->watchdog_timeo = TX_TIMEOUT;
 
+	/* debug message level */
+	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
 	mdp->post_rx = POST_RX >> (devno << 1);
 	mdp->post_fw = POST_FW >> (devno << 1);
 