aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorRussell King <rmk@dyn-67.arm.linux.org.uk>2009-03-25 14:31:35 -0400
committerRussell King <rmk+kernel@arm.linux.org.uk>2009-03-25 14:31:35 -0400
commit8937b7349ca9e25a02b2a72ccb7fba404ddedc5b (patch)
tree434ffe23d318f8af55850a26c3431ce8b82ab6f4 /drivers/net
parent997302259f386bca8fe1db67c50296ca426c438f (diff)
parentffe7f95bb1a4d1e9ca5d252445dc38476e1a208e (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap-2.6 into devel
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Kconfig13
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/benet/Kconfig7
-rw-r--r--drivers/net/benet/Makefile7
-rw-r--r--drivers/net/benet/be.h328
-rw-r--r--drivers/net/benet/be_cmds.c861
-rw-r--r--drivers/net/benet/be_cmds.h688
-rw-r--r--drivers/net/benet/be_ethtool.c362
-rw-r--r--drivers/net/benet/be_hw.h211
-rw-r--r--drivers/net/benet/be_main.c1911
-rw-r--r--drivers/net/bnx2.c12
-rw-r--r--drivers/net/bnx2x.h2
-rw-r--r--drivers/net/bnx2x_init.h4
-rw-r--r--drivers/net/bnx2x_main.c21
-rw-r--r--drivers/net/bonding/bond_main.c25
-rw-r--r--drivers/net/dm9000.c6
-rw-r--r--drivers/net/dnet.c994
-rw-r--r--drivers/net/dnet.h225
-rw-r--r--drivers/net/ibm_newemac/core.c3
-rw-r--r--drivers/net/igb/igb_main.c26
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c1
-rw-r--r--drivers/net/mv643xx_eth.c10
-rw-r--r--drivers/net/netxen/netxen_nic.h1
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c22
-rw-r--r--drivers/net/netxen/netxen_nic_main.c3
-rw-r--r--drivers/net/qlge/qlge.h1
-rw-r--r--drivers/net/qlge/qlge_main.c57
-rw-r--r--drivers/net/r8169.c122
-rw-r--r--drivers/net/sh_eth.c20
-rw-r--r--drivers/net/sh_eth.h4
-rw-r--r--drivers/net/smsc911x.c4
-rw-r--r--drivers/net/sungem.c9
-rw-r--r--drivers/net/sunhme.c2
-rw-r--r--drivers/net/tulip/tulip_core.c45
-rw-r--r--drivers/net/ucc_geth.c34
-rw-r--r--drivers/net/ucc_geth.h3
-rw-r--r--drivers/net/via-velocity.c15
-rw-r--r--drivers/net/virtio_net.c3
-rw-r--r--drivers/net/wireless/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath9k/core.h33
-rw-r--r--drivers/net/wireless/ath9k/hw.c22
-rw-r--r--drivers/net/wireless/ath9k/main.c1
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c8
43 files changed, 5843 insertions, 289 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a2f185fd7072..62d732a886f1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1040,6 +1040,17 @@ config NI65
1040 To compile this driver as a module, choose M here. The module 1040 To compile this driver as a module, choose M here. The module
1041 will be called ni65. 1041 will be called ni65.
1042 1042
1043config DNET
1044 tristate "Dave ethernet support (DNET)"
1045 depends on NET_ETHERNET && HAS_IOMEM
1046 select PHYLIB
1047 help
1048 The Dave ethernet interface (DNET) is found on Qong Board FPGA.
1049 Say Y to include support for the DNET chip.
1050
1051 To compile this driver as a module, choose M here: the module
1052 will be called dnet.
1053
1043source "drivers/net/tulip/Kconfig" 1054source "drivers/net/tulip/Kconfig"
1044 1055
1045config AT1700 1056config AT1700
@@ -2619,6 +2630,8 @@ config QLGE
2619 2630
2620source "drivers/net/sfc/Kconfig" 2631source "drivers/net/sfc/Kconfig"
2621 2632
2633source "drivers/net/benet/Kconfig"
2634
2622endif # NETDEV_10000 2635endif # NETDEV_10000
2623 2636
2624source "drivers/net/tokenring/Kconfig" 2637source "drivers/net/tokenring/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index aca8492db654..471baaff229f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_GIANFAR) += gianfar_driver.o
22obj-$(CONFIG_TEHUTI) += tehuti.o 22obj-$(CONFIG_TEHUTI) += tehuti.o
23obj-$(CONFIG_ENIC) += enic/ 23obj-$(CONFIG_ENIC) += enic/
24obj-$(CONFIG_JME) += jme.o 24obj-$(CONFIG_JME) += jme.o
25obj-$(CONFIG_BE2NET) += benet/
25 26
26gianfar_driver-objs := gianfar.o \ 27gianfar_driver-objs := gianfar.o \
27 gianfar_ethtool.o \ 28 gianfar_ethtool.o \
@@ -231,6 +232,7 @@ obj-$(CONFIG_ENC28J60) += enc28j60.o
231 232
232obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o 233obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
233 234
235obj-$(CONFIG_DNET) += dnet.o
234obj-$(CONFIG_MACB) += macb.o 236obj-$(CONFIG_MACB) += macb.o
235 237
236obj-$(CONFIG_ARM) += arm/ 238obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/net/benet/Kconfig b/drivers/net/benet/Kconfig
new file mode 100644
index 000000000000..c6934f179c09
--- /dev/null
+++ b/drivers/net/benet/Kconfig
@@ -0,0 +1,7 @@
1config BE2NET
2 tristate "ServerEngines' 10Gbps NIC - BladeEngine 2"
3 depends on PCI && INET
4 select INET_LRO
5 help
6 This driver implements the NIC functionality for ServerEngines'
7 10Gbps network adapter - BladeEngine 2.
diff --git a/drivers/net/benet/Makefile b/drivers/net/benet/Makefile
new file mode 100644
index 000000000000..a60cd8051135
--- /dev/null
+++ b/drivers/net/benet/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile to build the network driver for ServerEngine's BladeEngine.
3#
4
5obj-$(CONFIG_BE2NET) += be2net.o
6
7be2net-y := be_main.o be_cmds.o be_ethtool.o
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
new file mode 100644
index 000000000000..f327be57ca96
--- /dev/null
+++ b/drivers/net/benet/be.h
@@ -0,0 +1,328 @@
1/*
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18#ifndef BE_H
19#define BE_H
20
21#include <linux/pci.h>
22#include <linux/etherdevice.h>
23#include <linux/version.h>
24#include <linux/delay.h>
25#include <net/tcp.h>
26#include <net/ip.h>
27#include <net/ipv6.h>
28#include <linux/if_vlan.h>
29#include <linux/workqueue.h>
30#include <linux/interrupt.h>
31#include <linux/inet_lro.h>
32
33#include "be_hw.h"
34
35#define DRV_VER "2.0.348"
36#define DRV_NAME "be2net"
37#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
38#define DRV_DESC BE_NAME "Driver"
39
40/* Number of bytes of an RX frame that are copied to skb->data */
41#define BE_HDR_LEN 64
42#define BE_MAX_JUMBO_FRAME_SIZE 9018
43#define BE_MIN_MTU 256
44
45#define BE_NUM_VLANS_SUPPORTED 64
46#define BE_MAX_EQD 96
47#define BE_MAX_TX_FRAG_COUNT 30
48
49#define EVNT_Q_LEN 1024
50#define TX_Q_LEN 2048
51#define TX_CQ_LEN 1024
52#define RX_Q_LEN 1024 /* Does not support any other value */
53#define RX_CQ_LEN 1024
54#define MCC_Q_LEN 64 /* total size not to exceed 8 pages */
55#define MCC_CQ_LEN 256
56
57#define BE_NAPI_WEIGHT 64
58#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
59#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
60
61#define BE_MAX_LRO_DESCRIPTORS 16
62#define BE_MAX_FRAGS_PER_FRAME 16
63
64struct be_dma_mem {
65 void *va;
66 dma_addr_t dma;
67 u32 size;
68};
69
70struct be_queue_info {
71 struct be_dma_mem dma_mem;
72 u16 len;
73 u16 entry_size; /* Size of an element in the queue */
74 u16 id;
75 u16 tail, head;
76 bool created;
77 atomic_t used; /* Number of valid elements in the queue */
78};
79
80struct be_ctrl_info {
81 u8 __iomem *csr;
82 u8 __iomem *db; /* Door Bell */
83 u8 __iomem *pcicfg; /* PCI config space */
84 int pci_func;
85
86 /* Mbox used for cmd request/response */
87 spinlock_t cmd_lock; /* For serializing cmds to BE card */
88 struct be_dma_mem mbox_mem;
89 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
90 * is stored for freeing purpose */
91 struct be_dma_mem mbox_mem_alloced;
92};
93
94#include "be_cmds.h"
95
96struct be_drvr_stats {
97 u32 be_tx_reqs; /* number of TX requests initiated */
98 u32 be_tx_stops; /* number of times TX Q was stopped */
99 u32 be_fwd_reqs; /* number of send reqs through forwarding i/f */
100 u32 be_tx_wrbs; /* number of tx WRBs used */
101 u32 be_tx_events; /* number of tx completion events */
102 u32 be_tx_compl; /* number of tx completion entries processed */
103 u64 be_tx_jiffies;
104 ulong be_tx_bytes;
105 ulong be_tx_bytes_prev;
106 u32 be_tx_rate;
107
108 u32 cache_barrier[16];
109
110 u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */
111 u32 be_polls; /* number of times NAPI called poll function */
112 u32 be_rx_events; /* number of ucast rx completion events */
113 u32 be_rx_compl; /* number of rx completion entries processed */
114 u32 be_lro_hgram_data[8]; /* histogram of LRO data packets */
115 u32 be_lro_hgram_ack[8]; /* histogram of LRO ACKs */
116 u64 be_rx_jiffies;
117 ulong be_rx_bytes;
118 ulong be_rx_bytes_prev;
119 u32 be_rx_rate;
120 /* number of non ether type II frames dropped where
121 * frame len > length field of Mac Hdr */
122 u32 be_802_3_dropped_frames;
123 /* number of non ether type II frames malformed where
124 * in frame len < length field of Mac Hdr */
125 u32 be_802_3_malformed_frames;
126 u32 be_rxcp_err; /* Num rx completion entries w/ err set. */
127 ulong rx_fps_jiffies; /* jiffies at last FPS calc */
128 u32 be_rx_frags;
129 u32 be_prev_rx_frags;
130 u32 be_rx_fps; /* Rx frags per second */
131};
132
133struct be_stats_obj {
134 struct be_drvr_stats drvr_stats;
135 struct net_device_stats net_stats;
136 struct be_dma_mem cmd;
137};
138
139struct be_eq_obj {
140 struct be_queue_info q;
141 char desc[32];
142
143 /* Adaptive interrupt coalescing (AIC) info */
144 bool enable_aic;
145 u16 min_eqd; /* in usecs */
146 u16 max_eqd; /* in usecs */
147 u16 cur_eqd; /* in usecs */
148
149 struct napi_struct napi;
150};
151
152struct be_tx_obj {
153 struct be_queue_info q;
154 struct be_queue_info cq;
155 /* Remember the skbs that were transmitted */
156 struct sk_buff *sent_skb_list[TX_Q_LEN];
157};
158
159/* Struct to remember the pages posted for rx frags */
160struct be_rx_page_info {
161 struct page *page;
162 dma_addr_t bus;
163 u16 page_offset;
164 bool last_page_user;
165};
166
167struct be_rx_obj {
168 struct be_queue_info q;
169 struct be_queue_info cq;
170 struct be_rx_page_info page_info_tbl[RX_Q_LEN];
171 struct net_lro_mgr lro_mgr;
172 struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
173};
174
175#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */
176struct be_adapter {
177 struct pci_dev *pdev;
178 struct net_device *netdev;
179
180 /* Mbox, pci config, csr address information */
181 struct be_ctrl_info ctrl;
182
183 struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS];
184 bool msix_enabled;
185 bool isr_registered;
186
187 /* TX Rings */
188 struct be_eq_obj tx_eq;
189 struct be_tx_obj tx_obj;
190
191 u32 cache_line_break[8];
192
193 /* Rx rings */
194 struct be_eq_obj rx_eq;
195 struct be_rx_obj rx_obj;
196 u32 big_page_size; /* Compounded page size shared by rx wrbs */
197 bool rx_post_starved; /* Zero rx frags have been posted to BE */
198
199 struct vlan_group *vlan_grp;
200 u16 num_vlans;
201 u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
202
203 struct be_stats_obj stats;
204 /* Work queue used to perform periodic tasks like getting statistics */
205 struct delayed_work work;
206
207 /* Ethtool knobs and info */
208 bool rx_csum; /* BE card must perform rx-checksumming */
209 u32 max_rx_coal;
210 char fw_ver[FW_VER_LEN];
211 u32 if_handle; /* Used to configure filtering */
212 u32 pmac_id; /* MAC addr handle used by BE card */
213
214 struct be_link_info link;
215 u32 port_num;
216};
217
218extern struct ethtool_ops be_ethtool_ops;
219
220#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
221
222#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
223
224static inline u32 MODULO(u16 val, u16 limit)
225{
226 BUG_ON(limit & (limit - 1));
227 return val & (limit - 1);
228}
229
230static inline void index_adv(u16 *index, u16 val, u16 limit)
231{
232 *index = MODULO((*index + val), limit);
233}
234
235static inline void index_inc(u16 *index, u16 limit)
236{
237 *index = MODULO((*index + 1), limit);
238}
239
240#define PAGE_SHIFT_4K 12
241#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
242
243/* Returns number of pages spanned by the data starting at the given addr */
244#define PAGES_4K_SPANNED(_address, size) \
245 ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
246 (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
247
248/* Byte offset into the page corresponding to given address */
249#define OFFSET_IN_PAGE(addr) \
250 ((size_t)(addr) & (PAGE_SIZE_4K-1))
251
252/* Returns bit offset within a DWORD of a bitfield */
253#define AMAP_BIT_OFFSET(_struct, field) \
254 (((size_t)&(((_struct *)0)->field))%32)
255
256/* Returns the bit mask of the field that is NOT shifted into location. */
257static inline u32 amap_mask(u32 bitsize)
258{
259 return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
260}
261
262static inline void
263amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
264{
265 u32 *dw = (u32 *) ptr + dw_offset;
266 *dw &= ~(mask << offset);
267 *dw |= (mask & value) << offset;
268}
269
270#define AMAP_SET_BITS(_struct, field, ptr, val) \
271 amap_set(ptr, \
272 offsetof(_struct, field)/32, \
273 amap_mask(sizeof(((_struct *)0)->field)), \
274 AMAP_BIT_OFFSET(_struct, field), \
275 val)
276
277static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
278{
279 u32 *dw = (u32 *) ptr;
280 return mask & (*(dw + dw_offset) >> offset);
281}
282
283#define AMAP_GET_BITS(_struct, field, ptr) \
284 amap_get(ptr, \
285 offsetof(_struct, field)/32, \
286 amap_mask(sizeof(((_struct *)0)->field)), \
287 AMAP_BIT_OFFSET(_struct, field))
288
289#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
290#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
291static inline void swap_dws(void *wrb, int len)
292{
293#ifdef __BIG_ENDIAN
294 u32 *dw = wrb;
295 BUG_ON(len % 4);
296 do {
297 *dw = cpu_to_le32(*dw);
298 dw++;
299 len -= 4;
300 } while (len);
301#endif /* __BIG_ENDIAN */
302}
303
304static inline u8 is_tcp_pkt(struct sk_buff *skb)
305{
306 u8 val = 0;
307
308 if (ip_hdr(skb)->version == 4)
309 val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
310 else if (ip_hdr(skb)->version == 6)
311 val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);
312
313 return val;
314}
315
316static inline u8 is_udp_pkt(struct sk_buff *skb)
317{
318 u8 val = 0;
319
320 if (ip_hdr(skb)->version == 4)
321 val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
322 else if (ip_hdr(skb)->version == 6)
323 val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);
324
325 return val;
326}
327
328#endif /* BE_H */
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
new file mode 100644
index 000000000000..d444aed962bc
--- /dev/null
+++ b/drivers/net/benet/be_cmds.c
@@ -0,0 +1,861 @@
1/*
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18#include "be.h"
19
20static int be_mbox_db_ready_wait(void __iomem *db)
21{
22 int cnt = 0, wait = 5;
23 u32 ready;
24
25 do {
26 ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
27 if (ready)
28 break;
29
30 if (cnt > 200000) {
31 printk(KERN_WARNING DRV_NAME
32 ": mbox_db poll timed out\n");
33 return -1;
34 }
35
36 if (cnt > 50)
37 wait = 200;
38 cnt += wait;
39 udelay(wait);
40 } while (true);
41
42 return 0;
43}
44
45/*
46 * Insert the mailbox address into the doorbell in two steps
47 */
48static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
49{
50 int status;
51 u16 compl_status, extd_status;
52 u32 val = 0;
53 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
54 struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
55 struct be_mcc_mailbox *mbox = mbox_mem->va;
56 struct be_mcc_cq_entry *cqe = &mbox->cqe;
57
58 memset(cqe, 0, sizeof(*cqe));
59
60 val &= ~MPU_MAILBOX_DB_RDY_MASK;
61 val |= MPU_MAILBOX_DB_HI_MASK;
62 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
63 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
64 iowrite32(val, db);
65
66 /* wait for ready to be set */
67 status = be_mbox_db_ready_wait(db);
68 if (status != 0)
69 return status;
70
71 val = 0;
72 val &= ~MPU_MAILBOX_DB_RDY_MASK;
73 val &= ~MPU_MAILBOX_DB_HI_MASK;
74 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
75 val |= (u32)(mbox_mem->dma >> 4) << 2;
76 iowrite32(val, db);
77
78 status = be_mbox_db_ready_wait(db);
79 if (status != 0)
80 return status;
81
82 /* compl entry has been made now */
83 be_dws_le_to_cpu(cqe, sizeof(*cqe));
84 if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) {
85 printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n");
86 return -1;
87 }
88
89 compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) &
90 CQE_STATUS_COMPL_MASK;
91 if (compl_status != MCC_STATUS_SUCCESS) {
92 extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) &
93 CQE_STATUS_EXTD_MASK;
94 printk(KERN_WARNING DRV_NAME
95 ": ERROR in cmd compl. status(compl/extd)=%d/%d\n",
96 compl_status, extd_status);
97 }
98
99 return compl_status;
100}
101
102static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
103{
104 u32 sem = ioread32(ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
105
106 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
107 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
108 return -1;
109 else
110 return 0;
111}
112
113static int be_POST_stage_poll(struct be_ctrl_info *ctrl, u16 poll_stage)
114{
115 u16 stage, cnt, error;
116 for (cnt = 0; cnt < 5000; cnt++) {
117 error = be_POST_stage_get(ctrl, &stage);
118 if (error)
119 return -1;
120
121 if (stage == poll_stage)
122 break;
123 udelay(1000);
124 }
125 if (stage != poll_stage)
126 return -1;
127 return 0;
128}
129
130
131int be_cmd_POST(struct be_ctrl_info *ctrl)
132{
133 u16 stage, error;
134
135 error = be_POST_stage_get(ctrl, &stage);
136 if (error)
137 goto err;
138
139 if (stage == POST_STAGE_ARMFW_RDY)
140 return 0;
141
142 if (stage != POST_STAGE_AWAITING_HOST_RDY)
143 goto err;
144
145 /* On awaiting host rdy, reset and again poll on awaiting host rdy */
146 iowrite32(POST_STAGE_BE_RESET, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
147 error = be_POST_stage_poll(ctrl, POST_STAGE_AWAITING_HOST_RDY);
148 if (error)
149 goto err;
150
151 /* Now kickoff POST and poll on armfw ready */
152 iowrite32(POST_STAGE_HOST_RDY, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
153 error = be_POST_stage_poll(ctrl, POST_STAGE_ARMFW_RDY);
154 if (error)
155 goto err;
156
157 return 0;
158err:
159 printk(KERN_WARNING DRV_NAME ": ERROR, stage=%d\n", stage);
160 return -1;
161}
162
163static inline void *embedded_payload(struct be_mcc_wrb *wrb)
164{
165 return wrb->payload.embedded_payload;
166}
167
168static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
169{
170 return &wrb->payload.sgl[0];
171}
172
173/* Don't touch the hdr after it's prepared */
174static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
175 bool embedded, u8 sge_cnt)
176{
177 if (embedded)
178 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
179 else
180 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
181 MCC_WRB_SGE_CNT_SHIFT;
182 wrb->payload_length = payload_len;
183 be_dws_cpu_to_le(wrb, 20);
184}
185
186/* Don't touch the hdr after it's prepared */
187static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
188 u8 subsystem, u8 opcode, int cmd_len)
189{
190 req_hdr->opcode = opcode;
191 req_hdr->subsystem = subsystem;
192 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
193}
194
195static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
196 struct be_dma_mem *mem)
197{
198 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
199 u64 dma = (u64)mem->dma;
200
201 for (i = 0; i < buf_pages; i++) {
202 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
203 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
204 dma += PAGE_SIZE_4K;
205 }
206}
207
208/* Converts interrupt delay in microseconds to multiplier value */
209static u32 eq_delay_to_mult(u32 usec_delay)
210{
211#define MAX_INTR_RATE 651042
212 const u32 round = 10;
213 u32 multiplier;
214
215 if (usec_delay == 0)
216 multiplier = 0;
217 else {
218 u32 interrupt_rate = 1000000 / usec_delay;
219 /* Max delay, corresponding to the lowest interrupt rate */
220 if (interrupt_rate == 0)
221 multiplier = 1023;
222 else {
223 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
224 multiplier /= interrupt_rate;
225 /* Round the multiplier to the closest value.*/
226 multiplier = (multiplier + round/2) / round;
227 multiplier = min(multiplier, (u32)1023);
228 }
229 }
230 return multiplier;
231}
232
233static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
234{
235 return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
236}
237
238int be_cmd_eq_create(struct be_ctrl_info *ctrl,
239 struct be_queue_info *eq, int eq_delay)
240{
241 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
242 struct be_cmd_req_eq_create *req = embedded_payload(wrb);
243 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
244 struct be_dma_mem *q_mem = &eq->dma_mem;
245 int status;
246
247 spin_lock(&ctrl->cmd_lock);
248 memset(wrb, 0, sizeof(*wrb));
249
250 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
251
252 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
253 OPCODE_COMMON_EQ_CREATE, sizeof(*req));
254
255 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
256
257 AMAP_SET_BITS(struct amap_eq_context, func, req->context,
258 ctrl->pci_func);
259 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
260 /* 4byte eqe*/
261 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
262 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
263 __ilog2_u32(eq->len/256));
264 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
265 eq_delay_to_mult(eq_delay));
266 be_dws_cpu_to_le(req->context, sizeof(req->context));
267
268 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
269
270 status = be_mbox_db_ring(ctrl);
271 if (!status) {
272 eq->id = le16_to_cpu(resp->eq_id);
273 eq->created = true;
274 }
275 spin_unlock(&ctrl->cmd_lock);
276 return status;
277}
278
279int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
280 u8 type, bool permanent, u32 if_handle)
281{
282 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
283 struct be_cmd_req_mac_query *req = embedded_payload(wrb);
284 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
285 int status;
286
287 spin_lock(&ctrl->cmd_lock);
288 memset(wrb, 0, sizeof(*wrb));
289
290 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
291
292 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
293 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
294
295 req->type = type;
296 if (permanent) {
297 req->permanent = 1;
298 } else {
299 req->if_id = cpu_to_le16((u16)if_handle);
300 req->permanent = 0;
301 }
302
303 status = be_mbox_db_ring(ctrl);
304 if (!status)
305 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
306
307 spin_unlock(&ctrl->cmd_lock);
308 return status;
309}
310
311int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
312 u32 if_id, u32 *pmac_id)
313{
314 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
315 struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
316 int status;
317
318 spin_lock(&ctrl->cmd_lock);
319 memset(wrb, 0, sizeof(*wrb));
320
321 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
322
323 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
324 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
325
326 req->if_id = cpu_to_le32(if_id);
327 memcpy(req->mac_address, mac_addr, ETH_ALEN);
328
329 status = be_mbox_db_ring(ctrl);
330 if (!status) {
331 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
332 *pmac_id = le32_to_cpu(resp->pmac_id);
333 }
334
335 spin_unlock(&ctrl->cmd_lock);
336 return status;
337}
338
339int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
340{
341 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
342 struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
343 int status;
344
345 spin_lock(&ctrl->cmd_lock);
346 memset(wrb, 0, sizeof(*wrb));
347
348 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
349
350 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
351 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
352
353 req->if_id = cpu_to_le32(if_id);
354 req->pmac_id = cpu_to_le32(pmac_id);
355
356 status = be_mbox_db_ring(ctrl);
357 spin_unlock(&ctrl->cmd_lock);
358
359 return status;
360}
361
362int be_cmd_cq_create(struct be_ctrl_info *ctrl,
363 struct be_queue_info *cq, struct be_queue_info *eq,
364 bool sol_evts, bool no_delay, int coalesce_wm)
365{
366 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
367 struct be_cmd_req_cq_create *req = embedded_payload(wrb);
368 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
369 struct be_dma_mem *q_mem = &cq->dma_mem;
370 void *ctxt = &req->context;
371 int status;
372
373 spin_lock(&ctrl->cmd_lock);
374 memset(wrb, 0, sizeof(*wrb));
375
376 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
377
378 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
379 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
380
381 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
382
383 AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
384 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
385 AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
386 __ilog2_u32(cq->len/256));
387 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
388 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
389 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
390 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
391 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0);
392 AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
393 be_dws_cpu_to_le(ctxt, sizeof(req->context));
394
395 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
396
397 status = be_mbox_db_ring(ctrl);
398 if (!status) {
399 cq->id = le16_to_cpu(resp->cq_id);
400 cq->created = true;
401 }
402 spin_unlock(&ctrl->cmd_lock);
403
404 return status;
405}
406
407int be_cmd_txq_create(struct be_ctrl_info *ctrl,
408 struct be_queue_info *txq,
409 struct be_queue_info *cq)
410{
411 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
412 struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb);
413 struct be_dma_mem *q_mem = &txq->dma_mem;
414 void *ctxt = &req->context;
415 int status;
416 u32 len_encoded;
417
418 spin_lock(&ctrl->cmd_lock);
419 memset(wrb, 0, sizeof(*wrb));
420
421 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
422
423 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
424 sizeof(*req));
425
426 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
427 req->ulp_num = BE_ULP1_NUM;
428 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
429
430 len_encoded = fls(txq->len); /* log2(len) + 1 */
431 if (len_encoded == 16)
432 len_encoded = 0;
433 AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, len_encoded);
434 AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
435 ctrl->pci_func);
436 AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
437 AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
438
439 be_dws_cpu_to_le(ctxt, sizeof(req->context));
440
441 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
442
443 status = be_mbox_db_ring(ctrl);
444 if (!status) {
445 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
446 txq->id = le16_to_cpu(resp->cid);
447 txq->created = true;
448 }
449 spin_unlock(&ctrl->cmd_lock);
450
451 return status;
452}
453
454int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
455 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
456 u16 max_frame_size, u32 if_id, u32 rss)
457{
458 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
459 struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb);
460 struct be_dma_mem *q_mem = &rxq->dma_mem;
461 int status;
462
463 spin_lock(&ctrl->cmd_lock);
464 memset(wrb, 0, sizeof(*wrb));
465
466 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
467
468 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
469 sizeof(*req));
470
471 req->cq_id = cpu_to_le16(cq_id);
472 req->frag_size = fls(frag_size) - 1;
473 req->num_pages = 2;
474 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
475 req->interface_id = cpu_to_le32(if_id);
476 req->max_frame_size = cpu_to_le16(max_frame_size);
477 req->rss_queue = cpu_to_le32(rss);
478
479 status = be_mbox_db_ring(ctrl);
480 if (!status) {
481 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
482 rxq->id = le16_to_cpu(resp->id);
483 rxq->created = true;
484 }
485 spin_unlock(&ctrl->cmd_lock);
486
487 return status;
488}
489
490/* Generic destroyer function for all types of queues */
491int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
492 int queue_type)
493{
494 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
495 struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
496 u8 subsys = 0, opcode = 0;
497 int status;
498
499 spin_lock(&ctrl->cmd_lock);
500
501 memset(wrb, 0, sizeof(*wrb));
502 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
503
504 switch (queue_type) {
505 case QTYPE_EQ:
506 subsys = CMD_SUBSYSTEM_COMMON;
507 opcode = OPCODE_COMMON_EQ_DESTROY;
508 break;
509 case QTYPE_CQ:
510 subsys = CMD_SUBSYSTEM_COMMON;
511 opcode = OPCODE_COMMON_CQ_DESTROY;
512 break;
513 case QTYPE_TXQ:
514 subsys = CMD_SUBSYSTEM_ETH;
515 opcode = OPCODE_ETH_TX_DESTROY;
516 break;
517 case QTYPE_RXQ:
518 subsys = CMD_SUBSYSTEM_ETH;
519 opcode = OPCODE_ETH_RX_DESTROY;
520 break;
521 default:
522 printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
523 status = -1;
524 goto err;
525 }
526 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
527 req->id = cpu_to_le16(q->id);
528
529 status = be_mbox_db_ring(ctrl);
530err:
531 spin_unlock(&ctrl->cmd_lock);
532
533 return status;
534}
535
536/* Create an rx filtering policy configuration on an i/f */
537int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
538 bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
539{
540 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
541 struct be_cmd_req_if_create *req = embedded_payload(wrb);
542 int status;
543
544 spin_lock(&ctrl->cmd_lock);
545 memset(wrb, 0, sizeof(*wrb));
546
547 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
548
549 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
550 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
551
552 req->capability_flags = cpu_to_le32(flags);
553 req->enable_flags = cpu_to_le32(flags);
554 if (!pmac_invalid)
555 memcpy(req->mac_addr, mac, ETH_ALEN);
556
557 status = be_mbox_db_ring(ctrl);
558 if (!status) {
559 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
560 *if_handle = le32_to_cpu(resp->interface_id);
561 if (!pmac_invalid)
562 *pmac_id = le32_to_cpu(resp->pmac_id);
563 }
564
565 spin_unlock(&ctrl->cmd_lock);
566 return status;
567}
568
569int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
570{
571 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
572 struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
573 int status;
574
575 spin_lock(&ctrl->cmd_lock);
576 memset(wrb, 0, sizeof(*wrb));
577
578 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
579
580 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
581 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
582
583 req->interface_id = cpu_to_le32(interface_id);
584 status = be_mbox_db_ring(ctrl);
585
586 spin_unlock(&ctrl->cmd_lock);
587
588 return status;
589}
590
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 */
int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_stats *req = nonemb_cmd->va;
	struct be_sge *sge = nonembedded_sgl(wrb);
	int status;

	spin_lock(&ctrl->cmd_lock);
	memset(wrb, 0, sizeof(*wrb));

	/* The request lives in the caller's DMA buffer, not in the WRB */
	memset(req, 0, sizeof(*req));

	/* embedded == false, one SGE */
	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	/* Point the WRB's single SGE at the DMA buffer */
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_get_stats *resp = nonemb_cmd->va;
		/* Convert the returned stats block to CPU endianness in place */
		be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
	}

	spin_unlock(&ctrl->cmd_lock);
	return status;
}
623
624int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
625 struct be_link_info *link)
626{
627 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
628 struct be_cmd_req_link_status *req = embedded_payload(wrb);
629 int status;
630
631 spin_lock(&ctrl->cmd_lock);
632 memset(wrb, 0, sizeof(*wrb));
633
634 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
635
636 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
637 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
638
639 status = be_mbox_db_ring(ctrl);
640 if (!status) {
641 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
642 link->speed = resp->mac_speed;
643 link->duplex = resp->mac_duplex;
644 link->fault = resp->mac_fault;
645 } else {
646 link->speed = PHY_LINK_SPEED_ZERO;
647 }
648
649 spin_unlock(&ctrl->cmd_lock);
650 return status;
651}
652
653int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
654{
655 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
656 struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
657 int status;
658
659 spin_lock(&ctrl->cmd_lock);
660 memset(wrb, 0, sizeof(*wrb));
661
662 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
663
664 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
665 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
666
667 status = be_mbox_db_ring(ctrl);
668 if (!status) {
669 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
670 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
671 }
672
673 spin_unlock(&ctrl->cmd_lock);
674 return status;
675}
676
677/* set the EQ delay interval of an EQ to specified value */
678int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
679{
680 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
681 struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
682 int status;
683
684 spin_lock(&ctrl->cmd_lock);
685 memset(wrb, 0, sizeof(*wrb));
686
687 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
688
689 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
690 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
691
692 req->num_eq = cpu_to_le32(1);
693 req->delay[0].eq_id = cpu_to_le32(eq_id);
694 req->delay[0].phase = 0;
695 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
696
697 status = be_mbox_db_ring(ctrl);
698
699 spin_unlock(&ctrl->cmd_lock);
700 return status;
701}
702
703int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
704 u32 num, bool untagged, bool promiscuous)
705{
706 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
707 struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
708 int status;
709
710 spin_lock(&ctrl->cmd_lock);
711 memset(wrb, 0, sizeof(*wrb));
712
713 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
714
715 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
716 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
717
718 req->interface_id = if_id;
719 req->promiscuous = promiscuous;
720 req->untagged = untagged;
721 req->num_vlan = num;
722 if (!promiscuous) {
723 memcpy(req->normal_vlan, vtag_array,
724 req->num_vlan * sizeof(vtag_array[0]));
725 }
726
727 status = be_mbox_db_ring(ctrl);
728
729 spin_unlock(&ctrl->cmd_lock);
730 return status;
731}
732
733int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
734{
735 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
736 struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb);
737 int status;
738
739 spin_lock(&ctrl->cmd_lock);
740 memset(wrb, 0, sizeof(*wrb));
741
742 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
743
744 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
745 OPCODE_ETH_PROMISCUOUS, sizeof(*req));
746
747 if (port_num)
748 req->port1_promiscuous = en;
749 else
750 req->port0_promiscuous = en;
751
752 status = be_mbox_db_ring(ctrl);
753
754 spin_unlock(&ctrl->cmd_lock);
755 return status;
756}
757
758int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
759 u32 num, bool promiscuous)
760{
761 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
762 struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb);
763 int status;
764
765 spin_lock(&ctrl->cmd_lock);
766 memset(wrb, 0, sizeof(*wrb));
767
768 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
769
770 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
771 OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
772
773 req->interface_id = if_id;
774 req->promiscuous = promiscuous;
775 if (!promiscuous) {
776 req->num_mac = cpu_to_le16(num);
777 if (num)
778 memcpy(req->mac, mac_table, ETH_ALEN * num);
779 }
780
781 status = be_mbox_db_ring(ctrl);
782
783 spin_unlock(&ctrl->cmd_lock);
784 return status;
785}
786
787int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
788{
789 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
790 struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
791 int status;
792
793 spin_lock(&ctrl->cmd_lock);
794
795 memset(wrb, 0, sizeof(*wrb));
796
797 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
798
799 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
800 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
801
802 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
803 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
804
805 status = be_mbox_db_ring(ctrl);
806
807 spin_unlock(&ctrl->cmd_lock);
808 return status;
809}
810
811int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
812{
813 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
814 struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
815 int status;
816
817 spin_lock(&ctrl->cmd_lock);
818
819 memset(wrb, 0, sizeof(*wrb));
820
821 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
822
823 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
824 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
825
826 status = be_mbox_db_ring(ctrl);
827 if (!status) {
828 struct be_cmd_resp_get_flow_control *resp =
829 embedded_payload(wrb);
830 *tx_fc = le16_to_cpu(resp->tx_flow_control);
831 *rx_fc = le16_to_cpu(resp->rx_flow_control);
832 }
833
834 spin_unlock(&ctrl->cmd_lock);
835 return status;
836}
837
838int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
839{
840 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
841 struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
842 int status;
843
844 spin_lock(&ctrl->cmd_lock);
845
846 memset(wrb, 0, sizeof(*wrb));
847
848 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
849
850 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
851 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
852
853 status = be_mbox_db_ring(ctrl);
854 if (!status) {
855 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
856 *port_num = le32_to_cpu(resp->phys_port);
857 }
858
859 spin_unlock(&ctrl->cmd_lock);
860 return status;
861}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
new file mode 100644
index 000000000000..e499e2d5b8c3
--- /dev/null
+++ b/drivers/net/benet/be_cmds.h
@@ -0,0 +1,688 @@
1/*
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18/*
19 * The driver sends configuration and managements command requests to the
20 * firmware in the BE. These requests are communicated to the processor
21 * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
22 * WRB inside a MAILBOX.
23 * The commands are serviced by the ARM processor in the BladeEngine's MPU.
24 */
25
/* Scatter-gather element: 64-bit guest-physical address plus length of
 * one buffer of a non-embedded command. */
struct be_sge {
	u32 pa_lo;
	u32 pa_hi;
	u32 len;
};
31
#define MCC_WRB_EMBEDDED_MASK	1	/* bit 0 of dword 0*/
#define MCC_WRB_SGE_CNT_SHIFT	3	/* bits 3 - 7 of dword 0 */
#define MCC_WRB_SGE_CNT_MASK	0x1F	/* bits 3 - 7 of dword 0 */
/* Work request block handed to the MCC. The command payload is either
 * embedded in the WRB itself (up to 236 bytes) or the WRB carries up to
 * 19 SGEs pointing at external DMA memory. */
struct be_mcc_wrb {
	u32 embedded;		/* dword 0 */
	u32 payload_length;	/* dword 1 */
	u32 tag0;		/* dword 2 */
	u32 tag1;		/* dword 3 */
	u32 rsvd;		/* dword 4 */
	union {
		u8 embedded_payload[236]; /* used by embedded cmds */
		struct be_sge sgl[19];    /* used by non-embedded cmds */
	} payload;
};
46
/* MCC CQE flag bits (dword 3 of the completion entry). Unsigned
 * constants are used because (1 << 31) shifts a signed int into its
 * sign bit, which is undefined behaviour in C; the masks are applied
 * to u32 flag words, so the values are unchanged. */
#define CQE_FLAGS_VALID_MASK 		(1U << 31)
#define CQE_FLAGS_ASYNC_MASK		(1U << 30)
#define CQE_FLAGS_COMPLETED_MASK 	(1U << 28)
#define CQE_FLAGS_CONSUMED_MASK 	(1U << 27)
51
/* Completion Status */
enum {
	MCC_STATUS_SUCCESS = 0x0,
/* The client does not have sufficient privileges to execute the command */
	MCC_STATUS_INSUFFICIENT_PRIVILEGES = 0x1,
/* A parameter in the command was invalid. */
	MCC_STATUS_INVALID_PARAMETER = 0x2,
/* There are insufficient chip resources to execute the command */
	MCC_STATUS_INSUFFICIENT_RESOURCES = 0x3,
/* The command is completing because the queue was getting flushed */
	MCC_STATUS_QUEUE_FLUSHING = 0x4,
/* The command is completing with a DMA error */
	MCC_STATUS_DMA_FAILED = 0x5
};

/* NOTE(review): EXTD shares shift 0 with COMPL here; extended status
 * normally occupies the upper half of the status dword — verify the
 * intended field layout against the firmware spec. */
#define CQE_STATUS_COMPL_MASK		0xFFFF
#define CQE_STATUS_COMPL_SHIFT		0	/* bits 0 - 15 */
#define CQE_STATUS_EXTD_MASK		0xFFFF
#define CQE_STATUS_EXTD_SHIFT		0	/* bits 0 - 15 */

/* MCC completion queue entry; status and flags are decoded with the
 * CQE_STATUS_* and CQE_FLAGS_* masks above. */
struct be_mcc_cq_entry {
	u32 status;		/* dword 0 */
	u32 tag0;		/* dword 1 */
	u32 tag1;		/* dword 2 */
	u32 flags;		/* dword 3 */
};
78
/* The mailbox: one WRB for the request followed by one CQE for the
 * firmware's completion. */
struct be_mcc_mailbox {
	struct be_mcc_wrb wrb;
	struct be_mcc_cq_entry cqe;
};
83
/* Firmware command subsystems; each opcode below belongs to one of them */
#define CMD_SUBSYSTEM_COMMON	0x1
#define CMD_SUBSYSTEM_ETH 	0x3

/* Opcodes of the COMMON subsystem */
#define OPCODE_COMMON_NTWK_MAC_QUERY			1
#define OPCODE_COMMON_NTWK_MAC_SET			2
#define OPCODE_COMMON_NTWK_MULTICAST_SET		3
#define OPCODE_COMMON_NTWK_VLAN_CONFIG  		4
#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY		5
#define OPCODE_COMMON_CQ_CREATE				12
#define OPCODE_COMMON_EQ_CREATE				13
#define OPCODE_COMMON_MCC_CREATE        		21
#define OPCODE_COMMON_NTWK_RX_FILTER    		34
#define OPCODE_COMMON_GET_FW_VERSION			35
#define OPCODE_COMMON_SET_FLOW_CONTROL			36
#define OPCODE_COMMON_GET_FLOW_CONTROL			37
#define OPCODE_COMMON_SET_FRAME_SIZE			39
#define OPCODE_COMMON_MODIFY_EQ_DELAY			41
#define OPCODE_COMMON_FIRMWARE_CONFIG			42
#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 		50
#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 		51
#define OPCODE_COMMON_CQ_DESTROY        		54
#define OPCODE_COMMON_EQ_DESTROY        		55
#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG		58
#define OPCODE_COMMON_NTWK_PMAC_ADD			59
#define OPCODE_COMMON_NTWK_PMAC_DEL			60

/* Opcodes of the ETH subsystem */
#define OPCODE_ETH_ACPI_CONFIG				2
#define OPCODE_ETH_PROMISCUOUS				3
#define OPCODE_ETH_GET_STATISTICS			4
#define OPCODE_ETH_TX_CREATE				7
#define OPCODE_ETH_RX_CREATE            		8
#define OPCODE_ETH_TX_DESTROY           		9
#define OPCODE_ETH_RX_DESTROY           		10
117
/* Common header at the start of every command request */
struct be_cmd_req_hdr {
	u8 opcode;		/* dword 0 */
	u8 subsystem;		/* dword 0 */
	u8 port_number;		/* dword 0 */
	u8 domain;		/* dword 0 */
	u32 timeout;		/* dword 1 */
	u32 request_length;	/* dword 2 */
	u32 rsvd;		/* dword 3 */
};

#define RESP_HDR_INFO_OPCODE_SHIFT	0	/* bits 0 - 7 */
#define RESP_HDR_INFO_SUBSYS_SHIFT	8 	/* bits 8 - 15 */
/* Common header at the start of every command response; info packs the
 * opcode and subsystem per the shifts above. */
struct be_cmd_resp_hdr {
	u32 info;		/* dword 0 */
	u32 status;		/* dword 1 */
	u32 response_length;	/* dword 2 */
	u32 actual_resp_len;	/* dword 3 */
};

/* 64-bit physical address split into two dwords */
struct phys_addr {
	u32 lo;
	u32 hi;
};
141
/**************************
 * BE Command definitions *
 **************************/

/* Pseudo amap definition in which each bit of the actual structure is defined
 * as a byte: used to calculate offset/shift/mask of each field */
struct amap_eq_context {
	u8 cidx[13];		/* dword 0*/
	u8 rsvd0[3];		/* dword 0*/
	u8 epidx[13];		/* dword 0*/
	u8 valid;		/* dword 0*/
	u8 rsvd1;		/* dword 0*/
	u8 size;		/* dword 0*/
	u8 pidx[13];		/* dword 1*/
	u8 rsvd2[3];		/* dword 1*/
	u8 pd[10];		/* dword 1*/
	u8 count[3];		/* dword 1*/
	u8 solevent;		/* dword 1*/
	u8 stalled;		/* dword 1*/
	u8 armed;		/* dword 1*/
	u8 rsvd3[4];		/* dword 2*/
	u8 func[8];		/* dword 2*/
	u8 rsvd4;		/* dword 2*/
	u8 delaymult[10];	/* dword 2*/
	u8 rsvd5[2];		/* dword 2*/
	u8 phase[2];		/* dword 2*/
	u8 nodelay;		/* dword 2*/
	u8 rsvd6[4];		/* dword 2*/
	u8 rsvd7[32];		/* dword 3*/
} __packed;

/* EQ create request: the amap above (bits/8 bytes) plus the physical
 * pages backing the event queue ring. */
struct be_cmd_req_eq_create {
	struct be_cmd_req_hdr hdr;
	u16 num_pages;		/* sword */
	u16 rsvd0;		/* sword */
	u8 context[sizeof(struct amap_eq_context) / 8];
	struct phys_addr pages[8];
} __packed;

struct be_cmd_resp_eq_create {
	struct be_cmd_resp_hdr resp_hdr;
	u16 eq_id;		/* sword */
	u16 rsvd0;		/* sword */
} __packed;
186
/******************** Mac query ***************************/
enum {
	MAC_ADDRESS_TYPE_STORAGE = 0x0,
	MAC_ADDRESS_TYPE_NETWORK = 0x1,
	MAC_ADDRESS_TYPE_PD = 0x2,
	MAC_ADDRESS_TYPE_MANAGEMENT = 0x3
};

struct mac_addr {
	u16 size_of_struct;
	u8 addr[ETH_ALEN];
} __packed;

/* Query a mac address of the given type; permanent selects the factory
 * address rather than the currently programmed one. */
struct be_cmd_req_mac_query {
	struct be_cmd_req_hdr hdr;
	u8 type;
	u8 permanent;
	u16 if_id;
} __packed;

struct be_cmd_resp_mac_query {
	struct be_cmd_resp_hdr hdr;
	struct mac_addr mac;
};

/******************** PMac Add ***************************/
/* Attach a mac address to interface if_id; the response returns a
 * pmac_id used to detach it later. */
struct be_cmd_req_pmac_add {
	struct be_cmd_req_hdr hdr;
	u32 if_id;
	u8 mac_address[ETH_ALEN];
	u8 rsvd0[2];
} __packed;

struct be_cmd_resp_pmac_add {
	struct be_cmd_resp_hdr hdr;
	u32 pmac_id;
};

/******************** PMac Del ***************************/
struct be_cmd_req_pmac_del {
	struct be_cmd_req_hdr hdr;
	u32 if_id;
	u32 pmac_id;
};
231
/******************** Create CQ ***************************/
/* Pseudo amap definition in which each bit of the actual structure is defined
 * as a byte: used to calculate offset/shift/mask of each field */
struct amap_cq_context {
	u8 cidx[11];		/* dword 0*/
	u8 rsvd0;		/* dword 0*/
	u8 coalescwm[2];	/* dword 0*/
	u8 nodelay;		/* dword 0*/
	u8 epidx[11];		/* dword 0*/
	u8 rsvd1;		/* dword 0*/
	u8 count[2];		/* dword 0*/
	u8 valid;		/* dword 0*/
	u8 solevent;		/* dword 0*/
	u8 eventable;		/* dword 0*/
	u8 pidx[11];		/* dword 1*/
	u8 rsvd2;		/* dword 1*/
	u8 pd[10];		/* dword 1*/
	u8 eqid[8];		/* dword 1*/
	u8 stalled;		/* dword 1*/
	u8 armed;		/* dword 1*/
	u8 rsvd3[4];		/* dword 2*/
	u8 func[8];		/* dword 2*/
	u8 rsvd4[20];		/* dword 2*/
	u8 rsvd5[32];		/* dword 3*/
} __packed;

/* CQ create request: amap context plus the physical pages backing the
 * completion queue ring. */
struct be_cmd_req_cq_create {
	struct be_cmd_req_hdr hdr;
	u16 num_pages;
	u16 rsvd0;
	u8 context[sizeof(struct amap_cq_context) / 8];
	struct phys_addr pages[8];
} __packed;

struct be_cmd_resp_cq_create {
	struct be_cmd_resp_hdr hdr;
	u16 cq_id;
	u16 rsvd0;
} __packed;
271
/******************** Create TxQ ***************************/
#define BE_ETH_TX_RING_TYPE_STANDARD    	2
#define BE_ULP1_NUM				1

/* Pseudo amap definition in which each bit of the actual structure is defined
 * as a byte: used to calculate offset/shift/mask of each field */
struct amap_tx_context {
	u8 rsvd0[16];		/* dword 0 */
	u8 tx_ring_size[4];	/* dword 0 */
	u8 rsvd1[26];		/* dword 0 */
	u8 pci_func_id[8];	/* dword 1 */
	u8 rsvd2[9];		/* dword 1 */
	u8 ctx_valid;		/* dword 1 */
	u8 cq_id_send[16];	/* dword 2 */
	u8 rsvd3[16];		/* dword 2 */
	u8 rsvd4[32];		/* dword 3 */
	u8 rsvd5[32];		/* dword 4 */
	u8 rsvd6[32];		/* dword 5 */
	u8 rsvd7[32];		/* dword 6 */
	u8 rsvd8[32];		/* dword 7 */
	u8 rsvd9[32];		/* dword 8 */
	u8 rsvd10[32];		/* dword 9 */
	u8 rsvd11[32];		/* dword 10 */
	u8 rsvd12[32];		/* dword 11 */
	u8 rsvd13[32];		/* dword 12 */
	u8 rsvd14[32];		/* dword 13 */
	u8 rsvd15[32];		/* dword 14 */
	u8 rsvd16[32];		/* dword 15 */
} __packed;

/* TX queue create request: amap context plus the physical pages backing
 * the transmit ring. */
struct be_cmd_req_eth_tx_create {
	struct be_cmd_req_hdr hdr;
	u8 num_pages;
	u8 ulp_num;
	u8 type;
	u8 bound_port;
	u8 context[sizeof(struct amap_tx_context) / 8];
	struct phys_addr pages[8];
} __packed;

struct be_cmd_resp_eth_tx_create {
	struct be_cmd_resp_hdr hdr;
	u16 cid;
	u16 rsvd0;
} __packed;
317
/******************** Create RxQ ***************************/
/* RX queue create request; the queue posts completions to cq_id and
 * delivers frames to interface_id. */
struct be_cmd_req_eth_rx_create {
	struct be_cmd_req_hdr hdr;
	u16 cq_id;
	u8 frag_size;
	u8 num_pages;
	struct phys_addr pages[2];
	u32 interface_id;
	u16 max_frame_size;
	u16 rsvd0;
	u32 rss_queue;
} __packed;

struct be_cmd_resp_eth_rx_create {
	struct be_cmd_resp_hdr hdr;
	u16 id;
	u8 cpu_id;
	u8 rsvd0;
} __packed;

/******************** Q Destroy  ***************************/
/* Type of Queue to be destroyed */
enum {
	QTYPE_EQ = 1,
	QTYPE_CQ,
	QTYPE_TXQ,
	QTYPE_RXQ
};

/* Shared destroy request for all queue types; the queue type is encoded
 * in the command opcode, not in the request body. */
struct be_cmd_req_q_destroy {
	struct be_cmd_req_hdr hdr;
	u16 id;
	u16 bypass_flush;	/* valid only for rx q destroy */
} __packed;
352
/************ I/f Create (it's actually I/f Config Create)**********/

/* Capability flags for the i/f */
enum be_if_flags {
	BE_IF_FLAGS_RSS = 0x4,
	BE_IF_FLAGS_PROMISCUOUS = 0x8,
	BE_IF_FLAGS_BROADCAST = 0x10,
	BE_IF_FLAGS_UNTAGGED = 0x20,
	BE_IF_FLAGS_ULP = 0x40,
	BE_IF_FLAGS_VLAN_PROMISCUOUS = 0x80,
	BE_IF_FLAGS_VLAN = 0x100,
	BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
	BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
	BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800
};

/* An RX interface is an object with one or more MAC addresses and
 * filtering capabilities. */
struct be_cmd_req_if_create {
	struct be_cmd_req_hdr hdr;
	u32 version;		/* ignore currently */
	u32 capability_flags;
	u32 enable_flags;
	u8 mac_addr[ETH_ALEN];
	u8 rsvd0;
	u8 pmac_invalid;	/* if set, don't attach the mac addr to the i/f */
	u32 vlan_tag;		/* not used currently */
} __packed;

struct be_cmd_resp_if_create {
	struct be_cmd_resp_hdr hdr;
	u32 interface_id;
	u32 pmac_id;
};

/****** I/f Destroy(it's actually I/f Config Destroy )**********/
struct be_cmd_req_if_destroy {
	struct be_cmd_req_hdr hdr;
	u32 interface_id;
};
393
/*************** HW Stats Get **********************************/
/* Per-port RX/TX counters as laid out by the hardware; the dword
 * comments give each field's offset within the stats block. */
struct be_port_rxf_stats {
	u32 rx_bytes_lsd;	/* dword 0*/
	u32 rx_bytes_msd;	/* dword 1*/
	u32 rx_total_frames;	/* dword 2*/
	u32 rx_unicast_frames;	/* dword 3*/
	u32 rx_multicast_frames;	/* dword 4*/
	u32 rx_broadcast_frames;	/* dword 5*/
	u32 rx_crc_errors;	/* dword 6*/
	u32 rx_alignment_symbol_errors;	/* dword 7*/
	u32 rx_pause_frames;	/* dword 8*/
	u32 rx_control_frames;	/* dword 9*/
	u32 rx_in_range_errors;	/* dword 10*/
	u32 rx_out_range_errors;	/* dword 11*/
	u32 rx_frame_too_long;	/* dword 12*/
	u32 rx_address_match_errors;	/* dword 13*/
	u32 rx_vlan_mismatch;	/* dword 14*/
	u32 rx_dropped_too_small;	/* dword 15*/
	u32 rx_dropped_too_short;	/* dword 16*/
	u32 rx_dropped_header_too_small;	/* dword 17*/
	u32 rx_dropped_tcp_length;	/* dword 18*/
	u32 rx_dropped_runt;	/* dword 19*/
	u32 rx_64_byte_packets;	/* dword 20*/
	u32 rx_65_127_byte_packets;	/* dword 21*/
	u32 rx_128_256_byte_packets;	/* dword 22*/
	u32 rx_256_511_byte_packets;	/* dword 23*/
	u32 rx_512_1023_byte_packets;	/* dword 24*/
	u32 rx_1024_1518_byte_packets;	/* dword 25*/
	u32 rx_1519_2047_byte_packets;	/* dword 26*/
	u32 rx_2048_4095_byte_packets;	/* dword 27*/
	u32 rx_4096_8191_byte_packets;	/* dword 28*/
	u32 rx_8192_9216_byte_packets;	/* dword 29*/
	u32 rx_ip_checksum_errs;	/* dword 30*/
	u32 rx_tcp_checksum_errs;	/* dword 31*/
	u32 rx_udp_checksum_errs;	/* dword 32*/
	u32 rx_non_rss_packets;	/* dword 33*/
	u32 rx_ipv4_packets;	/* dword 34*/
	u32 rx_ipv6_packets;	/* dword 35*/
	u32 rx_ipv4_bytes_lsd;	/* dword 36*/
	u32 rx_ipv4_bytes_msd;	/* dword 37*/
	u32 rx_ipv6_bytes_lsd;	/* dword 38*/
	u32 rx_ipv6_bytes_msd;	/* dword 39*/
	u32 rx_chute1_packets;	/* dword 40*/
	u32 rx_chute2_packets;	/* dword 41*/
	u32 rx_chute3_packets;	/* dword 42*/
	u32 rx_management_packets;	/* dword 43*/
	u32 rx_switched_unicast_packets;	/* dword 44*/
	u32 rx_switched_multicast_packets;	/* dword 45*/
	u32 rx_switched_broadcast_packets;	/* dword 46*/
	u32 tx_bytes_lsd;	/* dword 47*/
	u32 tx_bytes_msd;	/* dword 48*/
	u32 tx_unicastframes;	/* dword 49*/
	u32 tx_multicastframes;	/* dword 50*/
	u32 tx_broadcastframes;	/* dword 51*/
	u32 tx_pauseframes;	/* dword 52*/
	u32 tx_controlframes;	/* dword 53*/
	u32 tx_64_byte_packets;	/* dword 54*/
	u32 tx_65_127_byte_packets;	/* dword 55*/
	u32 tx_128_256_byte_packets;	/* dword 56*/
	u32 tx_256_511_byte_packets;	/* dword 57*/
	u32 tx_512_1023_byte_packets;	/* dword 58*/
	u32 tx_1024_1518_byte_packets;	/* dword 59*/
	u32 tx_1519_2047_byte_packets;	/* dword 60*/
	u32 tx_2048_4095_byte_packets;	/* dword 61*/
	u32 tx_4096_8191_byte_packets;	/* dword 62*/
	u32 tx_8192_9216_byte_packets;	/* dword 63*/
	u32 rx_fifo_overflow;	/* dword 64*/
	u32 rx_input_fifo_overflow;	/* dword 65*/
};

/* RX filter stats: the two per-port blocks above followed by
 * chip-global drop and management counters. */
struct be_rxf_stats {
	struct be_port_rxf_stats port[2];
	u32 rx_drops_no_pbuf;	/* dword 132*/
	u32 rx_drops_no_txpb;	/* dword 133*/
	u32 rx_drops_no_erx_descr;	/* dword 134*/
	u32 rx_drops_no_tpre_descr;	/* dword 135*/
	u32 management_rx_port_packets;	/* dword 136*/
	u32 management_rx_port_bytes;	/* dword 137*/
	u32 management_rx_port_pause_frames;	/* dword 138*/
	u32 management_rx_port_errors;	/* dword 139*/
	u32 management_tx_port_packets;	/* dword 140*/
	u32 management_tx_port_bytes;	/* dword 141*/
	u32 management_tx_port_pause;	/* dword 142*/
	u32 management_rx_port_rxfifo_overflow;	/* dword 143*/
	u32 rx_drops_too_many_frags;	/* dword 144*/
	u32 rx_drops_invalid_ring;	/* dword 145*/
	u32 forwarded_packets;	/* dword 146*/
	u32 rx_drops_mtu;	/* dword 147*/
	u32 rsvd0[15];
};

struct be_erx_stats {
	u32 rx_drops_no_fragments[44];	/* dwordS 0 to 43*/
	u32 debug_wdma_sent_hold;	/* dword 44*/
	u32 debug_wdma_pbfree_sent_hold;	/* dword 45*/
	u32 debug_wdma_zerobyte_pbfree_sent_hold;	/* dword 46*/
	u32 debug_pmem_pbuf_dealloc;	/* dword 47*/
};

/* Full hardware stats block returned by OPCODE_ETH_GET_STATISTICS */
struct be_hw_stats {
	struct be_rxf_stats rxf;
	u32 rsvd[48];
	struct be_erx_stats erx;
};

/* The request reserves room for the response stats in the same
 * DMA buffer (the command is non-embedded). */
struct be_cmd_req_get_stats {
	struct be_cmd_req_hdr hdr;
	u8 rsvd[sizeof(struct be_hw_stats)];
};

struct be_cmd_resp_get_stats {
	struct be_cmd_resp_hdr hdr;
	struct be_hw_stats hw_stats;
};
508
/* VLAN filter configuration: up to 64 allowed tags, or promiscuous */
struct be_cmd_req_vlan_config {
	struct be_cmd_req_hdr hdr;
	u8 interface_id;
	u8 promiscuous;
	u8 untagged;
	u8 num_vlan;
	u16 normal_vlan[64];
} __packed;

/* Per-physical-port promiscuous mode enable */
struct be_cmd_req_promiscuous_config {
	struct be_cmd_req_hdr hdr;
	u8 port0_promiscuous;
	u8 port1_promiscuous;
	u16 rsvd0;
} __packed;

struct macaddr {
	u8 byte[ETH_ALEN];
};

/* Multicast filter configuration: up to 32 addresses, or promiscuous */
struct be_cmd_req_mcast_mac_config {
	struct be_cmd_req_hdr hdr;
	u16 num_mac;
	u8 promiscuous;
	u8 interface_id;
	struct macaddr mac[32];
} __packed;
536
537static inline struct be_hw_stats *
538hw_stats_from_cmd(struct be_cmd_resp_get_stats *cmd)
539{
540 return &cmd->hw_stats;
541}
542
/******************** Link Status Query *******************/
struct be_cmd_req_link_status {
	struct be_cmd_req_hdr hdr;
	u32 rsvd;
};

/* Link state as cached by the driver; values come from the
 * PHY_LINK_* enums below. */
struct be_link_info {
	u8 duplex;
	u8 speed;
	u8 fault;
};

enum {
	PHY_LINK_DUPLEX_NONE = 0x0,
	PHY_LINK_DUPLEX_HALF = 0x1,
	PHY_LINK_DUPLEX_FULL = 0x2
};

enum {
	PHY_LINK_SPEED_ZERO = 0x0, 	/* => No link */
	PHY_LINK_SPEED_10MBPS = 0x1,
	PHY_LINK_SPEED_100MBPS = 0x2,
	PHY_LINK_SPEED_1GBPS = 0x3,
	PHY_LINK_SPEED_10GBPS = 0x4
};

struct be_cmd_resp_link_status {
	struct be_cmd_resp_hdr hdr;
	u8 physical_port;
	u8 mac_duplex;
	u8 mac_speed;
	u8 mac_fault;
	u8 mgmt_mac_duplex;
	u8 mgmt_mac_speed;
	u16 rsvd0;
} __packed;
579
/******************** Get FW Version *******************/
#define FW_VER_LEN	32
/* The request reserves the space the firmware overwrites with the two
 * version strings in the response. */
struct be_cmd_req_get_fw_version {
	struct be_cmd_req_hdr hdr;
	u8 rsvd0[FW_VER_LEN];
	u8 rsvd1[FW_VER_LEN];
} __packed;

struct be_cmd_resp_get_fw_version {
	struct be_cmd_resp_hdr hdr;
	u8 firmware_version_string[FW_VER_LEN];
	u8 fw_on_flash_version_string[FW_VER_LEN];
} __packed;
593
/******************** Set Flow Contrl *******************/
struct be_cmd_req_set_flow_control {
	struct be_cmd_req_hdr hdr;
	u16 tx_flow_control;
	u16 rx_flow_control;
} __packed;

/******************** Get Flow Contrl *******************/
struct be_cmd_req_get_flow_control {
	struct be_cmd_req_hdr hdr;
	u32 rsvd;
};

struct be_cmd_resp_get_flow_control {
	struct be_cmd_resp_hdr hdr;
	u16 tx_flow_control;
	u16 rx_flow_control;
} __packed;
612
/******************** Modify EQ Delay *******************/
/* Updates the delay multiplier of up to 8 EQs in one command */
struct be_cmd_req_modify_eq_delay {
	struct be_cmd_req_hdr hdr;
	u32 num_eq;
	struct {
		u32 eq_id;
		u32 phase;
		u32 delay_multiplier;
	} delay[8];
} __packed;

struct be_cmd_resp_modify_eq_delay {
	struct be_cmd_resp_hdr hdr;
	u32 rsvd0;
} __packed;

/******************** Get FW Config *******************/
struct be_cmd_req_query_fw_cfg {
	struct be_cmd_req_hdr hdr;
	u32 rsvd[30];
};

struct be_cmd_resp_query_fw_cfg {
	struct be_cmd_resp_hdr hdr;
	u32 be_config_number;
	u32 asic_revision;
	u32 phys_port;
	u32 function_mode;
	u32 rsvd[26];
};
643
/* Entry points implemented in be_cmds.c; all issue mailbox commands
 * under ctrl->cmd_lock and return 0 on success. */
extern int be_pci_fnum_get(struct be_ctrl_info *ctrl);
extern int be_cmd_POST(struct be_ctrl_info *ctrl);
extern int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle);
extern int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
			u32 if_id, u32 *pmac_id);
extern int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id);
extern int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 if_flags, u8 *mac,
			bool pmac_invalid, u32 *if_handle, u32 *pmac_id);
extern int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 if_handle);
/* Queue create/destroy */
extern int be_cmd_eq_create(struct be_ctrl_info *ctrl,
			struct be_queue_info *eq, int eq_delay);
extern int be_cmd_cq_create(struct be_ctrl_info *ctrl,
			struct be_queue_info *cq, struct be_queue_info *eq,
			bool sol_evts, bool no_delay,
			int num_cqe_dma_coalesce);
extern int be_cmd_txq_create(struct be_ctrl_info *ctrl,
			struct be_queue_info *txq,
			struct be_queue_info *cq);
extern int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
			struct be_queue_info *rxq, u16 cq_id,
			u16 frag_size, u16 max_frame_size, u32 if_id,
			u32 rss);
extern int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			int type);
extern int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
			struct be_link_info *link);
extern int be_cmd_reset(struct be_ctrl_info *ctrl);
/* Non-embedded: caller supplies the DMA buffer for the stats block */
extern int be_cmd_get_stats(struct be_ctrl_info *ctrl,
			struct be_dma_mem *nonemb_cmd);
extern int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver);

extern int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd);
extern int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id,
			u16 *vtag_array, u32 num, bool untagged,
			bool promiscuous);
extern int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl,
			u8 port_num, bool en);
extern int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id,
			u8 *mac_table, u32 num, bool promiscuous);
extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl,
			u32 tx_fc, u32 rx_fc);
extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl,
			u32 *tx_fc, u32 *rx_fc);
extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
new file mode 100644
index 000000000000..04f4b73fa8d8
--- /dev/null
+++ b/drivers/net/benet/be_ethtool.c
@@ -0,0 +1,362 @@
1/*
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18#include "be.h"
19#include <linux/ethtool.h>
20
/* Describes one counter row in the et_stats table below: the name shown by
 * "ethtool -S", which stats structure it lives in (type, see enum below),
 * and the field's size and byte offset within that structure. */
struct be_ethtool_stat {
	char desc[ETH_GSTRING_LEN];	/* display name */
	int type;		/* NETSTAT/PORTSTAT/MISCSTAT/DRVSTAT/ERXSTAT */
	int size;		/* sizeof the field: u32 or u64 */
	int offset;		/* offsetof the field in its structure */
};
27
/* Which structure a table entry's offset is relative to */
enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT, ERXSTAT};
/* Expand to the size and offset of @field within @_struct */
#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
					offsetof(_struct, field)
/* Per-source helpers: stringize the field name and record where to
 * fetch its value from at "ethtool -S" time */
#define NETSTAT_INFO(field) 	#field, NETSTAT,\
					FIELDINFO(struct net_device_stats,\
						field)
#define DRVSTAT_INFO(field) 	#field, DRVSTAT,\
					FIELDINFO(struct be_drvr_stats, field)
#define MISCSTAT_INFO(field) 	#field, MISCSTAT,\
					FIELDINFO(struct be_rxf_stats, field)
#define PORTSTAT_INFO(field) 	#field, PORTSTAT,\
					FIELDINFO(struct be_port_rxf_stats, \
						field)
#define ERXSTAT_INFO(field) 	#field, ERXSTAT,\
					FIELDINFO(struct be_erx_stats, field)
43
/* Every counter exposed via "ethtool -S", in display order.
 * be_get_ethtool_stats() walks this table; the type field selects which
 * base structure the recorded offset indexes into. */
static const struct be_ethtool_stat et_stats[] = {
	{NETSTAT_INFO(rx_packets)},
	{NETSTAT_INFO(tx_packets)},
	{NETSTAT_INFO(rx_bytes)},
	{NETSTAT_INFO(tx_bytes)},
	{NETSTAT_INFO(rx_errors)},
	{NETSTAT_INFO(tx_errors)},
	{NETSTAT_INFO(rx_dropped)},
	{NETSTAT_INFO(tx_dropped)},
	{DRVSTAT_INFO(be_tx_reqs)},
	{DRVSTAT_INFO(be_tx_stops)},
	{DRVSTAT_INFO(be_fwd_reqs)},
	{DRVSTAT_INFO(be_tx_wrbs)},
	{DRVSTAT_INFO(be_polls)},
	{DRVSTAT_INFO(be_tx_events)},
	{DRVSTAT_INFO(be_rx_events)},
	{DRVSTAT_INFO(be_tx_compl)},
	{DRVSTAT_INFO(be_rx_compl)},
	{DRVSTAT_INFO(be_ethrx_post_fail)},
	{DRVSTAT_INFO(be_802_3_dropped_frames)},
	{DRVSTAT_INFO(be_802_3_malformed_frames)},
	{DRVSTAT_INFO(be_tx_rate)},
	{DRVSTAT_INFO(be_rx_rate)},
	{PORTSTAT_INFO(rx_unicast_frames)},
	{PORTSTAT_INFO(rx_multicast_frames)},
	{PORTSTAT_INFO(rx_broadcast_frames)},
	{PORTSTAT_INFO(rx_crc_errors)},
	{PORTSTAT_INFO(rx_alignment_symbol_errors)},
	{PORTSTAT_INFO(rx_pause_frames)},
	{PORTSTAT_INFO(rx_control_frames)},
	{PORTSTAT_INFO(rx_in_range_errors)},
	{PORTSTAT_INFO(rx_out_range_errors)},
	{PORTSTAT_INFO(rx_frame_too_long)},
	{PORTSTAT_INFO(rx_address_match_errors)},
	{PORTSTAT_INFO(rx_vlan_mismatch)},
	{PORTSTAT_INFO(rx_dropped_too_small)},
	{PORTSTAT_INFO(rx_dropped_too_short)},
	{PORTSTAT_INFO(rx_dropped_header_too_small)},
	{PORTSTAT_INFO(rx_dropped_tcp_length)},
	{PORTSTAT_INFO(rx_dropped_runt)},
	{PORTSTAT_INFO(rx_fifo_overflow)},
	{PORTSTAT_INFO(rx_input_fifo_overflow)},
	{PORTSTAT_INFO(rx_ip_checksum_errs)},
	{PORTSTAT_INFO(rx_tcp_checksum_errs)},
	{PORTSTAT_INFO(rx_udp_checksum_errs)},
	{PORTSTAT_INFO(rx_non_rss_packets)},
	{PORTSTAT_INFO(rx_ipv4_packets)},
	{PORTSTAT_INFO(rx_ipv6_packets)},
	{PORTSTAT_INFO(tx_unicastframes)},
	{PORTSTAT_INFO(tx_multicastframes)},
	{PORTSTAT_INFO(tx_broadcastframes)},
	{PORTSTAT_INFO(tx_pauseframes)},
	{PORTSTAT_INFO(tx_controlframes)},
	{MISCSTAT_INFO(rx_drops_no_pbuf)},
	{MISCSTAT_INFO(rx_drops_no_txpb)},
	{MISCSTAT_INFO(rx_drops_no_erx_descr)},
	{MISCSTAT_INFO(rx_drops_no_tpre_descr)},
	{MISCSTAT_INFO(rx_drops_too_many_frags)},
	{MISCSTAT_INFO(rx_drops_invalid_ring)},
	{MISCSTAT_INFO(forwarded_packets)},
	{MISCSTAT_INFO(rx_drops_mtu)},
	{ERXSTAT_INFO(rx_drops_no_fragments)},
};
/* row count; also reported via be_get_stats_count() */
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
108
109static void
110be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
111{
112 struct be_adapter *adapter = netdev_priv(netdev);
113
114 strcpy(drvinfo->driver, DRV_NAME);
115 strcpy(drvinfo->version, DRV_VER);
116 strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
117 strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
118 drvinfo->testinfo_len = 0;
119 drvinfo->regdump_len = 0;
120 drvinfo->eedump_len = 0;
121}
122
123static int
124be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
125{
126 struct be_adapter *adapter = netdev_priv(netdev);
127 struct be_eq_obj *rx_eq = &adapter->rx_eq;
128 struct be_eq_obj *tx_eq = &adapter->tx_eq;
129
130 coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;
131
132 coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
133 coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd;
134 coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd;
135
136 coalesce->tx_coalesce_usecs = tx_eq->cur_eqd;
137 coalesce->tx_coalesce_usecs_high = tx_eq->max_eqd;
138 coalesce->tx_coalesce_usecs_low = tx_eq->min_eqd;
139
140 coalesce->use_adaptive_rx_coalesce = rx_eq->enable_aic;
141 coalesce->use_adaptive_tx_coalesce = tx_eq->enable_aic;
142
143 return 0;
144}
145
146/*
147 * This routine is used to set interrup coalescing delay *as well as*
148 * the number of pkts to coalesce for LRO.
149 */
150static int
151be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
152{
153 struct be_adapter *adapter = netdev_priv(netdev);
154 struct be_ctrl_info *ctrl = &adapter->ctrl;
155 struct be_eq_obj *rx_eq = &adapter->rx_eq;
156 struct be_eq_obj *tx_eq = &adapter->tx_eq;
157 u32 tx_max, tx_min, tx_cur;
158 u32 rx_max, rx_min, rx_cur;
159 int status = 0;
160
161 if (coalesce->use_adaptive_tx_coalesce == 1)
162 return -EINVAL;
163
164 adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
165 if (adapter->max_rx_coal > MAX_SKB_FRAGS)
166 adapter->max_rx_coal = MAX_SKB_FRAGS - 1;
167
168 /* if AIC is being turned on now, start with an EQD of 0 */
169 if (rx_eq->enable_aic == 0 &&
170 coalesce->use_adaptive_rx_coalesce == 1) {
171 rx_eq->cur_eqd = 0;
172 }
173 rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
174
175 rx_max = coalesce->rx_coalesce_usecs_high;
176 rx_min = coalesce->rx_coalesce_usecs_low;
177 rx_cur = coalesce->rx_coalesce_usecs;
178
179 tx_max = coalesce->tx_coalesce_usecs_high;
180 tx_min = coalesce->tx_coalesce_usecs_low;
181 tx_cur = coalesce->tx_coalesce_usecs;
182
183 if (tx_cur > BE_MAX_EQD)
184 tx_cur = BE_MAX_EQD;
185 if (tx_eq->cur_eqd != tx_cur) {
186 status = be_cmd_modify_eqd(ctrl, tx_eq->q.id, tx_cur);
187 if (!status)
188 tx_eq->cur_eqd = tx_cur;
189 }
190
191 if (rx_eq->enable_aic) {
192 if (rx_max > BE_MAX_EQD)
193 rx_max = BE_MAX_EQD;
194 if (rx_min > rx_max)
195 rx_min = rx_max;
196 rx_eq->max_eqd = rx_max;
197 rx_eq->min_eqd = rx_min;
198 if (rx_eq->cur_eqd > rx_max)
199 rx_eq->cur_eqd = rx_max;
200 if (rx_eq->cur_eqd < rx_min)
201 rx_eq->cur_eqd = rx_min;
202 } else {
203 if (rx_cur > BE_MAX_EQD)
204 rx_cur = BE_MAX_EQD;
205 if (rx_eq->cur_eqd != rx_cur) {
206 status = be_cmd_modify_eqd(ctrl, rx_eq->q.id, rx_cur);
207 if (!status)
208 rx_eq->cur_eqd = rx_cur;
209 }
210 }
211 return 0;
212}
213
214static u32 be_get_rx_csum(struct net_device *netdev)
215{
216 struct be_adapter *adapter = netdev_priv(netdev);
217
218 return adapter->rx_csum;
219}
220
221static int be_set_rx_csum(struct net_device *netdev, uint32_t data)
222{
223 struct be_adapter *adapter = netdev_priv(netdev);
224
225 if (data)
226 adapter->rx_csum = true;
227 else
228 adapter->rx_csum = false;
229
230 return 0;
231}
232
/* ethtool -S handler: for each et_stats row, pick the base structure
 * matching its type, add the recorded field offset, and fetch the value
 * as a u32 or u64 depending on the recorded size. */
static void
be_get_ethtool_stats(struct net_device *netdev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drvr_stats *drvr_stats = &adapter->stats.drvr_stats;
	/* HW counters live in the DMA buffer the FW fills on stats queries */
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *net_stats = &adapter->stats.net_stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	void *p = NULL;
	int i;

	for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
		switch (et_stats[i].type) {
		case NETSTAT:
			p = net_stats;
			break;
		case DRVSTAT:
			p = drvr_stats;
			break;
		case PORTSTAT:
			p = port_stats;
			break;
		case MISCSTAT:
			p = rxf_stats;
			break;
		case ERXSTAT: /* Currently only one ERX stat is provided */
			/* advance by the rx ring id in u32 units -- assumes
			 * the ERX counter is a per-ring u32 array indexed by
			 * queue id; confirm against be_erx_stats in be_hw.h */
			p = (u32 *)erx_stats + adapter->rx_obj.q.id;
			break;
		}

		p = (u8 *)p + et_stats[i].offset;
		/* widen 32-bit counters into the 64-bit ethtool slot */
		data[i] = (et_stats[i].size == sizeof(u64)) ?
				*(u64 *)p: *(u32 *)p;
	}

	return;
}
274
275static void
276be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
277 uint8_t *data)
278{
279 int i;
280 switch (stringset) {
281 case ETH_SS_STATS:
282 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
283 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
284 data += ETH_GSTRING_LEN;
285 }
286 break;
287 }
288}
289
/* Number of counters reported by be_get_ethtool_stats()/get_strings(). */
static int be_get_stats_count(struct net_device *netdev)
{
	return ETHTOOL_STATS_NUM;
}
294
295static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
296{
297 ecmd->speed = SPEED_10000;
298 ecmd->duplex = DUPLEX_FULL;
299 ecmd->autoneg = AUTONEG_DISABLE;
300 return 0;
301}
302
303static void
304be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
305{
306 struct be_adapter *adapter = netdev_priv(netdev);
307
308 ring->rx_max_pending = adapter->rx_obj.q.len;
309 ring->tx_max_pending = adapter->tx_obj.q.len;
310
311 ring->rx_pending = atomic_read(&adapter->rx_obj.q.used);
312 ring->tx_pending = atomic_read(&adapter->tx_obj.q.used);
313}
314
315static void
316be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
317{
318 struct be_adapter *adapter = netdev_priv(netdev);
319
320 be_cmd_get_flow_control(&adapter->ctrl, &ecmd->tx_pause,
321 &ecmd->rx_pause);
322 ecmd->autoneg = AUTONEG_ENABLE;
323}
324
325static int
326be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
327{
328 struct be_adapter *adapter = netdev_priv(netdev);
329 int status;
330
331 if (ecmd->autoneg != AUTONEG_ENABLE)
332 return -EINVAL;
333
334 status = be_cmd_set_flow_control(&adapter->ctrl, ecmd->tx_pause,
335 ecmd->rx_pause);
336 if (!status)
337 dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
338
339 return status;
340}
341
/* ethtool entry points for the benet driver; generic offload toggles
 * (tx csum, sg, tso) use the stock ethtool_op_* helpers.
 * NOTE(review): declared non-const because it is referenced via a
 * non-const extern elsewhere (presumably be.h); confirm before
 * const-qualifying. */
struct ethtool_ops be_ethtool_ops = {
	.get_settings = be_get_settings,
	.get_drvinfo = be_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_coalesce = be_get_coalesce,
	.set_coalesce = be_set_coalesce,
	.get_ringparam = be_get_ringparam,
	.get_pauseparam = be_get_pauseparam,
	.set_pauseparam = be_set_pauseparam,
	.get_rx_csum = be_get_rx_csum,
	.set_rx_csum = be_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_strings = be_get_stat_strings,
	.get_stats_count = be_get_stats_count,
	.get_ethtool_stats = be_get_ethtool_stats,
};
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
new file mode 100644
index 000000000000..b132aa4893ca
--- /dev/null
+++ b/drivers/net/benet/be_hw.h
@@ -0,0 +1,211 @@
1/*
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
/********* Mailbox door bell *************/
/* Used for driver communication with the FW.
 * The software must write this register twice to post any command. First,
 * it writes the register with hi=1 and the upper bits of the physical address
 * for the MAILBOX structure. Software must poll the ready bit until this
 * is acknowledged. Then, software writes the register with hi=0 with the lower
 * bits in the address. It must poll the ready bit until the command is
 * complete. Upon completion, the MAILBOX will contain a valid completion
 * queue entry.
 */
#define MPU_MAILBOX_DB_OFFSET	0x160
#define MPU_MAILBOX_DB_RDY_MASK	0x1 	/* bit 0 */
#define MPU_MAILBOX_DB_HI_MASK	0x2	/* bit 1 */

#define MPU_EP_CONTROL 		0

/********** MPU semaphore ******************/
#define MPU_EP_SEMAPHORE_OFFSET	0xac
#define EP_SEMAPHORE_POST_STAGE_MASK	0x0000FFFF
#define EP_SEMAPHORE_POST_ERR_MASK	0x1
#define EP_SEMAPHORE_POST_ERR_SHIFT	31
/* MPU semaphore POST stage values */
#define POST_STAGE_AWAITING_HOST_RDY 	0x1 /* FW awaiting goahead from host */
#define POST_STAGE_HOST_RDY 		0x2 /* Host has given go-ahead to FW */
#define POST_STAGE_BE_RESET		0x3 /* Host wants to reset chip */
#define POST_STAGE_ARMFW_RDY		0xc000	/* FW is done with POST */

/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 	0xfc
/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
 * Disable" may still globally block interrupts in addition to individual
 * interrupt masks; a mechanism for the device driver to block all interrupts
 * atomically without having to arbitrate for the PCI Interrupt Disable bit
 * with the OS.
 */
#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK	(1 << 29) /* bit 29 */
/* PCI physical function number */
#define MEMBAR_CTRL_INT_CTRL_PFUNC_MASK  	0x7 	/* bits 26 - 28 */
#define MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT	26

/********* Event Q door bell *************/
#define DB_EQ_OFFSET		DB_CQ_OFFSET
#define DB_EQ_RING_ID_MASK	0x1FF	/* bits 0 - 8 */
/* Clear the interrupt for this eq */
#define DB_EQ_CLR_SHIFT		(9)	/* bit 9 */
/* Must be 1 */
#define DB_EQ_EVNT_SHIFT	(10)	/* bit 10 */
/* Number of event entries processed */
#define DB_EQ_NUM_POPPED_SHIFT	(16)	/* bits 16 - 28 */
/* Rearm bit */
#define DB_EQ_REARM_SHIFT	(29)	/* bit 29 */

/********* Compl Q door bell *************/
#define DB_CQ_OFFSET 		0x120
#define DB_CQ_RING_ID_MASK	0x3FF	/* bits 0 - 9 */
/* Number of event entries processed */
#define DB_CQ_NUM_POPPED_SHIFT	(16) 	/* bits 16 - 28 */
/* Rearm bit */
#define DB_CQ_REARM_SHIFT	(29) 	/* bit 29 */

/********** TX ULP door bell *************/
#define DB_TXULP1_OFFSET	0x60
#define DB_TXULP_RING_ID_MASK	0x7FF	/* bits 0 - 10 */
/* Number of tx entries posted */
#define DB_TXULP_NUM_POSTED_SHIFT	(16)	/* bits 16 - 29 */
#define DB_TXULP_NUM_POSTED_MASK	0x3FFF	/* bits 16 - 29 */

/********** RQ(erx) door bell ************/
#define DB_RQ_OFFSET 		0x100
#define DB_RQ_RING_ID_MASK	0x3FF	/* bits 0 - 9 */
/* Number of rx frags posted */
#define DB_RQ_NUM_POSTED_SHIFT	(24)	/* bits 24 - 31 */
/*
 * BE descriptors: host memory data structures whose formats
 * are hardwired in BE silicon.
 */
/* Event Queue Descriptor */
#define EQ_ENTRY_VALID_MASK 		0x1	/* bit 0 */
#define EQ_ENTRY_RES_ID_MASK 		0xFFFF	/* bits 16 - 31 */
#define EQ_ENTRY_RES_ID_SHIFT 		16
struct be_eq_entry {
	u32 evt;	/* valid bit, resource id; see masks above */
};

/* TX Queue Descriptor */
#define ETH_WRB_FRAG_LEN_MASK		0xFFFF
struct be_eth_wrb {
	u32 frag_pa_hi;		/* dword 0 */
	u32 frag_pa_lo;		/* dword 1 */
	u32 rsvd0;		/* dword 2 */
	u32 frag_len;		/* dword 3: bits 0 - 15 */
} __packed;

/* Pseudo amap definition for eth_hdr_wrb in which each bit of the
 * actual structure is defined as a byte : used to calculate
 * offset/shift/mask of each field */
struct amap_eth_hdr_wrb {
	u8 rsvd0[32];		/* dword 0 */
	u8 rsvd1[32];		/* dword 1 */
	u8 complete;		/* dword 2 */
	u8 event;
	u8 crc;
	u8 forward;
	u8 ipsec;
	u8 mgmt;
	u8 ipcs;
	u8 udpcs;
	u8 tcpcs;
	u8 lso;
	u8 vlan;
	u8 gso[2];
	u8 num_wrb[5];
	u8 lso_mss[14];
	u8 len[16];		/* dword 3 */
	u8 vlan_tag[16];
} __packed;

/* The wire-format header wrb: 4 dwords accessed via the amap above */
struct be_eth_hdr_wrb {
	u32 dw[4];
};

/* TX Compl Queue Descriptor */

/* Pseudo amap definition for eth_tx_compl in which each bit of the
 * actual structure is defined as a byte: used to calculate
 * offset/shift/mask of each field */
struct amap_eth_tx_compl {
	u8 wrb_index[16];	/* dword 0 */
	u8 ct[2]; 		/* dword 0 */
	u8 port[2];		/* dword 0 */
	u8 rsvd0[8];		/* dword 0 */
	u8 status[4];		/* dword 0 */
	u8 user_bytes[16];	/* dword 1 */
	u8 nwh_bytes[8];	/* dword 1 */
	u8 lso;			/* dword 1 */
	u8 cast_enc[2];		/* dword 1 */
	u8 rsvd1[5];		/* dword 1 */
	u8 rsvd2[32];		/* dword 2 */
	u8 pkts[16];		/* dword 3 */
	u8 ringid[11];		/* dword 3 */
	u8 hash_val[4];		/* dword 3 */
	u8 valid;		/* dword 3 */
} __packed;

struct be_eth_tx_compl {
	u32 dw[4];
};

/* RX Queue Descriptor */
struct be_eth_rx_d {
	u32 fragpa_hi;		/* upper 32 bits of the frag bus address */
	u32 fragpa_lo;		/* lower 32 bits of the frag bus address */
};

/* RX Compl Queue Descriptor */

/* Pseudo amap definition for eth_rx_compl in which each bit of the
 * actual structure is defined as a byte: used to calculate
 * offset/shift/mask of each field */
struct amap_eth_rx_compl {
	u8 vlan_tag[16];	/* dword 0 */
	u8 pktsize[14];		/* dword 0 */
	u8 port;		/* dword 0 */
	u8 ip_opt;		/* dword 0 */
	u8 err;			/* dword 1 */
	u8 rsshp;		/* dword 1 */
	u8 ipf;			/* dword 1 */
	u8 tcpf;		/* dword 1 */
	u8 udpf;		/* dword 1 */
	u8 ipcksm;		/* dword 1 */
	u8 l4_cksm;		/* dword 1 */
	u8 ip_version;		/* dword 1 */
	u8 macdst[6];		/* dword 1 */
	u8 vtp;			/* dword 1 */
	u8 rsvd0;		/* dword 1 */
	u8 fragndx[10];		/* dword 1 */
	u8 ct[2];		/* dword 1 */
	u8 sw;			/* dword 1 */
	u8 numfrags[3];		/* dword 1 */
	u8 rss_flush;		/* dword 2 */
	u8 cast_enc[2];		/* dword 2 */
	u8 qnq;			/* dword 2 */
	u8 rss_bank;		/* dword 2 */
	u8 rsvd1[23];		/* dword 2 */
	u8 lro_pkt;		/* dword 2 */
	u8 rsvd2[2];		/* dword 2 */
	u8 valid;		/* dword 2 */
	u8 rsshash[32];		/* dword 3 */
} __packed;

struct be_eth_rx_compl {
	u32 dw[4];
};
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
new file mode 100644
index 000000000000..0ecaffb70e58
--- /dev/null
+++ b/drivers/net/benet/be_main.c
@@ -0,0 +1,1911 @@
1/*
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18#include "be.h"
19
20MODULE_VERSION(DRV_VER);
21MODULE_DEVICE_TABLE(pci, be_dev_ids);
22MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
23MODULE_AUTHOR("ServerEngines Corporation");
24MODULE_LICENSE("GPL");
25
26static unsigned int rx_frag_size = 2048;
27module_param(rx_frag_size, uint, S_IRUGO);
28MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
29
30#define BE_VENDOR_ID 0x19a2
31#define BE2_DEVICE_ID_1 0x0211
32static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
33 { PCI_DEVICE(BE_VENDOR_ID, BE2_DEVICE_ID_1) },
34 { 0 }
35};
36MODULE_DEVICE_TABLE(pci, be_dev_ids);
37
38static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
39{
40 struct be_dma_mem *mem = &q->dma_mem;
41 if (mem->va)
42 pci_free_consistent(adapter->pdev, mem->size,
43 mem->va, mem->dma);
44}
45
46static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
47 u16 len, u16 entry_size)
48{
49 struct be_dma_mem *mem = &q->dma_mem;
50
51 memset(q, 0, sizeof(*q));
52 q->len = len;
53 q->entry_size = entry_size;
54 mem->size = len * entry_size;
55 mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
56 if (!mem->va)
57 return -1;
58 memset(mem->va, 0, mem->size);
59 return 0;
60}
61
/* Ring accessors: q->dma_mem.va is the base of the DMA ring and entries
 * are q->entry_size bytes each. index_inc() (defined in be.h; presumably
 * wraps the index modulo q->len -- confirm there) advances head/tail. */
static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}
81
82static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
83{
84 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
85 u32 reg = ioread32(addr);
86 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
87 if (!enabled && enable) {
88 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
89 } else if (enabled && !enable) {
90 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
91 } else {
92 printk(KERN_WARNING DRV_NAME
93 ": bad value in membar_int_ctrl reg=0x%x\n", reg);
94 return;
95 }
96 iowrite32(reg, addr);
97}
98
99static void be_rxq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
100{
101 u32 val = 0;
102 val |= qid & DB_RQ_RING_ID_MASK;
103 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
104 iowrite32(val, ctrl->db + DB_RQ_OFFSET);
105}
106
107static void be_txq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
108{
109 u32 val = 0;
110 val |= qid & DB_TXULP_RING_ID_MASK;
111 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
112 iowrite32(val, ctrl->db + DB_TXULP1_OFFSET);
113}
114
115static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
116 bool arm, bool clear_int, u16 num_popped)
117{
118 u32 val = 0;
119 val |= qid & DB_EQ_RING_ID_MASK;
120 if (arm)
121 val |= 1 << DB_EQ_REARM_SHIFT;
122 if (clear_int)
123 val |= 1 << DB_EQ_CLR_SHIFT;
124 val |= 1 << DB_EQ_EVNT_SHIFT;
125 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
126 iowrite32(val, ctrl->db + DB_EQ_OFFSET);
127}
128
129static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
130 bool arm, u16 num_popped)
131{
132 u32 val = 0;
133 val |= qid & DB_CQ_RING_ID_MASK;
134 if (arm)
135 val |= 1 << DB_CQ_REARM_SHIFT;
136 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
137 iowrite32(val, ctrl->db + DB_CQ_OFFSET);
138}
139
140
/* ndo_set_mac_address handler.
 * If the i/f is up, delete the currently programmed pmac entry and add
 * the new MAC in HW before updating the netdev copy; if the i/f is down
 * only the netdev copy changes (status stays 0, HW untouched --
 * presumably reprogrammed at the next open; confirm against be_open). */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (netif_running(netdev)) {
		status = be_cmd_pmac_del(&adapter->ctrl, adapter->if_handle,
				adapter->pmac_id);
		if (status)
			return status;

		/* NOTE(review): no is_valid_ether_addr() check on the new
		 * address, and no rollback if pmac_add fails after pmac_del
		 * already succeeded -- the i/f is then left with no MAC. */
		status = be_cmd_pmac_add(&adapter->ctrl, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id);
	}

	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
162
/* Refresh the cached net_device_stats from the FW-provided HW stats
 * buffer, using this adapter's per-port counters. */
static void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->stats.net_stats;

	dev_stats->rx_packets = port_stats->rx_total_frames;
	dev_stats->tx_packets = port_stats->tx_unicastframes +
		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
	/* byte counters are split into 32-bit msd/lsd halves by the FW */
	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
				(u64) port_stats->rx_bytes_lsd;
	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
				(u64) port_stats->tx_bytes_lsd;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space in linux buffers */
	dev_stats->rx_dropped = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->tx_multicastframes;
	dev_stats->collisions = 0;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;
	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;
	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;
	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}
220
/* Query the current link state from FW and, on an up<->down transition,
 * start/stop the tx queue and toggle the carrier. A speed of
 * PHY_LINK_SPEED_ZERO denotes link down. */
static void be_link_status_update(struct be_adapter *adapter)
{
	struct be_link_info *prev = &adapter->link;
	struct be_link_info now = { 0 };
	struct net_device *netdev = adapter->netdev;

	be_cmd_link_status_query(&adapter->ctrl, &now);

	/* If link came up or went down */
	if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
		prev->speed == PHY_LINK_SPEED_ZERO)) {
		if (now.speed == PHY_LINK_SPEED_ZERO) {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		} else {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		}
	}
	/* remember the full link info for the next comparison */
	*prev = now;
}
244
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	u32 eqd;
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;

	/* Update once a second */
	if (((jiffies - stats->rx_fps_jiffies) < HZ) || rx_eq->enable_aic == 0)
		return;

	/* frags/sec since the last update; the guard above makes the
	 * divisor at least 1 */
	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((jiffies - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = jiffies;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	/* map the frag rate to a delay, clamp to [min_eqd, max_eqd];
	 * very low rates get zero delay to favor latency */
	eqd = stats->be_rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(ctrl, rx_eq->q.id, eqd);

	/* NOTE(review): cur_eqd is updated even when be_cmd_modify_eqd()
	 * fails, so the cached value can disagree with HW. */
	rx_eq->cur_eqd = eqd;
}
275
276static struct net_device_stats *be_get_stats(struct net_device *dev)
277{
278 struct be_adapter *adapter = netdev_priv(dev);
279
280 return &adapter->stats.net_stats;
281}
282
/* Accumulate per-request tx driver stats and recompute the tx bit-rate
 * once every two seconds. */
static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, bool stopped)
{
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	if (stopped)
		stats->be_tx_stops++;

	/* Update tx rate once in two seconds */
	if ((jiffies - stats->be_tx_jiffies) > 2 * HZ) {
		u32 r;
		/* bytes in the interval divided by whole seconds elapsed */
		r = (stats->be_tx_bytes - stats->be_tx_bytes_prev) /
			((u32) (jiffies - stats->be_tx_jiffies) / HZ);
		r = (r / 1000000);		/* M bytes/s */
		stats->be_tx_rate = (r * 8);	/* M bits/s */
		stats->be_tx_jiffies = jiffies;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}
304
305/* Determine number of WRB entries needed to xmit data in an skb */
306static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
307{
308 int cnt = 0;
309 while (skb) {
310 if (skb->len > skb->data_len)
311 cnt++;
312 cnt += skb_shinfo(skb)->nr_frags;
313 skb = skb_shinfo(skb)->frag_list;
314 }
315 /* to account for hdr wrb */
316 cnt++;
317 if (cnt & 1) {
318 /* add a dummy to make it an even num */
319 cnt++;
320 *dummy = true;
321 } else
322 *dummy = false;
323 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
324 return cnt;
325}
326
327static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
328{
329 wrb->frag_pa_hi = upper_32_bits(addr);
330 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
331 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
332}
333
/* Fill the header wrb that leads every tx request: offload flags (LSO or
 * L4 checksum), optional vlan tag, total wrb count and payload length. */
static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	/* LSO and checksum offload are mutually exclusive in this header */
	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	/* request an event + completion and record wrb count / byte count */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
363
364
/* DMA-map the skb (linear part, page frags and any frag_list chain) and
 * post one wrb per mapping onto the tx ring, preceded by the header wrb
 * and optionally followed by a dummy wrb to keep the count even.
 * Returns the number of payload bytes mapped.
 * NOTE(review): pci_map_single/pci_map_page return values are not
 * checked for mapping failure. */
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	u64 busaddr;
	u32 i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	/* reserve ring space; the header wrb slot is filled last (below),
	 * once the total copied length is known */
	atomic_add(wrb_cnt, &txq->used);
	hdr = queue_head_node(txq);
	queue_head_inc(txq);

	while (skb) {
		if (skb->len > skb->data_len) {
			/* linear part of this skb */
			int len = skb->len - skb->data_len;
			busaddr = pci_map_single(pdev, skb->data, len,
					PCI_DMA_TODEVICE);
			wrb = queue_head_node(txq);
			wrb_fill(wrb, busaddr, len);
			be_dws_cpu_to_le(wrb, sizeof(*wrb));
			queue_head_inc(txq);
			copied += len;
		}

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[i];
			busaddr = pci_map_page(pdev, frag->page,
					frag->page_offset,
					frag->size, PCI_DMA_TODEVICE);
			wrb = queue_head_node(txq);
			wrb_fill(wrb, busaddr, frag->size);
			be_dws_cpu_to_le(wrb, sizeof(*wrb));
			queue_head_inc(txq);
			copied += frag->size;
		}
		skb = skb_shinfo(skb)->frag_list;
	}

	if (dummy_wrb) {
		/* zero-length pad wrb to make the count even */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
}
420
/* ndo_start_xmit handler: build the wrbs for @skb, remember the skb for
 * completion processing, then ring the tx doorbell. Always returns
 * NETDEV_TX_OK; if the ring can't hold another max-sized request the
 * queue is stopped first so the next tx completion can wake it. */
static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* index of the header wrb / skb slot */
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);

	/* record the sent skb in the sent_skb table */
	BUG_ON(tx_obj->sent_skb_list[start]);
	tx_obj->sent_skb_list[start] = skb;

	/* Ensure that txq has space for the next skb; Else stop the queue
	 * *BEFORE* ringing the tx doorbell, so that we serialze the
	 * tx compls of the current transmit which'll wake up the queue
	 */
	if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
		netif_stop_queue(netdev);
		stopped = true;
	}

	be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt);

	netdev->trans_start = jiffies;

	be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
	return NETDEV_TX_OK;
}
454
455static int be_change_mtu(struct net_device *netdev, int new_mtu)
456{
457 struct be_adapter *adapter = netdev_priv(netdev);
458 if (new_mtu < BE_MIN_MTU ||
459 new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
460 dev_info(&adapter->pdev->dev,
461 "MTU must be between %d and %d bytes\n",
462 BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
463 return -EINVAL;
464 }
465 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
466 netdev->mtu, new_mtu);
467 netdev->mtu = new_mtu;
468 return 0;
469}
470
/*
 * Push the currently configured VLAN filter set to the hardware.
 * If there are BE_NUM_VLANS_SUPPORTED or fewer VLANs configured,
 * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
 * set the BE in promiscuous VLAN mode (last arg of be_cmd_vlan_config).
 */
static void be_vid_config(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;

	if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
		/* Construct VLAN Table to give to HW; vlan_tag[] is a
		 * presence bitmap indexed by VLAN id */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle,
			vtag, ntags, 1, 0);
	} else {
		/* Too many VLANs for the HW table: VLAN-promiscuous mode */
		be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle,
			NULL, 0, 1, 1);
	}
}
497
/* ndo_vlan_rx_register handler: cache the vlan group used by the RX path.
 * Both event queues are unarmed while the pointer is swapped so that no
 * RX/TX completion processing runs against a half-updated vlan_grp.
 */
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_ctrl_info *ctrl = &adapter->ctrl;

	/* Quiesce event delivery on both EQs */
	be_eq_notify(ctrl, rx_eq->q.id, false, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	/* Re-arm the EQs */
	be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
}
511
/* ndo_vlan_rx_add_vid handler: mark vid present and reprogram the HW
 * VLAN table.
 * NOTE(review): no explicit locking around num_vlans/vlan_tag here —
 * presumably serialized by rtnl; confirm against the callers.
 */
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans++;
	adapter->vlan_tag[vid] = 1;

	be_vid_config(netdev);
}
521
/* ndo_vlan_rx_kill_vid handler: clear vid from the driver's table and the
 * vlan group, then reprogram the HW VLAN table.
 */
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans--;
	adapter->vlan_tag[vid] = 0;

	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	be_vid_config(netdev);
}
532
/* Program the multicast MAC filter from netdev->mc_list. IFF_ALLMULTI
 * puts the HW in multicast-promiscuous mode instead. Addresses are sent
 * to the HW in batches of up to 32 (the size of the stack buffer).
 */
static void be_set_multicast_filter(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct dev_mc_list *mc_ptr;
	u8 mac_addr[32][ETH_ALEN];	/* one command batch worth of MACs */
	int i = 0;

	if (netdev->flags & IFF_ALLMULTI) {
		/* set BE in Multicast promiscuous */
		be_cmd_mcast_mac_set(&adapter->ctrl,
			adapter->if_handle, NULL, 0, true);
		return;
	}

	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
		memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
		/* flush a full batch and start refilling the buffer */
		if (++i >= 32) {
			be_cmd_mcast_mac_set(&adapter->ctrl,
				adapter->if_handle, &mac_addr[0][0], i, false);
			i = 0;
		}

	}

	if (i) {
		/* reset the promiscuous mode also. */
		be_cmd_mcast_mac_set(&adapter->ctrl,
			adapter->if_handle, &mac_addr[0][0], i, false);
	}
}
563
564static void be_set_multicast_list(struct net_device *netdev)
565{
566 struct be_adapter *adapter = netdev_priv(netdev);
567
568 if (netdev->flags & IFF_PROMISC) {
569 be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
570 } else {
571 be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
572 be_set_multicast_filter(netdev);
573 }
574}
575
576static void be_rx_rate_update(struct be_adapter *adapter, u32 pktsize,
577 u16 numfrags)
578{
579 struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
580 u32 rate;
581
582 stats->be_rx_compl++;
583 stats->be_rx_frags += numfrags;
584 stats->be_rx_bytes += pktsize;
585
586 /* Update the rate once in two seconds */
587 if ((jiffies - stats->be_rx_jiffies) < 2 * HZ)
588 return;
589
590 rate = (stats->be_rx_bytes - stats->be_rx_bytes_prev) /
591 ((u32) (jiffies - stats->be_rx_jiffies) / HZ);
592 rate = (rate / 1000000); /* MB/Sec */
593 stats->be_rx_rate = (rate * 8); /* Mega Bits/Sec */
594 stats->be_rx_jiffies = jiffies;
595 stats->be_rx_bytes_prev = stats->be_rx_bytes;
596}
597
/* Take the page_info for RX queue slot frag_idx out of the posted set.
 * The PCI mapping covers a whole "big page" shared by several frags, so
 * it is unmapped only when this slot is the last user of that page.
 * Decrements rxq->used; caller owns the page reference afterwards.
 */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user)
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);

	atomic_dec(&rxq->used);
	return rx_page_info;
}
614
/* Throw away the data in the Rx completion: release every fragment page
 * referenced by rxcp and clear its page_info slot.
 */
static void be_rx_compl_discard(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);	/* ring-wrap increment */
	}
}
633
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first BE_HDR_LEN bytes are copied into the linear skb area; the
 * remainder of the first fragment and all further fragments are attached
 * as page frags, so large frames are delivered without copying payload.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;
	u32 pktsize, hdr_len, curr_frag_len;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Attach the rest of the first fragment as a page frag */
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	memset(page_info, 0, sizeof(*page_info));

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	pktsize -= curr_frag_len; /* account for above copied frag */
	for (i = 1; i < num_rcvd; i++) {
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(pktsize, rx_frag_size);

		skb_shinfo(skb)->frags[i].page = page_info->page;
		skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset;
		skb_shinfo(skb)->frags[i].size = curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb_shinfo(skb)->nr_frags++;
		pktsize -= curr_frag_len;

		memset(page_info, 0, sizeof(*page_info));
	}

	/* NOTE(review): pktsize has been decremented to 0 by the loop above,
	 * so the "pktsize" reported to the rate update is the residual, not
	 * the frame size — confirm whether this is intended. */
	be_rx_rate_update(adapter, pktsize, num_rcvd);
	return;
}
706
/* Process the RX completion indicated by rxcp when LRO is disabled:
 * allocate an skb, fill it from the RX fragments, set checksum state,
 * and hand it to the stack (via the VLAN path when the frame is tagged).
 */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vtp, vid;
	int l4_cksm;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);

	/* Linear area only needs to hold the copied header portion */
	skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
	if (!skb) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		/* Must still consume and free the RX fragments */
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);

	skb_fill_rx_data(adapter, skb, rxcp);

	if (l4_cksm && adapter->rx_csum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;

	if (vtp) {
		/* Tagged frame but no vlan group configured: drop it */
		if (!adapter->vlan_grp || adapter->num_vlans == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}

	adapter->netdev->last_rx = jiffies;

	return;
}
755
756/* Process the RX completion indicated by rxcp when LRO is enabled */
757static void be_rx_compl_process_lro(struct be_adapter *adapter,
758 struct be_eth_rx_compl *rxcp)
759{
760 struct be_rx_page_info *page_info;
761 struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
762 struct be_queue_info *rxq = &adapter->rx_obj.q;
763 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
764 u16 i, rxq_idx = 0, vid;
765
766 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
767 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
768 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
769 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
770
771 remaining = pkt_size;
772 for (i = 0; i < num_rcvd; i++) {
773 page_info = get_rx_page_info(adapter, rxq_idx);
774
775 curr_frag_len = min(remaining, rx_frag_size);
776
777 rx_frags[i].page = page_info->page;
778 rx_frags[i].page_offset = page_info->page_offset;
779 rx_frags[i].size = curr_frag_len;
780 remaining -= curr_frag_len;
781
782 index_inc(&rxq_idx, rxq->len);
783
784 memset(page_info, 0, sizeof(*page_info));
785 }
786
787 if (likely(!vlanf)) {
788 lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
789 pkt_size, NULL, 0);
790 } else {
791 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
792 vid = be16_to_cpu(vid);
793
794 if (!adapter->vlan_grp || adapter->num_vlans == 0)
795 return;
796
797 lro_vlan_hwaccel_receive_frags(&adapter->rx_obj.lro_mgr,
798 rx_frags, pkt_size, pkt_size, adapter->vlan_grp,
799 vid, NULL, 0);
800 }
801
802 be_rx_rate_update(adapter, pkt_size, num_rcvd);
803 return;
804}
805
/* Pop the next valid entry from the RX completion queue, or NULL if the
 * queue is empty. The entry is byte-swapped in place and its valid dword
 * is cleared so the slot reads as empty next time the ring wraps.
 */
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	/* invalidate so the slot isn't re-processed after ring wrap */
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}
820
821static inline struct page *be_alloc_pages(u32 size)
822{
823 gfp_t alloc_flags = GFP_ATOMIC;
824 u32 order = get_order(size);
825 if (order > 0)
826 alloc_flags |= __GFP_COMP;
827 return alloc_pages(alloc_flags, order);
828}
829
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.
 *
 * One "big page" is shared across consecutive RX descriptors; each slot's
 * page_info records its offset, and last_page_user marks the slot whose
 * retirement must unmap the page (see get_rx_page_info()).
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;


	page_info = &page_info_tbl[rxq->head];
	/* Stop at MAX_RX_POST or at the first still-occupied slot */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page, mapped once for DMA */
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			/* Next frag shares the current page: extra page ref */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Publish the frag's DMA address in the RX descriptor */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
		queue_head_inc(rxq);

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(&adapter->ctrl, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}

	return;
}
892
/* Pop the next valid entry from the TX completion queue, or NULL if the
 * queue is empty (mirror of be_rx_compl_get() for the TX side).
 */
static struct be_eth_tx_compl *
be_tx_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* invalidate so the slot isn't re-processed after ring wrap */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
909
/* Reclaim one transmitted skb: walk its WRBs from txq->tail up to and
 * including last_index, unmapping each fragment's DMA buffer, then free
 * the skb recorded at the first WRB's slot by be_xmit().
 */
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u64 busaddr;
	u16 cur_index, num_wrbs = 0;

	cur_index = txq->tail;
	sent_skb = sent_skbs[cur_index];
	BUG_ON(!sent_skb);
	sent_skbs[cur_index] = NULL;

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		be_dws_le_to_cpu(wrb, sizeof(*wrb));
		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
		/* busaddr == 0 is the dummy WRB appended by make_tx_wrbs */
		if (busaddr != 0) {
			/* NOTE(review): frags were mapped with pci_map_page
			 * but are unmapped here with pci_unmap_single —
			 * confirm this is safe on all supported archs */
			pci_unmap_single(adapter->pdev, busaddr,
				wrb->frag_len, PCI_DMA_TODEVICE);
		}
		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}
941
/* Drain the RX side on teardown: discard all pending RX completions,
 * then free every posted-but-unused RX buffer still in the ring.
 */
static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; tail != rxq->head; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	/* rxq must be empty now; anything left would be a leak */
	BUG_ON(atomic_read(&rxq->used));
}
965
/* Free all pending TX skbs on teardown by replaying completion
 * processing for each queued skb; wrb_cnt_for_skb() recomputes how many
 * WRBs the skb at the tail occupies.
 */
static void be_tx_q_clean(struct be_adapter *adapter)
{
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	u16 last_index;
	bool dummy_wrb;

	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		last_index = txq->tail;
		index_adv(&last_index,
			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
		be_tx_compl_process(adapter, last_index);
	}
}
982
/* Tear down the TX queue, its completion queue and event queue, freeing
 * their memory. Pending TX skbs are cleaned only after the queues are
 * destroyed, when no further completions can arrive.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* No more tx completions can be rcvd now; clean up if there are
	 * any pending completions or pending tx requests */
	be_tx_q_clean(adapter);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1006
/* Create the TX event queue, completion queue and eth queue, in that
 * order (each queue needs its parent created first). On any failure,
 * the goto ladder unwinds exactly what was set up. Returns 0 or -1.
 */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	/* Fixed interrupt delay for TX; adaptive coalescing disabled */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(&adapter->ctrl, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(&adapter->ctrl, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
1055
/* Tear down the RX eth queue, completion queue and event queue. RX
 * buffers are drained right after the eth queue is destroyed, while the
 * completion queue still exists to absorb pending completions.
 */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_RXQ);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1077
/* Create the RX event queue, completion queue and eth queue, in that
 * order, with adaptive interrupt coalescing enabled on the EQ. On any
 * failure the goto ladder unwinds what was set up. Returns 0 or the
 * first error code.
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
	/* big_page_size: rx_frag_size rounded up to a whole page multiple */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(&adapter->ctrl, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(&adapter->ctrl, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}
1139static bool event_get(struct be_eq_obj *eq_obj, u16 *rid)
1140{
1141 struct be_eq_entry *entry = queue_tail_node(&eq_obj->q);
1142 u32 evt = entry->evt;
1143
1144 if (!evt)
1145 return false;
1146
1147 evt = le32_to_cpu(evt);
1148 *rid = (evt >> EQ_ENTRY_RES_ID_SHIFT) & EQ_ENTRY_RES_ID_MASK;
1149 entry->evt = 0;
1150 queue_tail_inc(&eq_obj->q);
1151 return true;
1152}
1153
1154static int event_handle(struct be_ctrl_info *ctrl,
1155 struct be_eq_obj *eq_obj)
1156{
1157 u16 rid = 0, num = 0;
1158
1159 while (event_get(eq_obj, &rid))
1160 num++;
1161
1162 /* We can see an interrupt and no event */
1163 be_eq_notify(ctrl, eq_obj->q.id, true, true, num);
1164 if (num)
1165 napi_schedule(&eq_obj->napi);
1166
1167 return num;
1168}
1169
1170static irqreturn_t be_intx(int irq, void *dev)
1171{
1172 struct be_adapter *adapter = dev;
1173 struct be_ctrl_info *ctrl = &adapter->ctrl;
1174 int rx, tx;
1175
1176 tx = event_handle(ctrl, &adapter->tx_eq);
1177 rx = event_handle(ctrl, &adapter->rx_eq);
1178
1179 if (rx || tx)
1180 return IRQ_HANDLED;
1181 else
1182 return IRQ_NONE;
1183}
1184
1185static irqreturn_t be_msix_rx(int irq, void *dev)
1186{
1187 struct be_adapter *adapter = dev;
1188
1189 event_handle(&adapter->ctrl, &adapter->rx_eq);
1190
1191 return IRQ_HANDLED;
1192}
1193
1194static irqreturn_t be_msix_tx(int irq, void *dev)
1195{
1196 struct be_adapter *adapter = dev;
1197
1198 event_handle(&adapter->ctrl, &adapter->tx_eq);
1199
1200 return IRQ_HANDLED;
1201}
1202
1203static inline bool do_lro(struct be_adapter *adapter,
1204 struct be_eth_rx_compl *rxcp)
1205{
1206 int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1207 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1208
1209 if (err)
1210 drvr_stats(adapter)->be_rxcp_err++;
1211
1212 return (!tcp_frame || err || (adapter->max_rx_coal <= 1)) ?
1213 false : true;
1214}
1215
/* NAPI poll handler for the RX path: process up to 'budget' completions
 * (LRO or regular), flush LRO, replenish the RX ring if it is running
 * low, and re-arm the completion queue only when fully drained.
 */
int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_lro(adapter, rxcp))
			be_rx_compl_process_lro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);
	}

	lro_flush_all(&adapter->rx_obj.lro_mgr);

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* re-arm CQ interrupts */
		be_cq_notify(&adapter->ctrl, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(&adapter->ctrl, rx_cq->id, false, work_done);
	}
	return work_done;
}
1252
/* NAPI poll handler for the TX path. For TX we don't honour budget;
 * consume everything, wake the netdev queue if it was stopped for lack
 * of WRBs, then complete NAPI and re-arm the TX completion queue.
 * Always reports 1 unit of work.
 */
int be_poll_tx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *tx_cq = &tx_obj->cq;
	struct be_queue_info *txq = &tx_obj->q;
	struct be_eth_tx_compl *txcp;
	u32 num_cmpl = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(adapter))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		num_cmpl++;
	}

	/* As Tx wrbs have been freed up, wake up netdev queue if
	 * it was stopped due to lack of tx wrbs.
	 */
	if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
		netif_wake_queue(adapter->netdev);
	}

	napi_complete(napi);

	be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);

	drvr_stats(adapter)->be_tx_events++;
	drvr_stats(adapter)->be_tx_compl += num_cmpl;

	return 1;
}
1290
/* Periodic (1 Hz) housekeeping: refresh link status, pull HW stats,
 * retune the RX interrupt delay, and replenish the RX ring if posting
 * previously failed for lack of memory. Re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	int status;

	/* Check link */
	be_link_status_update(adapter);

	/* Get Stats */
	status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
	if (!status)
		netdev_stats_update(adapter);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	/* Retry RX buffer posting deferred by be_post_rx_frags() */
	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
1315
1316static void be_msix_enable(struct be_adapter *adapter)
1317{
1318 int i, status;
1319
1320 for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
1321 adapter->msix_entries[i].entry = i;
1322
1323 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1324 BE_NUM_MSIX_VECTORS);
1325 if (status == 0)
1326 adapter->msix_enabled = true;
1327 return;
1328}
1329
1330static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1331{
1332 return adapter->msix_entries[eq_id -
1333 8 * adapter->ctrl.pci_func].vector;
1334}
1335
/* Request the TX and RX MSI-X vectors. On failure, any vector already
 * requested is released and MSI-X is disabled so the caller can fall
 * back to INTx. Returns 0 or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	int status, vec;

	sprintf(tx_eq->desc, "%s-tx", netdev->name);
	vec = be_msix_vec_get(adapter, tx_eq->q.id);
	status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter);
	if (status)
		goto err;

	sprintf(rx_eq->desc, "%s-rx", netdev->name);
	vec = be_msix_vec_get(adapter, rx_eq->q.id);
	status = request_irq(vec, be_msix_rx, 0, rx_eq->desc, adapter);
	if (status) { /* Free TX IRQ */
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		free_irq(vec, adapter);
		goto err;
	}
	return 0;
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}
1365
/* Register interrupt handlers: try MSI-X first (when enabled), fall back
 * to a shared INTx line. Sets isr_registered on success so unregister
 * knows there is something to undo. Returns 0 or the request_irq() error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* be_msix_register() disabled MSI-X; fall back to INTx */
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
1390
/* Release whichever interrupt handlers be_irq_register() set up (INTx or
 * the two MSI-X vectors); a no-op if nothing was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	vec = be_msix_vec_get(adapter, adapter->tx_eq.q.id);
	free_irq(vec, adapter);
	vec = be_msix_vec_get(adapter, adapter->rx_eq.q.id);
	free_irq(vec, adapter);
done:
	adapter->isr_registered = false;
	return;
}
1414
/* ndo_open handler: create the HW interface, program VLANs and flow
 * control, create TX then RX queues, post initial RX buffers, enable
 * NAPI/IRQs, arm the event and completion queues, and kick off the
 * periodic worker. Unwinds via the goto ladder on failure.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	u32 if_flags;
	int status;

	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
		BE_IF_FLAGS_PASS_L3L4_ERRORS;
	status = be_cmd_if_create(ctrl, if_flags, netdev->dev_addr,
			false/* pmac_invalid */, &adapter->if_handle,
			&adapter->pmac_id);
	if (status != 0)
		goto do_none;

	/* Re-apply any VLAN filters configured while the device was down */
	be_vid_config(netdev);

	status = be_cmd_set_flow_control(ctrl, true, true);
	if (status != 0)
		goto if_destroy;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(ctrl, true);

	/* The evt queues are created in the unarmed state; arm them */
	be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);

	/* The compl queues are created in the unarmed state; arm them */
	be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
	be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);

	be_link_status_update(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(ctrl, adapter->if_handle);
do_none:
	return status;
}
1477
/* ndo_close handler: stop the worker and the netdev queue, mask device
 * interrupts, wait for in-flight IRQs, release them, disable NAPI, tear
 * down the RX/TX queues and destroy the HW interface. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work(&adapter->work);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link.speed = PHY_LINK_SPEED_ZERO;

	be_intr_set(ctrl, false);

	/* Wait for any handler still running on another CPU */
	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(ctrl, adapter->if_handle);
	return 0;
}
1513
/* inet_lro get_frag_header callback: locate the MAC, IP and TCP headers
 * in the first RX fragment. Handles an optional single 802.1Q tag.
 * Returns 0 for IPv4/TCP frames eligible for LRO, -1 otherwise.
 */
static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
		void **ip_hdr, void **tcpudp_hdr,
		u64 *hdr_flags, void *priv)
{
	struct ethhdr *eh;
	struct vlan_ethhdr *veh;
	struct iphdr *iph;
	u8 *va = page_address(frag->page) + frag->page_offset;
	unsigned long ll_hlen;

	prefetch(va);
	eh = (struct ethhdr *)va;
	*mac_hdr = eh;
	ll_hlen = ETH_HLEN;
	if (eh->h_proto != htons(ETH_P_IP)) {
		if (eh->h_proto == htons(ETH_P_8021Q)) {
			veh = (struct vlan_ethhdr *)va;
			/* only VLAN-encapsulated IPv4 qualifies */
			if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
				return -1;

			ll_hlen += VLAN_HLEN;
		} else {
			return -1;
		}
	}
	*hdr_flags = LRO_IPV4;
	iph = (struct iphdr *)(va + ll_hlen);
	*ip_hdr = iph;
	if (iph->protocol != IPPROTO_TCP)
		return -1;
	*hdr_flags |= LRO_TCP;
	/* TCP header follows the IP header (ihl is in 32-bit words) */
	*tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);

	return 0;
}
1549
1550static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev)
1551{
1552 struct net_lro_mgr *lro_mgr;
1553
1554 lro_mgr = &adapter->rx_obj.lro_mgr;
1555 lro_mgr->dev = netdev;
1556 lro_mgr->features = LRO_F_NAPI;
1557 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
1558 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
1559 lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
1560 lro_mgr->lro_arr = adapter->rx_obj.lro_desc;
1561 lro_mgr->get_frag_header = be_get_frag_header;
1562 lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME;
1563}
1564
/*
 * net_device callbacks for a BE interface.
 * NOTE(review): not const — presumably because BE_SET_NETDEV_OPS() must
 * cope with kernels predating struct net_device_ops; confirm.
 */
 1565static struct net_device_ops be_netdev_ops = {
 1566	.ndo_open = be_open,
 1567	.ndo_stop = be_close,
 1568	.ndo_start_xmit = be_xmit,
 1569	.ndo_get_stats = be_get_stats,
 1570	.ndo_set_rx_mode = be_set_multicast_list,
 1571	.ndo_set_mac_address = be_mac_addr_set,
 1572	.ndo_change_mtu = be_change_mtu,
 1573	.ndo_validate_addr = eth_validate_addr,
 1574	.ndo_vlan_rx_register = be_vlan_register,
 1575	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
 1576	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
 1577};
1578
/*
 * One-time netdev setup: advertise offload features, install the
 * netdev/ethtool ops, initialize LRO and the two NAPI contexts, and
 * start with carrier off/queue stopped until be_open() runs.
 */
 1579static void be_netdev_init(struct net_device *netdev)
 1580{
 1581	struct be_adapter *adapter = netdev_priv(netdev);
 1582
	/* hw supports SG, TSO(4/6), TX/RX csum and VLAN offload/filter */
 1583	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
 1584		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
 1585		NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
 1586
 1587	netdev->flags |= IFF_MULTICAST;
 1588
 1589	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
 1590
 1591	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
 1592
 1593	be_lro_init(adapter, netdev);
 1594
	/* separate NAPI contexts for the rx and tx event queues */
 1595	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
 1596		BE_NAPI_WEIGHT);
 1597	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx,
 1598		BE_NAPI_WEIGHT);
 1599
 1600	netif_carrier_off(netdev);
 1601	netif_stop_queue(netdev);
 1602}
1603
1604static void be_unmap_pci_bars(struct be_adapter *adapter)
1605{
1606 struct be_ctrl_info *ctrl = &adapter->ctrl;
1607 if (ctrl->csr)
1608 iounmap(ctrl->csr);
1609 if (ctrl->db)
1610 iounmap(ctrl->db);
1611 if (ctrl->pcicfg)
1612 iounmap(ctrl->pcicfg);
1613}
1614
/*
 * ioremap the three BARs the driver uses: BAR2 (CSR), BAR4 (doorbells)
 * and BAR1 (pcicfg shadow).  On failure, anything already mapped is
 * released via be_unmap_pci_bars().  Returns 0 or -ENOMEM.
 */
 1615static int be_map_pci_bars(struct be_adapter *adapter)
 1616{
 1617	u8 __iomem *addr;
 1618
 1619	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
 1620			pci_resource_len(adapter->pdev, 2));
 1621	if (addr == NULL)
 1622		return -ENOMEM;
 1623	adapter->ctrl.csr = addr;
 1624
	/* NOTE(review): only the first 128KB of the doorbell BAR is
	 * mapped — presumably all the driver touches; confirm */
 1625	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
 1626			128 * 1024);
 1627	if (addr == NULL)
 1628		goto pci_map_err;
 1629	adapter->ctrl.db = addr;
 1630
 1631	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
 1632			pci_resource_len(adapter->pdev, 1));
 1633	if (addr == NULL)
 1634		goto pci_map_err;
 1635	adapter->ctrl.pcicfg = addr;
 1636
 1637	return 0;
 1638pci_map_err:
 1639	be_unmap_pci_bars(adapter);
 1640	return -ENOMEM;
 1641}
1642
1643
1644static void be_ctrl_cleanup(struct be_adapter *adapter)
1645{
1646 struct be_dma_mem *mem = &adapter->ctrl.mbox_mem_alloced;
1647
1648 be_unmap_pci_bars(adapter);
1649
1650 if (mem->va)
1651 pci_free_consistent(adapter->pdev, mem->size,
1652 mem->va, mem->dma);
1653}
1654
1655/* Initialize the mbox required to send cmds to BE */
1656static int be_ctrl_init(struct be_adapter *adapter)
1657{
1658 struct be_ctrl_info *ctrl = &adapter->ctrl;
1659 struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
1660 struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
1661 int status;
1662 u32 val;
1663
1664 status = be_map_pci_bars(adapter);
1665 if (status)
1666 return status;
1667
1668 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
1669 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
1670 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
1671 if (!mbox_mem_alloc->va) {
1672 be_unmap_pci_bars(adapter);
1673 return -1;
1674 }
1675 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
1676 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
1677 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
1678 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
1679 spin_lock_init(&ctrl->cmd_lock);
1680
1681 val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
1682 ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &
1683 MEMBAR_CTRL_INT_CTRL_PFUNC_MASK;
1684 return 0;
1685}
1686
1687static void be_stats_cleanup(struct be_adapter *adapter)
1688{
1689 struct be_stats_obj *stats = &adapter->stats;
1690 struct be_dma_mem *cmd = &stats->cmd;
1691
1692 if (cmd->va)
1693 pci_free_consistent(adapter->pdev, cmd->size,
1694 cmd->va, cmd->dma);
1695}
1696
1697static int be_stats_init(struct be_adapter *adapter)
1698{
1699 struct be_stats_obj *stats = &adapter->stats;
1700 struct be_dma_mem *cmd = &stats->cmd;
1701
1702 cmd->size = sizeof(struct be_cmd_req_get_stats);
1703 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
1704 if (cmd->va == NULL)
1705 return -1;
1706 return 0;
1707}
1708
/*
 * PCI remove: unwind be_probe() in reverse order — unregister the
 * netdev first so no new I/O starts, then free stats/ctrl resources,
 * disable MSI-X and release the PCI device.
 */
 1709static void __devexit be_remove(struct pci_dev *pdev)
 1710{
 1711	struct be_adapter *adapter = pci_get_drvdata(pdev);
	/* drvdata is cleared below; guard against a second call */
 1712	if (!adapter)
 1713		return;
 1714
 1715	unregister_netdev(adapter->netdev);
 1716
 1717	be_stats_cleanup(adapter);
 1718
 1719	be_ctrl_cleanup(adapter);
 1720
 1721	if (adapter->msix_enabled) {
 1722		pci_disable_msix(adapter->pdev);
 1723		adapter->msix_enabled = false;
 1724	}
 1725
 1726	pci_set_drvdata(pdev, NULL);
 1727	pci_release_regions(pdev);
 1728	pci_disable_device(pdev);
 1729
	/* last: adapter lives inside the netdev's private area */
 1730	free_netdev(adapter->netdev);
 1731}
1732
1733static int be_hw_up(struct be_adapter *adapter)
1734{
1735 struct be_ctrl_info *ctrl = &adapter->ctrl;
1736 int status;
1737
1738 status = be_cmd_POST(ctrl);
1739 if (status)
1740 return status;
1741
1742 status = be_cmd_get_fw_ver(ctrl, adapter->fw_ver);
1743 if (status)
1744 return status;
1745
1746 status = be_cmd_query_fw_cfg(ctrl, &adapter->port_num);
1747 return status;
1748}
1749
/*
 * PCI probe: enable the device, allocate the netdev/adapter, set the
 * DMA mask, map and initialize control structures, bring the hw up,
 * read the permanent MAC address and register the netdev.  Failures
 * unwind in reverse acquisition order via the labels at the bottom.
 */
 1750static int __devinit be_probe(struct pci_dev *pdev,
 1751			const struct pci_device_id *pdev_id)
 1752{
 1753	int status = 0;
 1754	struct be_adapter *adapter;
 1755	struct net_device *netdev;
 1756	struct be_ctrl_info *ctrl;
 1757	u8 mac[ETH_ALEN];
 1758
 1759	status = pci_enable_device(pdev);
 1760	if (status)
 1761		goto do_none;
 1762
 1763	status = pci_request_regions(pdev, DRV_NAME);
 1764	if (status)
 1765		goto disable_dev;
 1766	pci_set_master(pdev);
 1767
 1768	netdev = alloc_etherdev(sizeof(struct be_adapter));
 1769	if (netdev == NULL) {
 1770		status = -ENOMEM;
 1771		goto rel_reg;
 1772	}
 1773	adapter = netdev_priv(netdev);
 1774	adapter->pdev = pdev;
 1775	pci_set_drvdata(pdev, adapter);
 1776	adapter->netdev = netdev;
 1777
 1778	be_msix_enable(adapter);
 1779
	/* prefer 64-bit DMA; fall back to 32-bit if unavailable */
 1780	status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
 1781	if (!status) {
 1782		netdev->features |= NETIF_F_HIGHDMA;
 1783	} else {
 1784		status = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 1785		if (status) {
 1786			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
 1787			goto free_netdev;
 1788		}
 1789	}
 1790
 1791	ctrl = &adapter->ctrl;
 1792	status = be_ctrl_init(adapter);
 1793	if (status)
 1794		goto free_netdev;
 1795
 1796	status = be_stats_init(adapter);
 1797	if (status)
 1798		goto ctrl_clean;
 1799
 1800	status = be_hw_up(adapter);
 1801	if (status)
 1802		goto stats_clean;
 1803
	/* use the factory (permanent) MAC address */
 1804	status = be_cmd_mac_addr_query(ctrl, mac, MAC_ADDRESS_TYPE_NETWORK,
 1805			true /* permanent */, 0);
 1806	if (status)
 1807		goto stats_clean;
 1808	memcpy(netdev->dev_addr, mac, ETH_ALEN);
 1809
 1810	INIT_DELAYED_WORK(&adapter->work, be_worker);
 1811	be_netdev_init(netdev);
 1812	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
 1813
 1814	status = register_netdev(netdev);
 1815	if (status != 0)
 1816		goto stats_clean;
 1817
 1818	dev_info(&pdev->dev, BE_NAME " port %d\n", adapter->port_num);
 1819	return 0;
 1820
	/* error unwind: each label releases one acquisition step */
 1821stats_clean:
 1822	be_stats_cleanup(adapter);
 1823ctrl_clean:
 1824	be_ctrl_cleanup(adapter);
 1825free_netdev:
 1826	free_netdev(adapter->netdev);
 1827rel_reg:
 1828	pci_release_regions(pdev);
 1829disable_dev:
 1830	pci_disable_device(pdev);
 1831do_none:
 1832	dev_warn(&pdev->dev, BE_NAME " initialization failed\n");
 1833	return status;
 1834}
1835
/*
 * PM suspend: detach the netdev from the stack, close it under rtnl if
 * it was running, then save PCI state and power the device down.
 */
 1836static int be_suspend(struct pci_dev *pdev, pm_message_t state)
 1837{
 1838	struct be_adapter *adapter = pci_get_drvdata(pdev);
 1839	struct net_device *netdev = adapter->netdev;
 1840
 1841	netif_device_detach(netdev);
 1842	if (netif_running(netdev)) {
	/* be_close() expects rtnl to be held */
 1843		rtnl_lock();
 1844		be_close(netdev);
 1845		rtnl_unlock();
 1846	}
 1847
 1848	pci_save_state(pdev);
 1849	pci_disable_device(pdev);
 1850	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 1851	return 0;
 1852}
1853
/*
 * PM resume: re-enable the PCI device, restore its state, re-open the
 * interface if it was running at suspend time, then re-attach it.
 */
 1854static int be_resume(struct pci_dev *pdev)
 1855{
 1856	int status = 0;
 1857	struct be_adapter *adapter = pci_get_drvdata(pdev);
 1858	struct net_device *netdev = adapter->netdev;
 1859
	/* NOTE(review): detach again here — presumably to keep the stack
	 * off the device until be_open() completes; attach follows below */
 1860	netif_device_detach(netdev);
 1861
 1862	status = pci_enable_device(pdev);
 1863	if (status)
 1864		return status;
 1865
 1866	pci_set_power_state(pdev, 0);
 1867	pci_restore_state(pdev);
 1868
 1869	if (netif_running(netdev)) {
 1870		rtnl_lock();
 1871		be_open(netdev);
 1872		rtnl_unlock();
 1873	}
 1874	netif_device_attach(netdev);
 1875	return 0;
 1876}
1877
/* PCI driver glue: probe/remove plus power-management entry points. */
 1878static struct pci_driver be_driver = {
 1879	.name = DRV_NAME,
 1880	.id_table = be_dev_ids,
 1881	.probe = be_probe,
 1882	.remove = be_remove,
 1883	.suspend = be_suspend,
 1884	.resume = be_resume
 1885};
1886
1887static int __init be_init_module(void)
1888{
1889 if (rx_frag_size != 8192 && rx_frag_size != 4096
1890 && rx_frag_size != 2048) {
1891 printk(KERN_WARNING DRV_NAME
1892 " : Module param rx_frag_size must be 2048/4096/8192."
1893 " Using 2048\n");
1894 rx_frag_size = 2048;
1895 }
1896 /* Ensure rx_frag_size is aligned to chache line */
1897 if (SKB_DATA_ALIGN(rx_frag_size) != rx_frag_size) {
1898 printk(KERN_WARNING DRV_NAME
1899 " : Bad module param rx_frag_size. Using 2048\n");
1900 rx_frag_size = 2048;
1901 }
1902
1903 return pci_register_driver(&be_driver);
1904}
1905module_init(be_init_module);
1906
/* Module exit: unregister the PCI driver. */
 1907static void __exit be_exit_module(void)
 1908{
 1909	pci_unregister_driver(&be_driver);
 1910}
 1911module_exit(be_exit_module);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 6500b7c4739f..6b6530ffdf19 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -57,8 +57,8 @@
57 57
58#define DRV_MODULE_NAME "bnx2" 58#define DRV_MODULE_NAME "bnx2"
59#define PFX DRV_MODULE_NAME ": " 59#define PFX DRV_MODULE_NAME ": "
60#define DRV_MODULE_VERSION "1.9.2" 60#define DRV_MODULE_VERSION "1.9.3"
61#define DRV_MODULE_RELDATE "Feb 11, 2009" 61#define DRV_MODULE_RELDATE "March 17, 2009"
62 62
63#define RUN_AT(x) (jiffies + (x)) 63#define RUN_AT(x) (jiffies + (x))
64 64
@@ -5843,9 +5843,6 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5843 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { 5843 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5844 msix_ent[i].entry = i; 5844 msix_ent[i].entry = i;
5845 msix_ent[i].vector = 0; 5845 msix_ent[i].vector = 0;
5846
5847 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
5848 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5849 } 5846 }
5850 5847
5851 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC); 5848 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
@@ -5854,8 +5851,11 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5854 5851
5855 bp->irq_nvecs = msix_vecs; 5852 bp->irq_nvecs = msix_vecs;
5856 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI; 5853 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5857 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) 5854 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5858 bp->irq_tbl[i].vector = msix_ent[i].vector; 5855 bp->irq_tbl[i].vector = msix_ent[i].vector;
5856 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
5857 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5858 }
5859} 5859}
5860 5860
5861static void 5861static void
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 15a5cf0f676b..3cf2b92eef3b 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -152,7 +152,7 @@ struct sw_rx_page {
152#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) 152#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
153#define SGE_PAGE_SIZE PAGE_SIZE 153#define SGE_PAGE_SIZE PAGE_SIZE
154#define SGE_PAGE_SHIFT PAGE_SHIFT 154#define SGE_PAGE_SHIFT PAGE_SHIFT
155#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN(addr) 155#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))addr)
156 156
157#define BCM_RX_ETH_PAYLOAD_ALIGN 64 157#define BCM_RX_ETH_PAYLOAD_ALIGN 64
158 158
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index a6c0b3abba29..3b0c2499ef17 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -150,7 +150,6 @@ static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
150 150
151static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len) 151static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
152{ 152{
153#ifdef USE_DMAE
154 int offset = 0; 153 int offset = 0;
155 154
156 if (bp->dmae_ready) { 155 if (bp->dmae_ready) {
@@ -164,9 +163,6 @@ static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
164 addr + offset, len); 163 addr + offset, len);
165 } else 164 } else
166 bnx2x_init_str_wr(bp, addr, bp->gunzip_buf, len); 165 bnx2x_init_str_wr(bp, addr, bp->gunzip_buf, len);
167#else
168 bnx2x_init_str_wr(bp, addr, bp->gunzip_buf, len);
169#endif
170} 166}
171 167
172static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) 168static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index d3e7775a9ccf..2e346a5e98cf 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,7 +57,7 @@
57#include "bnx2x.h" 57#include "bnx2x.h"
58#include "bnx2x_init.h" 58#include "bnx2x_init.h"
59 59
60#define DRV_MODULE_VERSION "1.45.26" 60#define DRV_MODULE_VERSION "1.45.27"
61#define DRV_MODULE_RELDATE "2009/01/26" 61#define DRV_MODULE_RELDATE "2009/01/26"
62#define BNX2X_BC_VER 0x040200 62#define BNX2X_BC_VER 0x040200
63 63
@@ -4035,10 +4035,10 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4035{ 4035{
4036 int port = BP_PORT(bp); 4036 int port = BP_PORT(bp);
4037 4037
4038 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM + 4038 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4039 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, 4039 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4040 sizeof(struct ustorm_status_block)/4); 4040 sizeof(struct ustorm_status_block)/4);
4041 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM + 4041 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4042 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, 4042 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4043 sizeof(struct cstorm_status_block)/4); 4043 sizeof(struct cstorm_status_block)/4);
4044} 4044}
@@ -4092,18 +4092,18 @@ static void bnx2x_zero_def_sb(struct bnx2x *bp)
4092{ 4092{
4093 int func = BP_FUNC(bp); 4093 int func = BP_FUNC(bp);
4094 4094
4095 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM + 4095 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4096 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4097 sizeof(struct tstorm_def_status_block)/4);
4098 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4096 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, 4099 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4097 sizeof(struct ustorm_def_status_block)/4); 4100 sizeof(struct ustorm_def_status_block)/4);
4098 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM + 4101 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4099 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, 4102 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4100 sizeof(struct cstorm_def_status_block)/4); 4103 sizeof(struct cstorm_def_status_block)/4);
4101 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM + 4104 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4102 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, 4105 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4103 sizeof(struct xstorm_def_status_block)/4); 4106 sizeof(struct xstorm_def_status_block)/4);
4104 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4105 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4106 sizeof(struct tstorm_def_status_block)/4);
4107} 4107}
4108 4108
4109static void bnx2x_init_def_sb(struct bnx2x *bp, 4109static void bnx2x_init_def_sb(struct bnx2x *bp,
@@ -4518,7 +4518,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
4518 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA | 4518 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4519 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING); 4519 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4520 context->ustorm_st_context.common.sge_buff_size = 4520 context->ustorm_st_context.common.sge_buff_size =
4521 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE); 4521 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4522 (u32)0xffff);
4522 context->ustorm_st_context.common.sge_page_base_hi = 4523 context->ustorm_st_context.common.sge_page_base_hi =
4523 U64_HI(fp->rx_sge_mapping); 4524 U64_HI(fp->rx_sge_mapping);
4524 context->ustorm_st_context.common.sge_page_base_lo = 4525 context->ustorm_st_context.common.sge_page_base_lo =
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e0578fe8c0db..3d76686dceca 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3537,11 +3537,26 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave
3537 } 3537 }
3538 break; 3538 break;
3539 case NETDEV_CHANGE: 3539 case NETDEV_CHANGE:
3540 /* 3540 if (bond->params.mode == BOND_MODE_8023AD || bond_is_lb(bond)) {
3541 * TODO: is this what we get if somebody 3541 struct slave *slave;
3542 * sets up a hierarchical bond, then rmmod's 3542
3543 * one of the slave bonding devices? 3543 slave = bond_get_slave_by_dev(bond, slave_dev);
3544 */ 3544 if (slave) {
3545 u16 old_speed = slave->speed;
3546 u16 old_duplex = slave->duplex;
3547
3548 bond_update_speed_duplex(slave);
3549
3550 if (bond_is_lb(bond))
3551 break;
3552
3553 if (old_speed != slave->speed)
3554 bond_3ad_adapter_speed_changed(slave);
3555 if (old_duplex != slave->duplex)
3556 bond_3ad_adapter_duplex_changed(slave);
3557 }
3558 }
3559
3545 break; 3560 break;
3546 case NETDEV_DOWN: 3561 case NETDEV_DOWN:
3547 /* 3562 /*
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index bcf92917bbf3..254ec62b5f58 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -930,13 +930,15 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
930 struct net_device *dev = dev_id; 930 struct net_device *dev = dev_id;
931 board_info_t *db = netdev_priv(dev); 931 board_info_t *db = netdev_priv(dev);
932 int int_status; 932 int int_status;
933 unsigned long flags;
933 u8 reg_save; 934 u8 reg_save;
934 935
935 dm9000_dbg(db, 3, "entering %s\n", __func__); 936 dm9000_dbg(db, 3, "entering %s\n", __func__);
936 937
937 /* A real interrupt coming */ 938 /* A real interrupt coming */
938 939
939 spin_lock(&db->lock); 940 /* holders of db->lock must always block IRQs */
941 spin_lock_irqsave(&db->lock, flags);
940 942
941 /* Save previous register address */ 943 /* Save previous register address */
942 reg_save = readb(db->io_addr); 944 reg_save = readb(db->io_addr);
@@ -972,7 +974,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
972 /* Restore previous register address */ 974 /* Restore previous register address */
973 writeb(reg_save, db->io_addr); 975 writeb(reg_save, db->io_addr);
974 976
975 spin_unlock(&db->lock); 977 spin_unlock_irqrestore(&db->lock, flags);
976 978
977 return IRQ_HANDLED; 979 return IRQ_HANDLED;
978} 980}
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
new file mode 100644
index 000000000000..1b4063222a82
--- /dev/null
+++ b/drivers/net/dnet.c
@@ -0,0 +1,994 @@
1/*
2 * Dave DNET Ethernet Controller driver
3 *
4 * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
5 * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/version.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/slab.h>
17#include <linux/delay.h>
18#include <linux/init.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/dma-mapping.h>
22#include <linux/platform_device.h>
23#include <linux/phy.h>
24#include <linux/platform_device.h>
25
26#include "dnet.h"
27
28#undef DEBUG
29
30/* function for reading internal MAC register */
31u16 dnet_readw_mac(struct dnet *bp, u16 reg)
32{
33 u16 data_read;
34
35 /* issue a read */
36 dnet_writel(bp, reg, MACREG_ADDR);
37
38 /* since a read/write op to the MAC is very slow,
39 * we must wait before reading the data */
40 ndelay(500);
41
42 /* read data read from the MAC register */
43 data_read = dnet_readl(bp, MACREG_DATA);
44
45 /* all done */
46 return data_read;
47}
48
49/* function for writing internal MAC register */
50void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
51{
52 /* load data to write */
53 dnet_writel(bp, val, MACREG_DATA);
54
55 /* issue a write */
56 dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);
57
58 /* since a read/write op to the MAC is very slow,
59 * we must wait before exiting */
60 ndelay(500);
61}
62
/*
 * Program bp->dev->dev_addr into the MAC's three 16-bit address
 * registers.  Each byte pair is stored big-endian, hence the
 * cpu_to_be16() on every chunk.
 */
 63static void __dnet_set_hwaddr(struct dnet *bp)
 64{
 65	u16 tmp;
 66
 67	tmp = cpu_to_be16(*((u16 *) bp->dev->dev_addr));
 68	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
 69	tmp = cpu_to_be16(*((u16 *) (bp->dev->dev_addr + 2)));
 70	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
 71	tmp = cpu_to_be16(*((u16 *) (bp->dev->dev_addr + 4)));
 72	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
 73}
74
/*
 * Read the MAC address back out of the hardware registers and adopt it
 * as the netdev address — but only if it is a valid unicast address
 * (e.g. not all-zero because the registers were never programmed).
 */
 75static void __devinit dnet_get_hwaddr(struct dnet *bp)
 76{
 77	u16 tmp;
 78	u8 addr[6];
 79
 80	/*
 81	 * from MAC docs:
 82	 * "Note that the MAC address is stored in the registers in Hexadecimal
 83	 * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
 84	 * would require writing 0xAC (octet 0) to address 0x0B (high byte of
 85	 * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
 86	 * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
 87	 * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of
 88	 * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of
 89	 * Mac_addr[15:0]), and 0x80 (octet 5) to address * 0x0E (Low byte of
 90	 * Mac_addr[15:0]).
 91	 */
 92	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
 93	*((u16 *) addr) = be16_to_cpu(tmp);
 94	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
 95	*((u16 *) (addr + 2)) = be16_to_cpu(tmp);
 96	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
 97	*((u16 *) (addr + 4)) = be16_to_cpu(tmp);
 98
	/* keep the existing dev_addr if the registers held garbage */
 99	if (is_valid_ether_addr(addr))
 100		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
 101}
102
/*
 * mii_bus .read callback: read one PHY register over the MAC's
 * management interface.  Busy-waits on the CMD_FIN bit both before
 * issuing the transaction and while it is in flight.
 */
 103static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 104{
 105	struct dnet *bp = bus->priv;
 106	u16 value;
 107
	/* wait for any previous management transaction to finish */
 108	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
 109				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
 110		cpu_relax();
 111
 112	/* only 5 bits allowed for phy-addr and reg_offset */
 113	mii_id &= 0x1f;
 114	regnum &= 0x1f;
 115
 116	/* prepare reg_value for a read */
 117	value = (mii_id << 8);
 118	value |= regnum;
 119
 120	/* write control word */
 121	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);
 122
 123	/* wait for end of transfer */
 124	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
 125				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
 126		cpu_relax();
 127
 128	value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);
 129
 130	pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);
 131
 132	return value;
 133}
134
/*
 * mii_bus .write callback: write one PHY register over the MAC's
 * management interface.  Mirrors dnet_mdio_read(): busy-wait, stage
 * the data word, then issue the control word with the write bit set.
 */
 135static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 136			   u16 value)
 137{
 138	struct dnet *bp = bus->priv;
 139	u16 tmp;
 140
 141	pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);
 142
	/* wait for any previous management transaction to finish */
 143	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
 144				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
 145		cpu_relax();
 146
	/* bit 13 of the control word selects a write operation */
 147	/* prepare for a write operation */
 148	tmp = (1 << 13);
 149
 150	/* only 5 bits allowed for phy-addr and reg_offset */
 151	mii_id &= 0x1f;
 152	regnum &= 0x1f;
 153
 154	/* only 16 bits on data */
 155	value &= 0xffff;
 156
 157	/* prepare reg_value for a write */
 158	tmp |= (mii_id << 8);
 159	tmp |= regnum;
 160
 161	/* write data to write first */
 162	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);
 163
 164	/* write control word */
 165	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);
 166
	/* wait for this transaction to complete before returning */
 167	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
 168				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
 169		cpu_relax();
 170
 171	return 0;
 172}
173
/* mii_bus .reset callback: the DNET management bus needs no reset. */
 174static int dnet_mdio_reset(struct mii_bus *bus)
 175{
 176	return 0;
 177}
178
/*
 * phylib adjust_link callback: mirror the PHY's negotiated duplex,
 * speed and link state into the MAC's mode/control registers.
 * Register updates happen under bp->lock; the informational printk is
 * deliberately done after the lock is dropped.
 */
 179static void dnet_handle_link_change(struct net_device *dev)
 180{
 181	struct dnet *bp = netdev_priv(dev);
 182	struct phy_device *phydev = bp->phy_dev;
 183	unsigned long flags;
 184	u32 mode_reg, ctl_reg;
 185
 186	int status_change = 0;
 187
 188	spin_lock_irqsave(&bp->lock, flags);
 189
	/* read-modify-write the two affected MAC registers */
 190	mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
 191	ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);
 192
 193	if (phydev->link) {
	/* duplex changed: toggle the half-duplex enable bit */
 194		if (bp->duplex != phydev->duplex) {
 195			if (phydev->duplex)
 196				ctl_reg &=
 197				    ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
 198			else
 199				ctl_reg |=
 200				    DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;
 201
 202			bp->duplex = phydev->duplex;
 203			status_change = 1;
 204		}
 205
	/* speed changed: gigabit mode is a single enable bit */
 206		if (bp->speed != phydev->speed) {
 207			status_change = 1;
 208			switch (phydev->speed) {
 209			case 1000:
 210				mode_reg |= DNET_INTERNAL_MODE_GBITEN;
 211				break;
 212			case 100:
 213			case 10:
 214				mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
 215				break;
 216			default:
 217				printk(KERN_WARNING
 218				       "%s: Ack!  Speed (%d) is not "
 219				       "10/100/1000!\n", dev->name,
 220				       phydev->speed);
 221				break;
 222			}
 223			bp->speed = phydev->speed;
 224		}
 225	}
 226
	/* link state changed: gate the MAC rx/tx enables accordingly */
 227	if (phydev->link != bp->link) {
 228		if (phydev->link) {
 229			mode_reg |=
 230			    (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
 231		} else {
 232			mode_reg &=
 233			    ~(DNET_INTERNAL_MODE_RXEN |
 234			      DNET_INTERNAL_MODE_TXEN);
 235			bp->speed = 0;
 236			bp->duplex = -1;
 237		}
 238		bp->link = phydev->link;
 239
 240		status_change = 1;
 241	}
 242
	/* write back only if something actually changed */
 243	if (status_change) {
 244		dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
 245		dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
 246	}
 247
 248	spin_unlock_irqrestore(&bp->lock, flags);
 249
 250	if (status_change) {
 251		if (phydev->link)
 252			printk(KERN_INFO "%s: link up (%d/%s)\n",
 253			       dev->name, phydev->speed,
 254			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
 255		else
 256			printk(KERN_INFO "%s: link down\n", dev->name);
 257	}
 258}
259
/*
 * Find the first PHY on the freshly registered MDIO bus, connect it to
 * the netdev via phylib, and mask its advertised features down to what
 * the MAC supports (gigabit only if DNET_HAS_GIGABIT is set).
 * Returns 0 or a negative errno.
 */
 260static int dnet_mii_probe(struct net_device *dev)
 261{
 262	struct dnet *bp = netdev_priv(dev);
 263	struct phy_device *phydev = NULL;
 264	int phy_addr;
 265
 266	/* find the first phy */
 267	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
 268		if (bp->mii_bus->phy_map[phy_addr]) {
 269			phydev = bp->mii_bus->phy_map[phy_addr];
 270			break;
 271		}
 272	}
 273
 274	if (!phydev) {
 275		printk(KERN_ERR "%s: no PHY found\n", dev->name);
 276		return -ENODEV;
 277	}
 278
 279	/* TODO : add pin_irq */
 280
 281	/* attach the mac to the phy */
	/* interface mode depends on how the board wired the MAC */
 282	if (bp->capabilities & DNET_HAS_RMII) {
 283		phydev = phy_connect(dev, dev_name(&phydev->dev),
 284				     &dnet_handle_link_change, 0,
 285				     PHY_INTERFACE_MODE_RMII);
 286	} else {
 287		phydev = phy_connect(dev, dev_name(&phydev->dev),
 288				     &dnet_handle_link_change, 0,
 289				     PHY_INTERFACE_MODE_MII);
 290	}
 291
 292	if (IS_ERR(phydev)) {
 293		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
 294		return PTR_ERR(phydev);
 295	}
 296
 297	/* mask with MAC supported features */
 298	if (bp->capabilities & DNET_HAS_GIGABIT)
 299		phydev->supported &= PHY_GBIT_FEATURES;
 300	else
 301		phydev->supported &= PHY_BASIC_FEATURES;
 302
 303	phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
 304
 305	phydev->advertising = phydev->supported;
 306
	/* start pessimistic; dnet_handle_link_change() updates these */
 307	bp->link = 0;
 308	bp->speed = 0;
 309	bp->duplex = -1;
 310	bp->phy_dev = phydev;
 311
 312	return 0;
 313}
314
/*
 * Allocate and register the MDIO bus backing the MAC's management
 * interface, then probe for the attached PHY.  Unwinds allocations in
 * reverse order on failure.  Returns 0 or a negative errno.
 */
 315static int dnet_mii_init(struct dnet *bp)
 316{
 317	int err, i;
 318
 319	bp->mii_bus = mdiobus_alloc();
 320	if (bp->mii_bus == NULL)
 321		return -ENOMEM;
 322
 323	bp->mii_bus->name = "dnet_mii_bus";
 324	bp->mii_bus->read = &dnet_mdio_read;
 325	bp->mii_bus->write = &dnet_mdio_write;
 326	bp->mii_bus->reset = &dnet_mdio_reset;
 327
 328	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
 329
 330	bp->mii_bus->priv = bp;
 331
 332	bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
 333	if (!bp->mii_bus->irq) {
 334		err = -ENOMEM;
 335		goto err_out;
 336	}
 337
	/* no PHY interrupt line is wired up; poll every PHY */
 338	for (i = 0; i < PHY_MAX_ADDR; i++)
 339		bp->mii_bus->irq[i] = PHY_POLL;
 340
	/* NOTE(review): bp->dev is a net_device, not a platform_device —
	 * confirm this platform_set_drvdata() call is intended */
 341	platform_set_drvdata(bp->dev, bp->mii_bus);
 342
 343	if (mdiobus_register(bp->mii_bus)) {
 344		err = -ENXIO;
 345		goto err_out_free_mdio_irq;
 346	}
 347
 348	if (dnet_mii_probe(bp->dev) != 0) {
 349		err = -ENXIO;
 350		goto err_out_unregister_bus;
 351	}
 352
 353	return 0;
 354
 355err_out_unregister_bus:
 356	mdiobus_unregister(bp->mii_bus);
 357err_out_free_mdio_irq:
 358	kfree(bp->mii_bus->irq);
 359err_out:
 360	mdiobus_free(bp->mii_bus);
 361	return err;
 362}
363
 364/* For Neptune board: LINK1000 as Link LED and TX as activity LED */
/* phylib fixup: writes the Marvell PHY's LED control register (0x18). */
 365int dnet_phy_marvell_fixup(struct phy_device *phydev)
 366{
 367	return phy_write(phydev, 0x18, 0x4148);
 368}
369
/*
 * Accumulate the hardware's clear-on-read-style counters into
 * bp->hw_stats.  Walks a contiguous range of registers in parallel
 * with the corresponding contiguous range of u32 struct fields — the
 * WARN_ONs verify the two layouts still line up.
 */
 370static void dnet_update_stats(struct dnet *bp)
 371{
 372	u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
 373	u32 *p = &bp->hw_stats.rx_pkt_ignr;
 374	u32 *end = &bp->hw_stats.rx_byte + 1;
 375
 376	WARN_ON((unsigned long)(end - p - 1) !=
 377		(DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);
 378
	/* rx counters: rx_pkt_ignr .. rx_byte */
 379	for (; p < end; p++, reg++)
 380		*p += readl(reg);
 381
	/* tx counters: tx_unicast .. tx_byte */
 382	reg = bp->regs + DNET_TX_UNICAST_CNT;
 383	p = &bp->hw_stats.tx_unicast;
 384	end = &bp->hw_stats.tx_byte + 1;
 385
 386	WARN_ON((unsigned long)(end - p - 1) !=
 387		(DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);
 388
 389	for (; p < end; p++, reg++)
 390		*p += readl(reg);
 391}
392
/*
 * NAPI poll: drain received frames from the RX FIFOs (length word from
 * RX_LEN_FIFO, payload 32 bits at a time from RX_DATA_FIFO) into fresh
 * skbs, up to `budget` packets.  When the FIFO empties, NAPI is
 * completed and the rx interrupt is re-enabled.
 */
 393static int dnet_poll(struct napi_struct *napi, int budget)
 394{
 395	struct dnet *bp = container_of(napi, struct dnet, napi);
 396	struct net_device *dev = bp->dev;
 397	int npackets = 0;
 398	unsigned int pkt_len;
 399	struct sk_buff *skb;
 400	unsigned int *data_ptr;
 401	u32 int_enable;
 402	u32 cmd_word;
 403	int i;
 404
 405	while (npackets < budget) {
 406		/*
 407		 * break out of while loop if there are no more
 408		 * packets waiting
 409		 */
	/* upper 16 bits of RX_FIFO_WCNT = pending packet count */
 410		if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
 411			napi_complete(napi);
 412			int_enable = dnet_readl(bp, INTR_ENB);
 413			int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
 414			dnet_writel(bp, int_enable, INTR_ENB);
 415			return 0;
 416		}
 417
	/* low 16 bits = frame length; high bits carry error flags */
 418		cmd_word = dnet_readl(bp, RX_LEN_FIFO);
 419		pkt_len = cmd_word & 0xFFFF;
 420
 421		if (cmd_word & 0xDF180000)
 422			printk(KERN_ERR "%s packet receive error %x\n",
 423			       __func__, cmd_word);
 424
 425		skb = dev_alloc_skb(pkt_len + 5);
 426		if (skb != NULL) {
 427			/* Align IP on 16 byte boundaries */
 428			skb_reserve(skb, 2);
 429			/*
 430			 * 'skb_put()' points to the start of sk_buff
 431			 * data area.
 432			 */
 433			data_ptr = (unsigned int *)skb_put(skb, pkt_len);
	/* copy the frame out of the data FIFO, one word at a time */
 434			for (i = 0; i < (pkt_len + 3) >> 2; i++)
 435				*data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
 436			skb->protocol = eth_type_trans(skb, dev);
 437			netif_receive_skb(skb);
 438			npackets++;
 439		} else
 440			printk(KERN_NOTICE
 441			       "%s: No memory to allocate a sk_buff of "
 442			       "size %u.\n", dev->name, pkt_len);
 443	}
 444
	/* NOTE(review): budget is reduced by npackets and then compared
	 * against npackets again — looks suspect vs. the usual NAPI
	 * "npackets < budget" test; confirm the intended semantics */
 445	budget -= npackets;
 446
 446	if (npackets < budget) {
 448		/* We processed all packets available.  Tell NAPI it can
 449		 * stop polling then re-enable rx interrupts */
 450		napi_complete(napi);
 451		int_enable = dnet_readl(bp, INTR_ENB);
 452		int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
 453		dnet_writel(bp, int_enable, INTR_ENB);
 454		return 0;
 455	}
 456
 457	/* There are still packets waiting */
 458	return 1;
 459}
460
/*
 * Interrupt handler.
 *
 * Dispatches on the pending-and-enabled interrupt sources: restarts a
 * stopped TX queue on "TX FIFO almost empty", recovers from RX/TX FIFO
 * overflow errors by flushing the offending FIFO, and hands received
 * frames off to NAPI with the RX interrupt masked.
 */
static irqreturn_t dnet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct dnet *bp = netdev_priv(dev);
	u32 int_src, int_enable, int_current;
	unsigned long flags;
	unsigned int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	/* read and clear the DNET irq (clear on read) */
	int_src = dnet_readl(bp, INTR_SRC);
	int_enable = dnet_readl(bp, INTR_ENB);
	int_current = int_src & int_enable;

	/* restart the queue if we had stopped it for TX fifo almost full */
	if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
		/* almost-empty is one-shot: mask it again until the next
		 * time dnet_start_xmit() stops the queue */
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, int_enable, INTR_ENB);
		netif_wake_queue(dev);
		handled = 1;
	}

	/* RX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
		printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, RX_STATUS), int_current);
		/* we can only flush the RX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		/* release the flush bit again */
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	/* TX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
		printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, TX_STATUS), int_current);
		/* we can only flush the TX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
		if (napi_schedule_prep(&bp->napi)) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers
			 */
			/* Disable Rx interrupts and schedule NAPI poll */
			int_enable = dnet_readl(bp, INTR_ENB);
			int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
			dnet_writel(bp, int_enable, INTR_ENB);
			__napi_schedule(&bp->napi);
		}
		handled = 1;
	}

	if (!handled)
		pr_debug("%s: irq %x remains\n", __func__, int_current);

	spin_unlock_irqrestore(&bp->lock, flags);

	return IRQ_RETVAL(handled);
}
531
#ifdef DEBUG
/* Dump the full skb payload as hex bytes (debug builds only). */
static inline void dnet_print_skb(struct sk_buff *skb)
{
	int k;
	printk(KERN_DEBUG PFX "data:");
	for (k = 0; k < skb->len; k++)
		printk(" %02x", (unsigned int)skb->data[k]);
	printk("\n");
}
#else
/* compiled out in non-debug builds */
#define dnet_print_skb(skb) do {} while (0)
#endif
544
545static int dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
546{
547
548 struct dnet *bp = netdev_priv(dev);
549 u32 tx_status, irq_enable;
550 unsigned int len, i, tx_cmd, wrsz;
551 unsigned long flags;
552 unsigned int *bufp;
553
554 tx_status = dnet_readl(bp, TX_STATUS);
555
556 pr_debug("start_xmit: len %u head %p data %p\n",
557 skb->len, skb->head, skb->data);
558 dnet_print_skb(skb);
559
560 /* frame size (words) */
561 len = (skb->len + 3) >> 2;
562
563 spin_lock_irqsave(&bp->lock, flags);
564
565 tx_status = dnet_readl(bp, TX_STATUS);
566
567 bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
568 wrsz = (u32) skb->len + 3;
569 wrsz += ((unsigned long) skb->data) & 0x3;
570 wrsz >>= 2;
571 tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;
572
573 /* check if there is enough room for the current frame */
574 if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
575 for (i = 0; i < wrsz; i++)
576 dnet_writel(bp, *bufp++, TX_DATA_FIFO);
577
578 /*
579 * inform MAC that a packet's written and ready to be
580 * shipped out
581 */
582 dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
583 }
584
585 if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
586 netif_stop_queue(dev);
587 tx_status = dnet_readl(bp, INTR_SRC);
588 irq_enable = dnet_readl(bp, INTR_ENB);
589 irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
590 dnet_writel(bp, irq_enable, INTR_ENB);
591 }
592
593 /* free the buffer */
594 dev_kfree_skb(skb);
595
596 spin_unlock_irqrestore(&bp->lock, flags);
597
598 dev->trans_start = jiffies;
599
600 return 0;
601}
602
/*
 * Bring the MAC into a known idle state: disable RX/TX, program the
 * FIFO thresholds and flush both FIFOs.  Safe to call at any time;
 * used from init and close paths.
 */
static void dnet_reset_hw(struct dnet *bp)
{
	/* put ts_mac in IDLE state i.e. disable rx/tx */
	dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);

	/*
	 * RX FIFO almost full threshold: only cmd FIFO almost full is
	 * implemented for RX side
	 */
	dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
	/*
	 * TX FIFO almost empty threshold: only data FIFO almost empty
	 * is implemented for TX side
	 */
	dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);

	/* flush rx/tx fifos */
	dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
	    SYS_CTL);
	/* give the flush time to complete, then release the flush bits */
	msleep(1);
	dnet_writel(bp, 0, SYS_CTL);
}
625
/*
 * Full hardware bring-up: reset the MAC, program the station address,
 * configure the RX/TX control register from the netdevice flags and
 * enable the interrupt sources the driver handles.
 */
static void dnet_init_hw(struct dnet *bp)
{
	u32 config;

	dnet_reset_hw(bp);
	__dnet_set_hwaddr(bp);

	/* read-modify-write so reserved bits are preserved */
	config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (bp->dev->flags & IFF_PROMISC)
		/* Copy All Frames */
		config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
	if (!(bp->dev->flags & IFF_BROADCAST))
		/* No BroadCast */
		config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;

	config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
	    DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
	    DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
	    DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;

	dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);

	/* clear irq before enabling them (INTR_SRC is clear-on-read) */
	config = dnet_readl(bp, INTR_SRC);

	/* enable RX/TX interrupt, recv packet ready interrupt */
	dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
	    DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
	    DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
	    DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
	    DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
}
659
660static int dnet_open(struct net_device *dev)
661{
662 struct dnet *bp = netdev_priv(dev);
663
664 /* if the phy is not yet register, retry later */
665 if (!bp->phy_dev)
666 return -EAGAIN;
667
668 if (!is_valid_ether_addr(dev->dev_addr))
669 return -EADDRNOTAVAIL;
670
671 napi_enable(&bp->napi);
672 dnet_init_hw(bp);
673
674 phy_start_aneg(bp->phy_dev);
675
676 /* schedule a link state check */
677 phy_start(bp->phy_dev);
678
679 netif_start_queue(dev);
680
681 return 0;
682}
683
684static int dnet_close(struct net_device *dev)
685{
686 struct dnet *bp = netdev_priv(dev);
687
688 netif_stop_queue(dev);
689 napi_disable(&bp->napi);
690
691 if (bp->phy_dev)
692 phy_stop(bp->phy_dev);
693
694 dnet_reset_hw(bp);
695 netif_carrier_off(dev);
696
697 return 0;
698}
699
700static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
701{
702 pr_debug("%s\n", __func__);
703 pr_debug("----------------------------- RX statistics "
704 "-------------------------------\n");
705 pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
706 pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
707 pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
708 pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
709 pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
710 pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
711 pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
712 pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
713 pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
714 pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
715 pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
716 pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
717 pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
718 pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
719 pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
720 pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
721 pr_debug("----------------------------- TX statistics "
722 "-------------------------------\n");
723 pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
724 pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
725 pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
726 pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
727 pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
728 pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
729 pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
730 pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
731}
732
733static struct net_device_stats *dnet_get_stats(struct net_device *dev)
734{
735
736 struct dnet *bp = netdev_priv(dev);
737 struct net_device_stats *nstat = &dev->stats;
738 struct dnet_stats *hwstat = &bp->hw_stats;
739
740 /* read stats from hardware */
741 dnet_update_stats(bp);
742
743 /* Convert HW stats into netdevice stats */
744 nstat->rx_errors = (hwstat->rx_len_chk_err +
745 hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
746 /* ignore IGP violation error
747 hwstat->rx_ipg_viol + */
748 hwstat->rx_crc_err +
749 hwstat->rx_pre_shrink +
750 hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
751 nstat->tx_errors = hwstat->tx_bad_fcs;
752 nstat->rx_length_errors = (hwstat->rx_len_chk_err +
753 hwstat->rx_lng_frm +
754 hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
755 nstat->rx_crc_errors = hwstat->rx_crc_err;
756 nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
757 nstat->rx_packets = hwstat->rx_ok_pkt;
758 nstat->tx_packets = (hwstat->tx_unicast +
759 hwstat->tx_multicast + hwstat->tx_brdcast);
760 nstat->rx_bytes = hwstat->rx_byte;
761 nstat->tx_bytes = hwstat->tx_byte;
762 nstat->multicast = hwstat->rx_multicast;
763 nstat->rx_missed_errors = hwstat->rx_pkt_ignr;
764
765 dnet_print_pretty_hwstats(hwstat);
766
767 return nstat;
768}
769
770static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
771{
772 struct dnet *bp = netdev_priv(dev);
773 struct phy_device *phydev = bp->phy_dev;
774
775 if (!phydev)
776 return -ENODEV;
777
778 return phy_ethtool_gset(phydev, cmd);
779}
780
781static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
782{
783 struct dnet *bp = netdev_priv(dev);
784 struct phy_device *phydev = bp->phy_dev;
785
786 if (!phydev)
787 return -ENODEV;
788
789 return phy_ethtool_sset(phydev, cmd);
790}
791
792static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
793{
794 struct dnet *bp = netdev_priv(dev);
795 struct phy_device *phydev = bp->phy_dev;
796
797 if (!netif_running(dev))
798 return -EINVAL;
799
800 if (!phydev)
801 return -ENODEV;
802
803 return phy_mii_ioctl(phydev, if_mii(rq), cmd);
804}
805
806static void dnet_get_drvinfo(struct net_device *dev,
807 struct ethtool_drvinfo *info)
808{
809 strcpy(info->driver, DRV_NAME);
810 strcpy(info->version, DRV_VERSION);
811 strcpy(info->bus_info, "0");
812}
813
/* ethtool operations: PHY-backed link settings and link state only. */
static const struct ethtool_ops dnet_ethtool_ops = {
	.get_settings = dnet_get_settings,
	.set_settings = dnet_set_settings,
	.get_drvinfo = dnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
820
/* Net device operations; generic eth_* helpers cover address/MTU. */
static const struct net_device_ops dnet_netdev_ops = {
	.ndo_open = dnet_open,
	.ndo_stop = dnet_close,
	.ndo_get_stats = dnet_get_stats,
	.ndo_start_xmit = dnet_start_xmit,
	.ndo_do_ioctl = dnet_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = eth_change_mtu,
};
831
832static int __devinit dnet_probe(struct platform_device *pdev)
833{
834 struct resource *res;
835 struct net_device *dev;
836 struct dnet *bp;
837 struct phy_device *phydev;
838 int err = -ENXIO;
839 unsigned int mem_base, mem_size, irq;
840
841 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
842 if (!res) {
843 dev_err(&pdev->dev, "no mmio resource defined\n");
844 goto err_out;
845 }
846 mem_base = res->start;
847 mem_size = resource_size(res);
848 irq = platform_get_irq(pdev, 0);
849
850 if (!request_mem_region(mem_base, mem_size, DRV_NAME)) {
851 dev_err(&pdev->dev, "no memory region available\n");
852 err = -EBUSY;
853 goto err_out;
854 }
855
856 err = -ENOMEM;
857 dev = alloc_etherdev(sizeof(*bp));
858 if (!dev) {
859 dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
860 goto err_out;
861 }
862
863 /* TODO: Actually, we have some interesting features... */
864 dev->features |= 0;
865
866 bp = netdev_priv(dev);
867 bp->dev = dev;
868
869 SET_NETDEV_DEV(dev, &pdev->dev);
870
871 spin_lock_init(&bp->lock);
872
873 bp->regs = ioremap(mem_base, mem_size);
874 if (!bp->regs) {
875 dev_err(&pdev->dev, "failed to map registers, aborting.\n");
876 err = -ENOMEM;
877 goto err_out_free_dev;
878 }
879
880 dev->irq = irq;
881 err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
882 if (err) {
883 dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
884 irq, err);
885 goto err_out_iounmap;
886 }
887
888 dev->netdev_ops = &dnet_netdev_ops;
889 netif_napi_add(dev, &bp->napi, dnet_poll, 64);
890 dev->ethtool_ops = &dnet_ethtool_ops;
891
892 dev->base_addr = (unsigned long)bp->regs;
893
894 bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;
895
896 dnet_get_hwaddr(bp);
897
898 if (!is_valid_ether_addr(dev->dev_addr)) {
899 /* choose a random ethernet address */
900 random_ether_addr(dev->dev_addr);
901 __dnet_set_hwaddr(bp);
902 }
903
904 err = register_netdev(dev);
905 if (err) {
906 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
907 goto err_out_free_irq;
908 }
909
910 /* register the PHY board fixup (for Marvell 88E1111) */
911 err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
912 dnet_phy_marvell_fixup);
913 /* we can live without it, so just issue a warning */
914 if (err)
915 dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");
916
917 if (dnet_mii_init(bp) != 0)
918 goto err_out_unregister_netdev;
919
920 dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
921 bp->regs, mem_base, dev->irq, dev->dev_addr);
922 dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma \n",
923 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
924 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
925 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
926 (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
927 phydev = bp->phy_dev;
928 dev_info(&pdev->dev, "attached PHY driver [%s] "
929 "(mii_bus:phy_addr=%s, irq=%d)\n",
930 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
931
932 return 0;
933
934err_out_unregister_netdev:
935 unregister_netdev(dev);
936err_out_free_irq:
937 free_irq(dev->irq, dev);
938err_out_iounmap:
939 iounmap(bp->regs);
940err_out_free_dev:
941 free_netdev(dev);
942err_out:
943 return err;
944}
945
/*
 * Remove: tear down everything set up by dnet_probe() -- PHY, MDIO
 * bus, netdevice, IRQ and register mapping.
 *
 * NOTE(review): this relies on platform_get_drvdata() returning the
 * net_device; the probe as shown never calls platform_set_drvdata(),
 * so confirm it is set somewhere or this body is skipped entirely.
 */
static int __devexit dnet_remove(struct platform_device *pdev)
{

	struct net_device *dev;
	struct dnet *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		/* disconnect the PHY before tearing down its bus */
		if (bp->phy_dev)
			phy_disconnect(bp->phy_dev);
		mdiobus_unregister(bp->mii_bus);
		kfree(bp->mii_bus->irq);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(bp->regs);
		free_netdev(dev);
	}

	return 0;
}
969
/* Platform driver glue: binds to devices named "dnet". */
static struct platform_driver dnet_driver = {
	.probe = dnet_probe,
	.remove = __devexit_p(dnet_remove),
	.driver = {
		.name = "dnet",
	},
};

/* Module entry point: register the platform driver. */
static int __init dnet_init(void)
{
	return platform_driver_register(&dnet_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit dnet_exit(void)
{
	platform_driver_unregister(&dnet_driver);
}

module_init(dnet_init);
module_exit(dnet_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
	      "Matteo Vit <matteo.vit@dave.eu>");
diff --git a/drivers/net/dnet.h b/drivers/net/dnet.h
new file mode 100644
index 000000000000..37f5b30fa78b
--- /dev/null
+++ b/drivers/net/dnet.h
@@ -0,0 +1,225 @@
1/*
2 * Dave DNET Ethernet Controller driver
3 *
4 * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef _DNET_H
11#define _DNET_H
12
13#define DRV_NAME "dnet"
14#define DRV_VERSION "0.9.1"
15#define PFX DRV_NAME ": "
16
17/* Register access macros */
18#define dnet_writel(port, value, reg) \
19 writel((value), (port)->regs + DNET_##reg)
20#define dnet_readl(port, reg) readl((port)->regs + DNET_##reg)
21
22/* ALL DNET FIFO REGISTERS */
23#define DNET_RX_LEN_FIFO 0x000 /* RX_LEN_FIFO */
24#define DNET_RX_DATA_FIFO 0x004 /* RX_DATA_FIFO */
25#define DNET_TX_LEN_FIFO 0x008 /* TX_LEN_FIFO */
26#define DNET_TX_DATA_FIFO 0x00C /* TX_DATA_FIFO */
27
28/* ALL DNET CONTROL/STATUS REGISTERS OFFSETS */
29#define DNET_VERCAPS 0x100 /* VERCAPS */
30#define DNET_INTR_SRC 0x104 /* INTR_SRC */
31#define DNET_INTR_ENB 0x108 /* INTR_ENB */
32#define DNET_RX_STATUS 0x10C /* RX_STATUS */
33#define DNET_TX_STATUS 0x110 /* TX_STATUS */
34#define DNET_RX_FRAMES_CNT 0x114 /* RX_FRAMES_CNT */
35#define DNET_TX_FRAMES_CNT 0x118 /* TX_FRAMES_CNT */
36#define DNET_RX_FIFO_TH 0x11C /* RX_FIFO_TH */
37#define DNET_TX_FIFO_TH 0x120 /* TX_FIFO_TH */
38#define DNET_SYS_CTL 0x124 /* SYS_CTL */
39#define DNET_PAUSE_TMR 0x128 /* PAUSE_TMR */
40#define DNET_RX_FIFO_WCNT 0x12C /* RX_FIFO_WCNT */
41#define DNET_TX_FIFO_WCNT 0x130 /* TX_FIFO_WCNT */
42
43/* ALL DNET MAC REGISTERS */
44#define DNET_MACREG_DATA 0x200 /* Mac-Reg Data */
45#define DNET_MACREG_ADDR 0x204 /* Mac-Reg Addr */
46
47/* ALL DNET RX STATISTICS COUNTERS */
48#define DNET_RX_PKT_IGNR_CNT 0x300
49#define DNET_RX_LEN_CHK_ERR_CNT 0x304
50#define DNET_RX_LNG_FRM_CNT 0x308
51#define DNET_RX_SHRT_FRM_CNT 0x30C
52#define DNET_RX_IPG_VIOL_CNT 0x310
53#define DNET_RX_CRC_ERR_CNT 0x314
54#define DNET_RX_OK_PKT_CNT 0x318
55#define DNET_RX_CTL_FRM_CNT 0x31C
56#define DNET_RX_PAUSE_FRM_CNT 0x320
57#define DNET_RX_MULTICAST_CNT 0x324
58#define DNET_RX_BROADCAST_CNT 0x328
59#define DNET_RX_VLAN_TAG_CNT 0x32C
60#define DNET_RX_PRE_SHRINK_CNT 0x330
61#define DNET_RX_DRIB_NIB_CNT 0x334
62#define DNET_RX_UNSUP_OPCD_CNT 0x338
63#define DNET_RX_BYTE_CNT 0x33C
64
65/* DNET TX STATISTICS COUNTERS */
66#define DNET_TX_UNICAST_CNT 0x400
67#define DNET_TX_PAUSE_FRM_CNT 0x404
68#define DNET_TX_MULTICAST_CNT 0x408
69#define DNET_TX_BRDCAST_CNT 0x40C
70#define DNET_TX_VLAN_TAG_CNT 0x410
71#define DNET_TX_BAD_FCS_CNT 0x414
72#define DNET_TX_JUMBO_CNT 0x418
73#define DNET_TX_BYTE_CNT 0x41C
74
75/* SOME INTERNAL MAC-CORE REGISTER */
76#define DNET_INTERNAL_MODE_REG 0x0
77#define DNET_INTERNAL_RXTX_CONTROL_REG 0x2
78#define DNET_INTERNAL_MAX_PKT_SIZE_REG 0x4
79#define DNET_INTERNAL_IGP_REG 0x8
80#define DNET_INTERNAL_MAC_ADDR_0_REG 0xa
81#define DNET_INTERNAL_MAC_ADDR_1_REG 0xc
82#define DNET_INTERNAL_MAC_ADDR_2_REG 0xe
83#define DNET_INTERNAL_TX_RX_STS_REG 0x12
84#define DNET_INTERNAL_GMII_MNG_CTL_REG 0x14
85#define DNET_INTERNAL_GMII_MNG_DAT_REG 0x16
86
87#define DNET_INTERNAL_GMII_MNG_CMD_FIN (1 << 14)
88
89#define DNET_INTERNAL_WRITE (1 << 31)
90
91/* MAC-CORE REGISTER FIELDS */
92
93/* MAC-CORE MODE REGISTER FIELDS */
94#define DNET_INTERNAL_MODE_GBITEN (1 << 0)
95#define DNET_INTERNAL_MODE_FCEN (1 << 1)
96#define DNET_INTERNAL_MODE_RXEN (1 << 2)
97#define DNET_INTERNAL_MODE_TXEN (1 << 3)
98
99/* MAC-CORE RXTX CONTROL REGISTER FIELDS */
100#define DNET_INTERNAL_RXTX_CONTROL_RXSHORTFRAME (1 << 8)
101#define DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST (1 << 7)
102#define DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST (1 << 4)
103#define DNET_INTERNAL_RXTX_CONTROL_RXPAUSE (1 << 3)
104#define DNET_INTERNAL_RXTX_CONTROL_DISTXFCS (1 << 2)
105#define DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS (1 << 1)
106#define DNET_INTERNAL_RXTX_CONTROL_ENPROMISC (1 << 0)
107#define DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL (1 << 6)
108#define DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP (1 << 5)
109
110/* SYSTEM CONTROL REGISTER FIELDS */
111#define DNET_SYS_CTL_IGNORENEXTPKT (1 << 0)
112#define DNET_SYS_CTL_SENDPAUSE (1 << 2)
113#define DNET_SYS_CTL_RXFIFOFLUSH (1 << 3)
114#define DNET_SYS_CTL_TXFIFOFLUSH (1 << 4)
115
116/* TX STATUS REGISTER FIELDS */
117#define DNET_TX_STATUS_FIFO_ALMOST_EMPTY (1 << 2)
118#define DNET_TX_STATUS_FIFO_ALMOST_FULL (1 << 1)
119
120/* INTERRUPT SOURCE REGISTER FIELDS */
121#define DNET_INTR_SRC_TX_PKTSENT (1 << 0)
122#define DNET_INTR_SRC_TX_FIFOAF (1 << 1)
123#define DNET_INTR_SRC_TX_FIFOAE (1 << 2)
124#define DNET_INTR_SRC_TX_DISCFRM (1 << 3)
125#define DNET_INTR_SRC_TX_FIFOFULL (1 << 4)
126#define DNET_INTR_SRC_RX_CMDFIFOAF (1 << 8)
127#define DNET_INTR_SRC_RX_CMDFIFOFF (1 << 9)
128#define DNET_INTR_SRC_RX_DATAFIFOFF (1 << 10)
129#define DNET_INTR_SRC_TX_SUMMARY (1 << 16)
130#define DNET_INTR_SRC_RX_SUMMARY (1 << 17)
131#define DNET_INTR_SRC_PHY (1 << 19)
132
133/* INTERRUPT ENABLE REGISTER FIELDS */
134#define DNET_INTR_ENB_TX_PKTSENT (1 << 0)
135#define DNET_INTR_ENB_TX_FIFOAF (1 << 1)
136#define DNET_INTR_ENB_TX_FIFOAE (1 << 2)
137#define DNET_INTR_ENB_TX_DISCFRM (1 << 3)
138#define DNET_INTR_ENB_TX_FIFOFULL (1 << 4)
139#define DNET_INTR_ENB_RX_PKTRDY (1 << 8)
140#define DNET_INTR_ENB_RX_FIFOAF (1 << 9)
141#define DNET_INTR_ENB_RX_FIFOERR (1 << 10)
142#define DNET_INTR_ENB_RX_ERROR (1 << 11)
143#define DNET_INTR_ENB_RX_FIFOFULL (1 << 12)
144#define DNET_INTR_ENB_RX_FIFOAE (1 << 13)
145#define DNET_INTR_ENB_TX_SUMMARY (1 << 16)
146#define DNET_INTR_ENB_RX_SUMMARY (1 << 17)
147#define DNET_INTR_ENB_GLOBAL_ENABLE (1 << 18)
148
149/* default values:
150 * almost empty = less than one full sized ethernet frame (no jumbo) inside
151 * the fifo almost full = can write less than one full sized ethernet frame
152 * (no jumbo) inside the fifo
153 */
154#define DNET_CFG_TX_FIFO_FULL_THRES 25
155#define DNET_CFG_RX_FIFO_FULL_THRES 20
156
157/*
158 * Capabilities. Used by the driver to know the capabilities that the ethernet
159 * controller inside the FPGA have.
160 */
161
162#define DNET_HAS_MDIO (1 << 0)
163#define DNET_HAS_IRQ (1 << 1)
164#define DNET_HAS_GIGABIT (1 << 2)
165#define DNET_HAS_DMA (1 << 3)
166
167#define DNET_HAS_MII (1 << 4) /* or GMII */
168#define DNET_HAS_RMII (1 << 5) /* or RGMII */
169
170#define DNET_CAPS_MASK 0xFFFF
171
172#define DNET_FIFO_SIZE 1024 /* 1K x 32 bit */
173#define DNET_FIFO_TX_DATA_AF_TH (DNET_FIFO_SIZE - 384) /* 384 = 1536 / 4 */
174#define DNET_FIFO_TX_DATA_AE_TH 384
175
176#define DNET_FIFO_RX_CMD_AF_TH (1 << 16) /* just one frame inside the FIFO */
177
178/*
179 * Hardware-collected statistics.
180 */
struct dnet_stats {
	/* RX counters: field order mirrors the DNET_RX_*_CNT register
	 * block (0x300..0x33C) -- dnet_update_stats() walks the struct
	 * and the registers in parallel, so do not reorder fields */
	u32 rx_pkt_ignr;
	u32 rx_len_chk_err;
	u32 rx_lng_frm;
	u32 rx_shrt_frm;
	u32 rx_ipg_viol;
	u32 rx_crc_err;
	u32 rx_ok_pkt;
	u32 rx_ctl_frm;
	u32 rx_pause_frm;
	u32 rx_multicast;
	u32 rx_broadcast;
	u32 rx_vlan_tag;
	u32 rx_pre_shrink;
	u32 rx_drib_nib;
	u32 rx_unsup_opcd;
	u32 rx_byte;
	/* TX counters: order mirrors DNET_TX_UNICAST_CNT..DNET_TX_BYTE_CNT
	 * (0x400..0x41C); the WARN_ON in dnet_update_stats() checks this */
	u32 tx_unicast;
	u32 tx_pause_frm;
	u32 tx_multicast;
	u32 tx_brdcast;
	u32 tx_vlan_tag;
	u32 tx_bad_fcs;
	u32 tx_jumbo;
	u32 tx_byte;
};
207
/* Per-device driver state, stored in netdev_priv(). */
struct dnet {
	void __iomem *regs;	/* mapped controller register window */
	spinlock_t lock;	/* serializes irq handler vs. start_xmit */
	struct platform_device *pdev;
	struct net_device *dev;
	struct dnet_stats hw_stats;	/* accumulated hardware counters */
	unsigned int capabilities; /* read from FPGA */
	struct napi_struct napi;

	/* PHY stuff */
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	/* cached link parameters -- presumably updated by the PHY
	 * link-change callback (not visible in this chunk; verify) */
	unsigned int link;
	unsigned int speed;
	unsigned int duplex;
};
224
225#endif /* _DNET_H */
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 87a706694fb3..6fd7aa61736e 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2594,6 +2594,9 @@ static int __devinit emac_init_config(struct emac_instance *dev)
2594 if (of_device_is_compatible(np, "ibm,emac-460ex") || 2594 if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2595 of_device_is_compatible(np, "ibm,emac-460gt")) 2595 of_device_is_compatible(np, "ibm,emac-460gt"))
2596 dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX; 2596 dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2597 if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2598 of_device_is_compatible(np, "ibm,emac-405exr"))
2599 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2597 } else if (of_device_is_compatible(np, "ibm,emac4")) { 2600 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2598 dev->features |= EMAC_FTR_EMAC4; 2601 dev->features |= EMAC_FTR_EMAC4;
2599 if (of_device_is_compatible(np, "ibm,emac-440gx")) 2602 if (of_device_is_compatible(np, "ibm,emac-440gx"))
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index a50db5398fa5..9dd13ad12ce4 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1023,11 +1023,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1023 struct net_device *netdev; 1023 struct net_device *netdev;
1024 struct igb_adapter *adapter; 1024 struct igb_adapter *adapter;
1025 struct e1000_hw *hw; 1025 struct e1000_hw *hw;
1026 struct pci_dev *us_dev;
1027 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 1026 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1028 unsigned long mmio_start, mmio_len; 1027 unsigned long mmio_start, mmio_len;
1029 int i, err, pci_using_dac, pos; 1028 int i, err, pci_using_dac;
1030 u16 eeprom_data = 0, state = 0; 1029 u16 eeprom_data = 0;
1031 u16 eeprom_apme_mask = IGB_EEPROM_APME; 1030 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1032 u32 part_num; 1031 u32 part_num;
1033 int bars, need_ioport; 1032 int bars, need_ioport;
@@ -1062,27 +1061,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1062 } 1061 }
1063 } 1062 }
1064 1063
1065 /* 82575 requires that the pci-e link partner disable the L0s state */
1066 switch (pdev->device) {
1067 case E1000_DEV_ID_82575EB_COPPER:
1068 case E1000_DEV_ID_82575EB_FIBER_SERDES:
1069 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1070 us_dev = pdev->bus->self;
1071 pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
1072 if (pos) {
1073 pci_read_config_word(us_dev, pos + PCI_EXP_LNKCTL,
1074 &state);
1075 state &= ~PCIE_LINK_STATE_L0S;
1076 pci_write_config_word(us_dev, pos + PCI_EXP_LNKCTL,
1077 state);
1078 dev_info(&pdev->dev,
1079 "Disabling ASPM L0s upstream switch port %s\n",
1080 pci_name(us_dev));
1081 }
1082 default:
1083 break;
1084 }
1085
1086 err = pci_request_selected_regions(pdev, bars, igb_driver_name); 1064 err = pci_request_selected_regions(pdev, bars, igb_driver_name);
1087 if (err) 1065 if (err)
1088 goto err_pci_reg; 1066 goto err_pci_reg;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index d2f4d5f508b7..5d364a96e35d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3973,6 +3973,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
3973 .ndo_stop = ixgbe_close, 3973 .ndo_stop = ixgbe_close,
3974 .ndo_start_xmit = ixgbe_xmit_frame, 3974 .ndo_start_xmit = ixgbe_xmit_frame,
3975 .ndo_get_stats = ixgbe_get_stats, 3975 .ndo_get_stats = ixgbe_get_stats,
3976 .ndo_set_rx_mode = ixgbe_set_rx_mode,
3976 .ndo_set_multicast_list = ixgbe_set_rx_mode, 3977 .ndo_set_multicast_list = ixgbe_set_rx_mode,
3977 .ndo_validate_addr = eth_validate_addr, 3978 .ndo_validate_addr = eth_validate_addr,
3978 .ndo_set_mac_address = ixgbe_set_mac, 3979 .ndo_set_mac_address = ixgbe_set_mac,
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 13f11f402a99..b0bc3bc18e9c 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -2030,11 +2030,6 @@ static void port_start(struct mv643xx_eth_private *mp)
2030 } 2030 }
2031 2031
2032 /* 2032 /*
2033 * Add configured unicast address to address filter table.
2034 */
2035 mv643xx_eth_program_unicast_filter(mp->dev);
2036
2037 /*
2038 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast 2033 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
2039 * frames to RX queue #0, and include the pseudo-header when 2034 * frames to RX queue #0, and include the pseudo-header when
2040 * calculating receive checksums. 2035 * calculating receive checksums.
@@ -2047,6 +2042,11 @@ static void port_start(struct mv643xx_eth_private *mp)
2047 wrlp(mp, PORT_CONFIG_EXT, 0x00000000); 2042 wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
2048 2043
2049 /* 2044 /*
2045 * Add configured unicast addresses to address filter table.
2046 */
2047 mv643xx_eth_program_unicast_filter(mp->dev);
2048
2049 /*
2050 * Enable the receive queues. 2050 * Enable the receive queues.
2051 */ 2051 */
2052 for (i = 0; i < mp->rxq_count; i++) { 2052 for (i = 0; i < mp->rxq_count; i++) {
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index f4dd9acb6877..1ff066b2281a 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -1595,7 +1595,6 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter)
1595} 1595}
1596 1596
1597 1597
1598int netxen_is_flash_supported(struct netxen_adapter *adapter);
1599int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac); 1598int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
1600int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac); 1599int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
1601extern void netxen_change_ringparam(struct netxen_adapter *adapter); 1600extern void netxen_change_ringparam(struct netxen_adapter *adapter);
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 821cff68b3f3..7fea77088108 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -706,28 +706,6 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
706 return rc; 706 return rc;
707} 707}
708 708
709int netxen_is_flash_supported(struct netxen_adapter *adapter)
710{
711 const int locs[] = { 0, 0x4, 0x100, 0x4000, 0x4128 };
712 int addr, val01, val02, i, j;
713
714 /* if the flash size less than 4Mb, make huge war cry and die */
715 for (j = 1; j < 4; j++) {
716 addr = j * NETXEN_NIC_WINDOW_MARGIN;
717 for (i = 0; i < ARRAY_SIZE(locs); i++) {
718 if (netxen_rom_fast_read(adapter, locs[i], &val01) == 0
719 && netxen_rom_fast_read(adapter, (addr + locs[i]),
720 &val02) == 0) {
721 if (val01 == val02)
722 return -1;
723 } else
724 return -1;
725 }
726 }
727
728 return 0;
729}
730
731static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, 709static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
732 int size, __le32 * buf) 710 int size, __le32 * buf)
733{ 711{
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 13087782ac40..c172b6e24a96 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -405,9 +405,6 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
405 struct net_device *netdev = adapter->netdev; 405 struct net_device *netdev = adapter->netdev;
406 struct pci_dev *pdev = adapter->pdev; 406 struct pci_dev *pdev = adapter->pdev;
407 407
408 if (netxen_is_flash_supported(adapter) != 0)
409 return -EIO;
410
411 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 408 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
412 if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) 409 if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
413 return -EIO; 410 return -EIO;
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index e6fdce9206cc..aff9c5fec738 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -927,6 +927,7 @@ struct ib_mac_iocb_rsp {
927 u8 flags1; 927 u8 flags1;
928#define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */ 928#define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */
929#define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */ 929#define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */
930#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
930#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ 931#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
931#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ 932#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
932#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */ 933#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 8ea72dc60f79..91191f761fba 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1436,18 +1436,32 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1436 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { 1436 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1437 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n"); 1437 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1438 } 1438 }
1439 if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) { 1439
1440 QPRINTK(qdev, RX_STATUS, ERR, 1440 skb->protocol = eth_type_trans(skb, ndev);
1441 "Bad checksum for this %s packet.\n", 1441 skb->ip_summed = CHECKSUM_NONE;
1442 ((ib_mac_rsp-> 1442
1443 flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP")); 1443 /* If rx checksum is on, and there are no
1444 skb->ip_summed = CHECKSUM_NONE; 1444 * csum or frame errors.
1445 } else if (qdev->rx_csum && 1445 */
1446 ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) || 1446 if (qdev->rx_csum &&
1447 ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && 1447 !(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) &&
1448 !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) { 1448 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1449 QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n"); 1449 /* TCP frame. */
1450 skb->ip_summed = CHECKSUM_UNNECESSARY; 1450 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1451 QPRINTK(qdev, RX_STATUS, DEBUG,
1452 "TCP checksum done!\n");
1453 skb->ip_summed = CHECKSUM_UNNECESSARY;
1454 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1455 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1456 /* Unfragmented ipv4 UDP frame. */
1457 struct iphdr *iph = (struct iphdr *) skb->data;
1458 if (!(iph->frag_off &
1459 cpu_to_be16(IP_MF|IP_OFFSET))) {
1460 skb->ip_summed = CHECKSUM_UNNECESSARY;
1461 QPRINTK(qdev, RX_STATUS, DEBUG,
1462 "TCP checksum done!\n");
1463 }
1464 }
1451 } 1465 }
1452 qdev->stats.rx_packets++; 1466 qdev->stats.rx_packets++;
1453 qdev->stats.rx_bytes += skb->len; 1467 qdev->stats.rx_bytes += skb->len;
@@ -1927,6 +1941,9 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
1927 1941
1928 tx_ring = &qdev->tx_ring[tx_ring_idx]; 1942 tx_ring = &qdev->tx_ring[tx_ring_idx];
1929 1943
1944 if (skb_padto(skb, ETH_ZLEN))
1945 return NETDEV_TX_OK;
1946
1930 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { 1947 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
1931 QPRINTK(qdev, TX_QUEUED, INFO, 1948 QPRINTK(qdev, TX_QUEUED, INFO,
1932 "%s: shutting down tx queue %d du to lack of resources.\n", 1949 "%s: shutting down tx queue %d du to lack of resources.\n",
@@ -2970,9 +2987,9 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
2970 mask = value << 16; 2987 mask = value << 16;
2971 ql_write32(qdev, SYS, mask | value); 2988 ql_write32(qdev, SYS, mask | value);
2972 2989
2973 /* Set the default queue. */ 2990 /* Set the default queue, and VLAN behavior. */
2974 value = NIC_RCV_CFG_DFQ; 2991 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
2975 mask = NIC_RCV_CFG_DFQ_MASK; 2992 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
2976 ql_write32(qdev, NIC_RCV_CFG, (mask | value)); 2993 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
2977 2994
2978 /* Set the MPI interrupt to enabled. */ 2995 /* Set the MPI interrupt to enabled. */
@@ -3149,6 +3166,11 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3149 3166
3150 ql_tx_ring_clean(qdev); 3167 ql_tx_ring_clean(qdev);
3151 3168
3169 /* Call netif_napi_del() from common point.
3170 */
3171 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
3172 netif_napi_del(&qdev->rx_ring[i].napi);
3173
3152 spin_lock(&qdev->hw_lock); 3174 spin_lock(&qdev->hw_lock);
3153 status = ql_adapter_reset(qdev); 3175 status = ql_adapter_reset(qdev);
3154 if (status) 3176 if (status)
@@ -3853,7 +3875,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
3853{ 3875{
3854 struct net_device *ndev = pci_get_drvdata(pdev); 3876 struct net_device *ndev = pci_get_drvdata(pdev);
3855 struct ql_adapter *qdev = netdev_priv(ndev); 3877 struct ql_adapter *qdev = netdev_priv(ndev);
3856 int err, i; 3878 int err;
3857 3879
3858 netif_device_detach(ndev); 3880 netif_device_detach(ndev);
3859 3881
@@ -3863,9 +3885,6 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
3863 return err; 3885 return err;
3864 } 3886 }
3865 3887
3866 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
3867 netif_napi_del(&qdev->rx_ring[i].napi);
3868
3869 err = pci_save_state(pdev); 3888 err = pci_save_state(pdev);
3870 if (err) 3889 if (err)
3871 return err; 3890 return err;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index b3473401c83a..43fedb9ecedb 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -81,9 +81,9 @@ static const int multicast_filter_limit = 32;
81#define RTL8169_TX_TIMEOUT (6*HZ) 81#define RTL8169_TX_TIMEOUT (6*HZ)
82#define RTL8169_PHY_TIMEOUT (10*HZ) 82#define RTL8169_PHY_TIMEOUT (10*HZ)
83 83
84#define RTL_EEPROM_SIG 0x8129 84#define RTL_EEPROM_SIG cpu_to_le32(0x8129)
85#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
85#define RTL_EEPROM_SIG_ADDR 0x0000 86#define RTL_EEPROM_SIG_ADDR 0x0000
86#define RTL_EEPROM_MAC_ADDR 0x0007
87 87
88/* write/read MMIO register */ 88/* write/read MMIO register */
89#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) 89#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
@@ -293,11 +293,6 @@ enum rtl_register_content {
293 /* Cfg9346Bits */ 293 /* Cfg9346Bits */
294 Cfg9346_Lock = 0x00, 294 Cfg9346_Lock = 0x00,
295 Cfg9346_Unlock = 0xc0, 295 Cfg9346_Unlock = 0xc0,
296 Cfg9346_Program = 0x80, /* Programming mode */
297 Cfg9346_EECS = 0x08, /* Chip select */
298 Cfg9346_EESK = 0x04, /* Serial data clock */
299 Cfg9346_EEDI = 0x02, /* Data input */
300 Cfg9346_EEDO = 0x01, /* Data output */
301 296
302 /* rx_mode_bits */ 297 /* rx_mode_bits */
303 AcceptErr = 0x20, 298 AcceptErr = 0x20,
@@ -310,7 +305,6 @@ enum rtl_register_content {
310 /* RxConfigBits */ 305 /* RxConfigBits */
311 RxCfgFIFOShift = 13, 306 RxCfgFIFOShift = 13,
312 RxCfgDMAShift = 8, 307 RxCfgDMAShift = 8,
313 RxCfg9356SEL = 6, /* EEPROM type: 0 = 9346, 1 = 9356 */
314 308
315 /* TxConfigBits */ 309 /* TxConfigBits */
316 TxInterFrameGapShift = 24, 310 TxInterFrameGapShift = 24,
@@ -1969,108 +1963,6 @@ static const struct net_device_ops rtl8169_netdev_ops = {
1969 1963
1970}; 1964};
1971 1965
1972/* Delay between EEPROM clock transitions. Force out buffered PCI writes. */
1973#define RTL_EEPROM_DELAY() RTL_R8(Cfg9346)
1974#define RTL_EEPROM_READ_CMD 6
1975
1976/* read 16bit word stored in EEPROM. EEPROM is addressed by words. */
1977static u16 rtl_eeprom_read(void __iomem *ioaddr, int addr)
1978{
1979 u16 result = 0;
1980 int cmd, cmd_len, i;
1981
1982 /* check for EEPROM address size (in bits) */
1983 if (RTL_R32(RxConfig) & (1 << RxCfg9356SEL)) {
1984 /* EEPROM is 93C56 */
1985 cmd_len = 3 + 8; /* 3 bits for command id and 8 for address */
1986 cmd = (RTL_EEPROM_READ_CMD << 8) | (addr & 0xff);
1987 } else {
1988 /* EEPROM is 93C46 */
1989 cmd_len = 3 + 6; /* 3 bits for command id and 6 for address */
1990 cmd = (RTL_EEPROM_READ_CMD << 6) | (addr & 0x3f);
1991 }
1992
1993 /* enter programming mode */
1994 RTL_W8(Cfg9346, Cfg9346_Program | Cfg9346_EECS);
1995 RTL_EEPROM_DELAY();
1996
1997 /* write command and requested address */
1998 while (cmd_len--) {
1999 u8 x = Cfg9346_Program | Cfg9346_EECS;
2000
2001 x |= (cmd & (1 << cmd_len)) ? Cfg9346_EEDI : 0;
2002
2003 /* write a bit */
2004 RTL_W8(Cfg9346, x);
2005 RTL_EEPROM_DELAY();
2006
2007 /* raise clock */
2008 RTL_W8(Cfg9346, x | Cfg9346_EESK);
2009 RTL_EEPROM_DELAY();
2010 }
2011
2012 /* lower clock */
2013 RTL_W8(Cfg9346, Cfg9346_Program | Cfg9346_EECS);
2014 RTL_EEPROM_DELAY();
2015
2016 /* read back 16bit value */
2017 for (i = 16; i > 0; i--) {
2018 /* raise clock */
2019 RTL_W8(Cfg9346, Cfg9346_Program | Cfg9346_EECS | Cfg9346_EESK);
2020 RTL_EEPROM_DELAY();
2021
2022 result <<= 1;
2023 result |= (RTL_R8(Cfg9346) & Cfg9346_EEDO) ? 1 : 0;
2024
2025 /* lower clock */
2026 RTL_W8(Cfg9346, Cfg9346_Program | Cfg9346_EECS);
2027 RTL_EEPROM_DELAY();
2028 }
2029
2030 RTL_W8(Cfg9346, Cfg9346_Program);
2031 /* leave programming mode */
2032 RTL_W8(Cfg9346, Cfg9346_Lock);
2033
2034 return result;
2035}
2036
2037static void rtl_init_mac_address(struct rtl8169_private *tp,
2038 void __iomem *ioaddr)
2039{
2040 struct pci_dev *pdev = tp->pci_dev;
2041 u16 x;
2042 u8 mac[8];
2043
2044 /* read EEPROM signature */
2045 x = rtl_eeprom_read(ioaddr, RTL_EEPROM_SIG_ADDR);
2046
2047 if (x != RTL_EEPROM_SIG) {
2048 dev_info(&pdev->dev, "Missing EEPROM signature: %04x\n", x);
2049 return;
2050 }
2051
2052 /* read MAC address */
2053 x = rtl_eeprom_read(ioaddr, RTL_EEPROM_MAC_ADDR);
2054 mac[0] = x & 0xff;
2055 mac[1] = x >> 8;
2056 x = rtl_eeprom_read(ioaddr, RTL_EEPROM_MAC_ADDR + 1);
2057 mac[2] = x & 0xff;
2058 mac[3] = x >> 8;
2059 x = rtl_eeprom_read(ioaddr, RTL_EEPROM_MAC_ADDR + 2);
2060 mac[4] = x & 0xff;
2061 mac[5] = x >> 8;
2062
2063 if (netif_msg_probe(tp)) {
2064 DECLARE_MAC_BUF(buf);
2065
2066 dev_info(&pdev->dev, "MAC address found in EEPROM: %s\n",
2067 print_mac(buf, mac));
2068 }
2069
2070 if (is_valid_ether_addr(mac))
2071 rtl_rar_set(tp, mac);
2072}
2073
2074static int __devinit 1966static int __devinit
2075rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1967rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2076{ 1968{
@@ -2249,8 +2141,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2249 2141
2250 tp->mmio_addr = ioaddr; 2142 tp->mmio_addr = ioaddr;
2251 2143
2252 rtl_init_mac_address(tp, ioaddr);
2253
2254 /* Get MAC address */ 2144 /* Get MAC address */
2255 for (i = 0; i < MAC_ADDR_LEN; i++) 2145 for (i = 0; i < MAC_ADDR_LEN; i++)
2256 dev->dev_addr[i] = RTL_R8(MAC0 + i); 2146 dev->dev_addr[i] = RTL_R8(MAC0 + i);
@@ -3363,13 +3253,6 @@ static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
3363 opts1 |= FirstFrag; 3253 opts1 |= FirstFrag;
3364 } else { 3254 } else {
3365 len = skb->len; 3255 len = skb->len;
3366
3367 if (unlikely(len < ETH_ZLEN)) {
3368 if (skb_padto(skb, ETH_ZLEN))
3369 goto err_update_stats;
3370 len = ETH_ZLEN;
3371 }
3372
3373 opts1 |= FirstFrag | LastFrag; 3256 opts1 |= FirstFrag | LastFrag;
3374 tp->tx_skb[entry].skb = skb; 3257 tp->tx_skb[entry].skb = skb;
3375 } 3258 }
@@ -3407,7 +3290,6 @@ out:
3407err_stop: 3290err_stop:
3408 netif_stop_queue(dev); 3291 netif_stop_queue(dev);
3409 ret = NETDEV_TX_BUSY; 3292 ret = NETDEV_TX_BUSY;
3410err_update_stats:
3411 dev->stats.tx_dropped++; 3293 dev->stats.tx_dropped++;
3412 goto out; 3294 goto out;
3413} 3295}
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 7f8e514eb5e9..7b1882765a0c 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -687,6 +687,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
687{ 687{
688 struct net_device *ndev = netdev; 688 struct net_device *ndev = netdev;
689 struct sh_eth_private *mdp = netdev_priv(ndev); 689 struct sh_eth_private *mdp = netdev_priv(ndev);
690 irqreturn_t ret = IRQ_NONE;
690 u32 ioaddr, boguscnt = RX_RING_SIZE; 691 u32 ioaddr, boguscnt = RX_RING_SIZE;
691 u32 intr_status = 0; 692 u32 intr_status = 0;
692 693
@@ -696,7 +697,13 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
696 /* Get interrpt stat */ 697 /* Get interrpt stat */
697 intr_status = ctrl_inl(ioaddr + EESR); 698 intr_status = ctrl_inl(ioaddr + EESR);
698 /* Clear interrupt */ 699 /* Clear interrupt */
699 ctrl_outl(intr_status, ioaddr + EESR); 700 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
701 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
702 TX_CHECK | EESR_ERR_CHECK)) {
703 ctrl_outl(intr_status, ioaddr + EESR);
704 ret = IRQ_HANDLED;
705 } else
706 goto other_irq;
700 707
701 if (intr_status & (EESR_FRC | /* Frame recv*/ 708 if (intr_status & (EESR_FRC | /* Frame recv*/
702 EESR_RMAF | /* Multi cast address recv*/ 709 EESR_RMAF | /* Multi cast address recv*/
@@ -723,9 +730,10 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
723 ndev->name, intr_status); 730 ndev->name, intr_status);
724 } 731 }
725 732
733other_irq:
726 spin_unlock(&mdp->lock); 734 spin_unlock(&mdp->lock);
727 735
728 return IRQ_HANDLED; 736 return ret;
729} 737}
730 738
731static void sh_eth_timer(unsigned long data) 739static void sh_eth_timer(unsigned long data)
@@ -844,7 +852,13 @@ static int sh_eth_open(struct net_device *ndev)
844 int ret = 0; 852 int ret = 0;
845 struct sh_eth_private *mdp = netdev_priv(ndev); 853 struct sh_eth_private *mdp = netdev_priv(ndev);
846 854
847 ret = request_irq(ndev->irq, &sh_eth_interrupt, 0, ndev->name, ndev); 855 ret = request_irq(ndev->irq, &sh_eth_interrupt,
856#if defined(CONFIG_CPU_SUBTYPE_SH7763) || defined(CONFIG_CPU_SUBTYPE_SH7764)
857 IRQF_SHARED,
858#else
859 0,
860#endif
861 ndev->name, ndev);
848 if (ret) { 862 if (ret) {
849 printk(KERN_ERR "Can not assign IRQ number to %s\n", CARDNAME); 863 printk(KERN_ERR "Can not assign IRQ number to %s\n", CARDNAME);
850 return ret; 864 return ret;
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index 73bc7181cc18..1537e13e623d 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -43,8 +43,8 @@
43 43
44#define SH7763_SKB_ALIGN 32 44#define SH7763_SKB_ALIGN 32
45/* Chip Base Address */ 45/* Chip Base Address */
46# define SH_TSU_ADDR 0xFFE01800 46# define SH_TSU_ADDR 0xFEE01800
47# define ARSTR 0xFFE01800 47# define ARSTR SH_TSU_ADDR
48 48
49/* Chip Registers */ 49/* Chip Registers */
50/* E-DMAC */ 50/* E-DMAC */
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 9a78daec2fe9..d1590ac55e4b 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1225,6 +1225,10 @@ static int smsc911x_open(struct net_device *dev)
1225 dev_info(&dev->dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n", 1225 dev_info(&dev->dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n",
1226 (unsigned long)pdata->ioaddr, dev->irq); 1226 (unsigned long)pdata->ioaddr, dev->irq);
1227 1227
1228 /* Reset the last known duplex and carrier */
1229 pdata->last_duplex = -1;
1230 pdata->last_carrier = -1;
1231
1228 /* Bring the PHY up */ 1232 /* Bring the PHY up */
1229 phy_start(pdata->phy_dev); 1233 phy_start(pdata->phy_dev);
1230 1234
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 0fcb7503363d..c9c7650826c0 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2998,8 +2998,11 @@ static const struct net_device_ops gem_netdev_ops = {
2998 .ndo_do_ioctl = gem_ioctl, 2998 .ndo_do_ioctl = gem_ioctl,
2999 .ndo_tx_timeout = gem_tx_timeout, 2999 .ndo_tx_timeout = gem_tx_timeout,
3000 .ndo_change_mtu = gem_change_mtu, 3000 .ndo_change_mtu = gem_change_mtu,
3001 .ndo_set_mac_address = eth_mac_addr,
3002 .ndo_validate_addr = eth_validate_addr, 3001 .ndo_validate_addr = eth_validate_addr,
3002 .ndo_set_mac_address = gem_set_mac_address,
3003#ifdef CONFIG_NET_POLL_CONTROLLER
3004 .ndo_poll_controller = gem_poll_controller,
3005#endif
3003}; 3006};
3004 3007
3005static int __devinit gem_init_one(struct pci_dev *pdev, 3008static int __devinit gem_init_one(struct pci_dev *pdev,
@@ -3161,10 +3164,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3161 dev->watchdog_timeo = 5 * HZ; 3164 dev->watchdog_timeo = 5 * HZ;
3162 dev->irq = pdev->irq; 3165 dev->irq = pdev->irq;
3163 dev->dma = 0; 3166 dev->dma = 0;
3164 dev->set_mac_address = gem_set_mac_address;
3165#ifdef CONFIG_NET_POLL_CONTROLLER
3166 dev->poll_controller = gem_poll_controller;
3167#endif
3168 3167
3169 /* Set that now, in case PM kicks in now */ 3168 /* Set that now, in case PM kicks in now */
3170 pci_set_drvdata(pdev, dev); 3169 pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index d4fb4acdbebd..4e9bd380a5c2 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2649,8 +2649,6 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
2649 int err = -ENODEV; 2649 int err = -ENODEV;
2650 2650
2651 sbus_dp = to_of_device(op->dev.parent)->node; 2651 sbus_dp = to_of_device(op->dev.parent)->node;
2652 if (is_qfe)
2653 sbus_dp = to_of_device(op->dev.parent->parent)->node;
2654 2652
2655 /* We can match PCI devices too, do not accept those here. */ 2653 /* We can match PCI devices too, do not accept those here. */
2656 if (strcmp(sbus_dp->name, "sbus")) 2654 if (strcmp(sbus_dp->name, "sbus"))
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index bee75fa87a9c..2abb5d3becc6 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -255,6 +255,7 @@ const char tulip_media_cap[32] =
255 255
256static void tulip_tx_timeout(struct net_device *dev); 256static void tulip_tx_timeout(struct net_device *dev);
257static void tulip_init_ring(struct net_device *dev); 257static void tulip_init_ring(struct net_device *dev);
258static void tulip_free_ring(struct net_device *dev);
258static int tulip_start_xmit(struct sk_buff *skb, struct net_device *dev); 259static int tulip_start_xmit(struct sk_buff *skb, struct net_device *dev);
259static int tulip_open(struct net_device *dev); 260static int tulip_open(struct net_device *dev);
260static int tulip_close(struct net_device *dev); 261static int tulip_close(struct net_device *dev);
@@ -502,16 +503,21 @@ tulip_open(struct net_device *dev)
502{ 503{
503 int retval; 504 int retval;
504 505
505 if ((retval = request_irq(dev->irq, &tulip_interrupt, IRQF_SHARED, dev->name, dev)))
506 return retval;
507
508 tulip_init_ring (dev); 506 tulip_init_ring (dev);
509 507
508 retval = request_irq(dev->irq, &tulip_interrupt, IRQF_SHARED, dev->name, dev);
509 if (retval)
510 goto free_ring;
511
510 tulip_up (dev); 512 tulip_up (dev);
511 513
512 netif_start_queue (dev); 514 netif_start_queue (dev);
513 515
514 return 0; 516 return 0;
517
518free_ring:
519 tulip_free_ring (dev);
520 return retval;
515} 521}
516 522
517 523
@@ -768,23 +774,11 @@ static void tulip_down (struct net_device *dev)
768 tulip_set_power_state (tp, 0, 1); 774 tulip_set_power_state (tp, 0, 1);
769} 775}
770 776
771 777static void tulip_free_ring (struct net_device *dev)
772static int tulip_close (struct net_device *dev)
773{ 778{
774 struct tulip_private *tp = netdev_priv(dev); 779 struct tulip_private *tp = netdev_priv(dev);
775 void __iomem *ioaddr = tp->base_addr;
776 int i; 780 int i;
777 781
778 netif_stop_queue (dev);
779
780 tulip_down (dev);
781
782 if (tulip_debug > 1)
783 printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
784 dev->name, ioread32 (ioaddr + CSR5));
785
786 free_irq (dev->irq, dev);
787
788 /* Free all the skbuffs in the Rx queue. */ 782 /* Free all the skbuffs in the Rx queue. */
789 for (i = 0; i < RX_RING_SIZE; i++) { 783 for (i = 0; i < RX_RING_SIZE; i++) {
790 struct sk_buff *skb = tp->rx_buffers[i].skb; 784 struct sk_buff *skb = tp->rx_buffers[i].skb;
@@ -803,6 +797,7 @@ static int tulip_close (struct net_device *dev)
803 dev_kfree_skb (skb); 797 dev_kfree_skb (skb);
804 } 798 }
805 } 799 }
800
806 for (i = 0; i < TX_RING_SIZE; i++) { 801 for (i = 0; i < TX_RING_SIZE; i++) {
807 struct sk_buff *skb = tp->tx_buffers[i].skb; 802 struct sk_buff *skb = tp->tx_buffers[i].skb;
808 803
@@ -814,6 +809,24 @@ static int tulip_close (struct net_device *dev)
814 tp->tx_buffers[i].skb = NULL; 809 tp->tx_buffers[i].skb = NULL;
815 tp->tx_buffers[i].mapping = 0; 810 tp->tx_buffers[i].mapping = 0;
816 } 811 }
812}
813
814static int tulip_close (struct net_device *dev)
815{
816 struct tulip_private *tp = netdev_priv(dev);
817 void __iomem *ioaddr = tp->base_addr;
818
819 netif_stop_queue (dev);
820
821 tulip_down (dev);
822
823 if (tulip_debug > 1)
824 printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
825 dev->name, ioread32 (ioaddr + CSR5));
826
827 free_irq (dev->irq, dev);
828
829 tulip_free_ring (dev);
817 830
818 return 0; 831 return 0;
819} 832}
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index e87986867ba5..1f61e42c641d 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1536,32 +1536,15 @@ static void adjust_link(struct net_device *dev)
1536static int init_phy(struct net_device *dev) 1536static int init_phy(struct net_device *dev)
1537{ 1537{
1538 struct ucc_geth_private *priv = netdev_priv(dev); 1538 struct ucc_geth_private *priv = netdev_priv(dev);
1539 struct device_node *np = priv->node; 1539 struct ucc_geth_info *ug_info = priv->ug_info;
1540 struct device_node *phy, *mdio;
1541 const phandle *ph;
1542 char bus_name[MII_BUS_ID_SIZE];
1543 const unsigned int *id;
1544 struct phy_device *phydev; 1540 struct phy_device *phydev;
1545 char phy_id[BUS_ID_SIZE];
1546 1541
1547 priv->oldlink = 0; 1542 priv->oldlink = 0;
1548 priv->oldspeed = 0; 1543 priv->oldspeed = 0;
1549 priv->oldduplex = -1; 1544 priv->oldduplex = -1;
1550 1545
1551 ph = of_get_property(np, "phy-handle", NULL); 1546 phydev = phy_connect(dev, ug_info->phy_bus_id, &adjust_link, 0,
1552 phy = of_find_node_by_phandle(*ph); 1547 priv->phy_interface);
1553 mdio = of_get_parent(phy);
1554
1555 id = of_get_property(phy, "reg", NULL);
1556
1557 of_node_put(phy);
1558 of_node_put(mdio);
1559
1560 uec_mdio_bus_name(bus_name, mdio);
1561 snprintf(phy_id, sizeof(phy_id), "%s:%02x",
1562 bus_name, *id);
1563
1564 phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);
1565 1548
1566 if (IS_ERR(phydev)) { 1549 if (IS_ERR(phydev)) {
1567 printk("%s: Could not attach to PHY\n", dev->name); 1550 printk("%s: Could not attach to PHY\n", dev->name);
@@ -3629,10 +3612,12 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3629 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); 3612 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
3630 fixed_link = of_get_property(np, "fixed-link", NULL); 3613 fixed_link = of_get_property(np, "fixed-link", NULL);
3631 if (fixed_link) { 3614 if (fixed_link) {
3632 snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "0"); 3615 snprintf(ug_info->phy_bus_id, sizeof(ug_info->phy_bus_id),
3633 ug_info->phy_address = fixed_link[0]; 3616 PHY_ID_FMT, "0", fixed_link[0]);
3634 phy = NULL; 3617 phy = NULL;
3635 } else { 3618 } else {
3619 char bus_name[MII_BUS_ID_SIZE];
3620
3636 ph = of_get_property(np, "phy-handle", NULL); 3621 ph = of_get_property(np, "phy-handle", NULL);
3637 phy = of_find_node_by_phandle(*ph); 3622 phy = of_find_node_by_phandle(*ph);
3638 3623
@@ -3643,7 +3628,6 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3643 prop = of_get_property(phy, "reg", NULL); 3628 prop = of_get_property(phy, "reg", NULL);
3644 if (prop == NULL) 3629 if (prop == NULL)
3645 return -1; 3630 return -1;
3646 ug_info->phy_address = *prop;
3647 3631
3648 /* Set the bus id */ 3632 /* Set the bus id */
3649 mdio = of_get_parent(phy); 3633 mdio = of_get_parent(phy);
@@ -3657,7 +3641,9 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3657 if (err) 3641 if (err)
3658 return -1; 3642 return -1;
3659 3643
3660 snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "%x", res.start); 3644 uec_mdio_bus_name(bus_name, mdio);
3645 snprintf(ug_info->phy_bus_id, sizeof(ug_info->phy_bus_id),
3646 "%s:%02x", bus_name, *prop);
3661 } 3647 }
3662 3648
3663 /* get the phy interface type, or default to MII */ 3649 /* get the phy interface type, or default to MII */
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 16cbe42ba43c..611bdef2402b 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -1091,8 +1091,7 @@ struct ucc_geth_info {
1091 u32 eventRegMask; 1091 u32 eventRegMask;
1092 u16 pausePeriod; 1092 u16 pausePeriod;
1093 u16 extensionField; 1093 u16 extensionField;
1094 u8 phy_address; 1094 char phy_bus_id[BUS_ID_SIZE];
1095 char mdio_bus[MII_BUS_ID_SIZE];
1096 u8 weightfactor[NUM_TX_QUEUES]; 1095 u8 weightfactor[NUM_TX_QUEUES];
1097 u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; 1096 u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
1098 u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX]; 1097 u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index c5691fdb7079..fb53ef872df3 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1838,17 +1838,19 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
1838{ 1838{
1839 struct sk_buff *skb = tdinfo->skb; 1839 struct sk_buff *skb = tdinfo->skb;
1840 int i; 1840 int i;
1841 int pktlen;
1841 1842
1842 /* 1843 /*
1843 * Don't unmap the pre-allocated tx_bufs 1844 * Don't unmap the pre-allocated tx_bufs
1844 */ 1845 */
1845 if (tdinfo->skb_dma) { 1846 if (tdinfo->skb_dma) {
1846 1847
1848 pktlen = (skb->len > ETH_ZLEN ? : ETH_ZLEN);
1847 for (i = 0; i < tdinfo->nskb_dma; i++) { 1849 for (i = 0; i < tdinfo->nskb_dma; i++) {
1848#ifdef VELOCITY_ZERO_COPY_SUPPORT 1850#ifdef VELOCITY_ZERO_COPY_SUPPORT
1849 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE); 1851 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
1850#else 1852#else
1851 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE); 1853 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE);
1852#endif 1854#endif
1853 tdinfo->skb_dma[i] = 0; 1855 tdinfo->skb_dma[i] = 0;
1854 } 1856 }
@@ -2080,17 +2082,14 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2080 struct tx_desc *td_ptr; 2082 struct tx_desc *td_ptr;
2081 struct velocity_td_info *tdinfo; 2083 struct velocity_td_info *tdinfo;
2082 unsigned long flags; 2084 unsigned long flags;
2083 int pktlen = skb->len; 2085 int pktlen;
2084 __le16 len; 2086 __le16 len;
2085 int index; 2087 int index;
2086 2088
2087 2089
2088 2090 if (skb_padto(skb, ETH_ZLEN))
2089 if (skb->len < ETH_ZLEN) { 2091 goto out;
2090 if (skb_padto(skb, ETH_ZLEN)) 2092 pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
2091 goto out;
2092 pktlen = ETH_ZLEN;
2093 }
2094 2093
2095 len = cpu_to_le16(pktlen); 2094 len = cpu_to_le16(pktlen);
2096 2095
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c68808336c8c..e67d16c2e5f3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -612,6 +612,7 @@ static struct ethtool_ops virtnet_ethtool_ops = {
612 .set_tx_csum = virtnet_set_tx_csum, 612 .set_tx_csum = virtnet_set_tx_csum,
613 .set_sg = ethtool_op_set_sg, 613 .set_sg = ethtool_op_set_sg,
614 .set_tso = ethtool_op_set_tso, 614 .set_tso = ethtool_op_set_tso,
615 .get_link = ethtool_op_get_link,
615}; 616};
616 617
617#define MIN_MTU 68 618#define MIN_MTU 68
@@ -739,6 +740,8 @@ static int virtnet_probe(struct virtio_device *vdev)
739 goto unregister; 740 goto unregister;
740 } 741 }
741 742
743 netif_carrier_on(dev);
744
742 pr_debug("virtnet: registered device %s\n", dev->name); 745 pr_debug("virtnet: registered device %s\n", dev->name);
743 return 0; 746 return 0;
744 747
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
index d27813502953..6650f609ece4 100644
--- a/drivers/net/wireless/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -587,8 +587,8 @@ struct ath9k_country_entry {
587 u8 iso[3]; 587 u8 iso[3];
588}; 588};
589 589
590#define REG_WRITE(_ah, _reg, _val) iowrite32(_val, _ah->ah_sh + _reg) 590#define REG_WRITE(_ah, _reg, _val) ath9k_iowrite32((_ah), (_reg), (_val))
591#define REG_READ(_ah, _reg) ioread32(_ah->ah_sh + _reg) 591#define REG_READ(_ah, _reg) ath9k_ioread32((_ah), (_reg))
592 592
593#define SM(_v, _f) (((_v) << _f##_S) & _f) 593#define SM(_v, _f) (((_v) << _f##_S) & _f)
594#define MS(_v, _f) (((_v) & _f) >> _f##_S) 594#define MS(_v, _f) (((_v) & _f) >> _f##_S)
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
index 4ca2aed236e0..139566cbbf65 100644
--- a/drivers/net/wireless/ath9k/core.h
+++ b/drivers/net/wireless/ath9k/core.h
@@ -701,6 +701,7 @@ struct ath_softc {
701 struct ath_hal *sc_ah; 701 struct ath_hal *sc_ah;
702 void __iomem *mem; 702 void __iomem *mem;
703 spinlock_t sc_resetlock; 703 spinlock_t sc_resetlock;
704 spinlock_t sc_serial_rw;
704 struct mutex mutex; 705 struct mutex mutex;
705 706
706 u8 sc_curbssid[ETH_ALEN]; 707 u8 sc_curbssid[ETH_ALEN];
@@ -751,4 +752,36 @@ int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
751int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc); 752int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
752int ath_cabq_update(struct ath_softc *); 753int ath_cabq_update(struct ath_softc *);
753 754
755/*
756 * Read and write, they both share the same lock. We do this to serialize
757 * reads and writes on Atheros 802.11n PCI devices only. This is required
758 * as the FIFO on these devices can only accept sanely 2 requests. After
759 * that the device goes bananas. Serializing the reads/writes prevents this
760 * from happening.
761 */
762
763static inline void ath9k_iowrite32(struct ath_hal *ah, u32 reg_offset, u32 val)
764{
765 if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
766 unsigned long flags;
767 spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
768 iowrite32(val, ah->ah_sc->mem + reg_offset);
769 spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
770 } else
771 iowrite32(val, ah->ah_sc->mem + reg_offset);
772}
773
774static inline unsigned int ath9k_ioread32(struct ath_hal *ah, u32 reg_offset)
775{
776 u32 val;
777 if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
778 unsigned long flags;
779 spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
780 val = ioread32(ah->ah_sc->mem + reg_offset);
781 spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
782 } else
783 val = ioread32(ah->ah_sc->mem + reg_offset);
784 return val;
785}
786
754#endif /* CORE_H */ 787#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
index 34474edefc97..c38a00bbce64 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -437,6 +437,25 @@ static void ath9k_hw_set_defaults(struct ath_hal *ah)
437 } 437 }
438 438
439 ah->ah_config.intr_mitigation = 1; 439 ah->ah_config.intr_mitigation = 1;
440
441 /*
442 * We need this for PCI devices only (Cardbus, PCI, miniPCI)
443 * _and_ if on non-uniprocessor systems (Multiprocessor/HT).
444 * This means we use it for all AR5416 devices, and the few
445 * minor PCI AR9280 devices out there.
446 *
447 * Serialization is required because these devices do not handle
448 * well the case of two concurrent reads/writes due to the latency
449 * involved. During one read/write another read/write can be issued
450 * on another CPU while the previous read/write may still be working
451 * on our hardware, if we hit this case the hardware poops in a loop.
452 * We prevent this by serializing reads and writes.
453 *
454 * This issue is not present on PCI-Express devices or pre-AR5416
455 * devices (legacy, 802.11abg).
456 */
457 if (num_possible_cpus() > 1)
458 ah->ah_config.serialize_regmode = SER_REG_MODE_AUTO;
440} 459}
441 460
442static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid, 461static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid,
@@ -668,7 +687,8 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid, struct ath_softc *sc,
668 } 687 }
669 688
670 if (ah->ah_config.serialize_regmode == SER_REG_MODE_AUTO) { 689 if (ah->ah_config.serialize_regmode == SER_REG_MODE_AUTO) {
671 if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) { 690 if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI ||
691 (AR_SREV_9280(ah) && !ah->ah_isPciExpress)) {
672 ah->ah_config.serialize_regmode = 692 ah->ah_config.serialize_regmode =
673 SER_REG_MODE_ON; 693 SER_REG_MODE_ON;
674 } else { 694 } else {
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 0e80990d8e84..3c04044a60bd 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -1336,6 +1336,7 @@ static int ath_init(u16 devid, struct ath_softc *sc)
1336 printk(KERN_ERR "Unable to create debugfs files\n"); 1336 printk(KERN_ERR "Unable to create debugfs files\n");
1337 1337
1338 spin_lock_init(&sc->sc_resetlock); 1338 spin_lock_init(&sc->sc_resetlock);
1339 spin_lock_init(&sc->sc_serial_rw);
1339 mutex_init(&sc->mutex); 1340 mutex_init(&sc->mutex);
1340 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc); 1341 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1341 tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet, 1342 tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index a611ad857983..847057d682b1 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -575,13 +575,17 @@ static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
575 575
576 r = fill_ctrlset(mac, skb); 576 r = fill_ctrlset(mac, skb);
577 if (r) 577 if (r)
578 return r; 578 goto fail;
579 579
580 info->rate_driver_data[0] = hw; 580 info->rate_driver_data[0] = hw;
581 581
582 r = zd_usb_tx(&mac->chip.usb, skb); 582 r = zd_usb_tx(&mac->chip.usb, skb);
583 if (r) 583 if (r)
584 return r; 584 goto fail;
585 return 0;
586
587fail:
588 dev_kfree_skb(skb);
585 return 0; 589 return 0;
586} 590}
587 591