author      Linus Torvalds <torvalds@g5.osdl.org>  2006-10-11 14:19:30 -0400
committer   Linus Torvalds <torvalds@g5.osdl.org>  2006-10-11 14:19:30 -0400
commit      60ca97584fcb98d2f1b14e4c2f12203dd5b3888e (patch)
tree        eccd93512a4b3b1e5cb5f721084ee51165ff4965 /drivers/net
parent      f061c5847bcc72eebf6a783f458d42092eac1b6a (diff)
parent      4a1d2d81fa327d095a0a8a1f961bace5b0a2f7da (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (42 commits)
  [PATCH] Fix section mismatch in de2104x.c
  [PATCH] sky2: set lower pause threshold to prevent overrun
  [PATCH] sky2: revert pci express extensions
  [PATCH] skge: version 1.9
  [PATCH] skge: better flow control negotiation
  [PATCH] skge: pause mapping for fiber
  [PATCH] skge: fix stuck irq when fiber down
  [PATCH] powerpc/cell spidernet release all descrs
  [PATCH] powerpc/cell spidernet DMA direction fix
  [PATCH] powerpc/cell spidernet variable name change
  [PATCH] powerpc/cell spidernet reduce DMA kicking
  [PATCH] powerpc/cell spidernet
  [PATCH] powerpc/cell spidernet refine locking
  [PATCH] powerpc/cell spidernet NAPI polling info.
  [PATCH] powerpc/cell spidernet low watermark patch.
  [PATCH] powerpc/cell spidernet incorrect offset
  [PATCH] powerpc/cell spidernet stop error printing patch.
  [PATCH] powerpc/cell spidernet fix error interrupt print
  [PATCH] powerpc/cell spidernet bogus rx interrupt bit
  [PATCH] Spidernet stop queue when queue is full.
  ...
Diffstat (limited to 'drivers/net')
-rw-r--r--   drivers/net/b44.c                    9
-rw-r--r--   drivers/net/bonding/bond_alb.c       4
-rw-r--r--   drivers/net/ehea/ehea.h              13
-rw-r--r--   drivers/net/ehea/ehea_main.c         6
-rw-r--r--   drivers/net/ehea/ehea_phyp.c         573
-rw-r--r--   drivers/net/forcedeth.c              31
-rw-r--r--   drivers/net/ibmveth.c                58
-rw-r--r--   drivers/net/mv643xx_eth.c            4
-rw-r--r--   drivers/net/skge.c                   220
-rw-r--r--   drivers/net/skge.h                   25
-rw-r--r--   drivers/net/sky2.c                   36
-rw-r--r--   drivers/net/sky2.h                   45
-rw-r--r--   drivers/net/smc91x.h                 18
-rw-r--r--   drivers/net/spider_net.c             246
-rw-r--r--   drivers/net/spider_net.h             35
-rw-r--r--   drivers/net/spider_net_ethtool.c     6
-rw-r--r--   drivers/net/tulip/de2104x.c          8
17 files changed, 725 insertions, 612 deletions
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index b124eee4eb10..1ec217433b4c 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1706,14 +1706,15 @@ static void __b44_set_rx_mode(struct net_device *dev)
 
 	__b44_set_mac_addr(bp);
 
-	if (dev->flags & IFF_ALLMULTI)
+	if ((dev->flags & IFF_ALLMULTI) ||
+	    (dev->mc_count > B44_MCAST_TABLE_SIZE))
 		val |= RXCONFIG_ALLMULTI;
 	else
 		i = __b44_load_mcast(bp, dev);
 
-	for (; i < 64; i++) {
+	for (; i < 64; i++)
 		__b44_cam_write(bp, zero, i);
-	}
+
 	bw32(bp, B44_RXCONFIG, val);
 	val = br32(bp, B44_CAM_CTRL);
 	bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
@@ -2055,7 +2056,7 @@ static int b44_read_eeprom(struct b44 *bp, u8 *data)
 	u16 *ptr = (u16 *) data;
 
 	for (i = 0; i < 128; i += 2)
-		ptr[i / 2] = readw(bp->regs + 4096 + i);
+		ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
 
 	return 0;
 }
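Two separate fixes land in b44.c: the RX-mode hunk falls back to hardware all-multicast once dev->mc_count outgrows the 64-entry CAM filter, and the EEPROM hunk converts each 16-bit word with cpu_to_le16 so the byte layout handed back is the same on big- and little-endian hosts. A small userspace sketch of the endianness point, with htole16 standing in for the kernel's cpu_to_le16 and the register read simulated:

    /* Userspace illustration of the EEPROM endianness fix: each 16-bit
     * word read from the device is CPU-endian, but callers expect the
     * EEPROM's little-endian byte order, so convert before storing. */
    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t cpu_word = 0x1234;           /* as returned by readw() */
            uint16_t le_word = htole16(cpu_word); /* cpu_to_le16 equivalent */

            printf("cpu=%04x le bytes=%02x %02x\n", cpu_word,
                   ((uint8_t *)&le_word)[0], ((uint8_t *)&le_word)[1]);
            return 0;
    }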
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index e83bc825f6af..32923162179e 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1433,7 +1433,7 @@ void bond_alb_monitor(struct bonding *bond)
 	 * write lock to protect from other code that also
 	 * sets the promiscuity.
 	 */
-	write_lock(&bond->curr_slave_lock);
+	write_lock_bh(&bond->curr_slave_lock);
 
 	if (bond_info->primary_is_promisc &&
 	    (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
@@ -1448,7 +1448,7 @@ void bond_alb_monitor(struct bonding *bond)
 		bond_info->primary_is_promisc = 0;
 	}
 
-	write_unlock(&bond->curr_slave_lock);
+	write_unlock_bh(&bond->curr_slave_lock);
 
 	if (bond_info->rlb_rebalance) {
 		bond_info->rlb_rebalance = 0;
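The switch to the _bh lock variants matters because curr_slave_lock is also taken from paths that can run in softirq context; bond_alb_monitor runs from timer/workqueue context, and holding the plain write lock there would let a softirq on the same CPU deadlock against it. A minimal kernel-style sketch of the convention (illustrative lock and functions, not bonding code, and not a standalone program):

    /* A lock that is also taken from softirq context must disable
     * bottom halves while held in process context, or a softirq
     * arriving on the same CPU can deadlock trying to take it again. */
    static DEFINE_RWLOCK(example_lock);

    static void timer_side(void)
    {
            write_lock_bh(&example_lock);   /* blocks local softirqs too */
            /* ... modify state shared with the softirq path ... */
            write_unlock_bh(&example_lock);
    }

    static void softirq_side(void)
    {
            write_lock(&example_lock);      /* already in BH context */
            /* ... */
            write_unlock(&example_lock);
    }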
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 23b451a8ae12..b40724fc6b74 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,7 +39,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0028"
+#define DRV_VERSION	"EHEA_0034"
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
 	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -50,6 +50,7 @@
 #define EHEA_MAX_ENTRIES_SQ    32767
 #define EHEA_MIN_ENTRIES_QP    127
 
+#define EHEA_SMALL_QUEUES
 #define EHEA_NUM_TX_QP 1
 
 #ifdef EHEA_SMALL_QUEUES
@@ -59,11 +60,11 @@
 #define EHEA_DEF_ENTRIES_RQ2  1023
 #define EHEA_DEF_ENTRIES_RQ3  1023
 #else
-#define EHEA_MAX_CQE_COUNT    32000
-#define EHEA_DEF_ENTRIES_SQ   16000
-#define EHEA_DEF_ENTRIES_RQ1  32080
-#define EHEA_DEF_ENTRIES_RQ2   4020
-#define EHEA_DEF_ENTRIES_RQ3   4020
+#define EHEA_MAX_CQE_COUNT     4080
+#define EHEA_DEF_ENTRIES_SQ    4080
+#define EHEA_DEF_ENTRIES_RQ1   8160
+#define EHEA_DEF_ENTRIES_RQ2   2040
+#define EHEA_DEF_ENTRIES_RQ3   2040
 #endif
 
 #define EHEA_MAX_ENTRIES_EQ 20
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index c6b31775e26b..eb7d44de59ff 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -766,7 +766,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
 		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
 			if (!netif_carrier_ok(port->netdev)) {
 				ret = ehea_sense_port_attr(
-					adapter->port[portnum]);
+					port);
 				if (ret) {
 					ehea_error("failed resensing port "
 						   "attributes");
@@ -818,7 +818,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
 		netif_stop_queue(port->netdev);
 		break;
 	default:
-		ehea_error("unknown event code %x", ec);
+		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
 		break;
 	}
 }
@@ -1841,7 +1841,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (netif_msg_tx_queued(port)) {
 		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
-		ehea_dump(swqe, sizeof(*swqe), "swqe");
+		ehea_dump(swqe, 512, "swqe");
 	}
 
 	ehea_post_swqe(pr->qp, swqe);
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 4a85aca4c7e9..0b51a8cea077 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -44,71 +44,99 @@ static inline u16 get_order_of_qentries(u16 queue_entries)
 #define H_ALL_RES_TYPE_MR        5
 #define H_ALL_RES_TYPE_MW        6
 
-static long ehea_hcall_9arg_9ret(unsigned long opcode,
-                                 unsigned long arg1, unsigned long arg2,
-                                 unsigned long arg3, unsigned long arg4,
-                                 unsigned long arg5, unsigned long arg6,
-                                 unsigned long arg7, unsigned long arg8,
-                                 unsigned long arg9, unsigned long *out1,
-                                 unsigned long *out2,unsigned long *out3,
-                                 unsigned long *out4,unsigned long *out5,
-                                 unsigned long *out6,unsigned long *out7,
-                                 unsigned long *out8,unsigned long *out9)
+static long ehea_plpar_hcall_norets(unsigned long opcode,
+                                    unsigned long arg1,
+                                    unsigned long arg2,
+                                    unsigned long arg3,
+                                    unsigned long arg4,
+                                    unsigned long arg5,
+                                    unsigned long arg6,
+                                    unsigned long arg7)
 {
-	long hret;
+	long ret;
 	int i, sleep_msecs;
 
 	for (i = 0; i < 5; i++) {
-		hret = plpar_hcall_9arg_9ret(opcode,arg1, arg2, arg3, arg4,
-					     arg5, arg6, arg7, arg8, arg9, out1,
-					     out2, out3, out4, out5, out6, out7,
-					     out8, out9);
-		if (H_IS_LONG_BUSY(hret)) {
-			sleep_msecs = get_longbusy_msecs(hret);
+		ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
+					 arg5, arg6, arg7);
+
+		if (H_IS_LONG_BUSY(ret)) {
+			sleep_msecs = get_longbusy_msecs(ret);
 			msleep_interruptible(sleep_msecs);
 			continue;
 		}
 
-		if (hret < H_SUCCESS)
-			ehea_error("op=%lx hret=%lx "
-				   "i1=%lx i2=%lx i3=%lx i4=%lx i5=%lx i6=%lx "
-				   "i7=%lx i8=%lx i9=%lx "
-				   "o1=%lx o2=%lx o3=%lx o4=%lx o5=%lx o6=%lx "
-				   "o7=%lx o8=%lx o9=%lx",
-				   opcode, hret, arg1, arg2, arg3, arg4, arg5,
-				   arg6, arg7, arg8, arg9, *out1, *out2, *out3,
-				   *out4, *out5, *out6, *out7, *out8, *out9);
-		return hret;
+		if (ret < H_SUCCESS)
+			ehea_error("opcode=%lx ret=%lx"
+				   " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+				   " arg5=%lx arg6=%lx arg7=%lx ",
+				   opcode, ret,
+				   arg1, arg2, arg3, arg4, arg5,
+				   arg6, arg7);
+
+		return ret;
 	}
+
 	return H_BUSY;
 }
 
+static long ehea_plpar_hcall9(unsigned long opcode,
+			      unsigned long *outs, /* array of 9 outputs */
+			      unsigned long arg1,
+			      unsigned long arg2,
+			      unsigned long arg3,
+			      unsigned long arg4,
+			      unsigned long arg5,
+			      unsigned long arg6,
+			      unsigned long arg7,
+			      unsigned long arg8,
+			      unsigned long arg9)
+{
+	long ret;
+	int i, sleep_msecs;
+
+	for (i = 0; i < 5; i++) {
+		ret = plpar_hcall9(opcode, outs,
+				   arg1, arg2, arg3, arg4, arg5,
+				   arg6, arg7, arg8, arg9);
+
+		if (H_IS_LONG_BUSY(ret)) {
+			sleep_msecs = get_longbusy_msecs(ret);
+			msleep_interruptible(sleep_msecs);
+			continue;
+		}
+
+		if (ret < H_SUCCESS)
+			ehea_error("opcode=%lx ret=%lx"
+				   " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+				   " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
+				   " arg9=%lx"
+				   " out1=%lx out2=%lx out3=%lx out4=%lx"
+				   " out5=%lx out6=%lx out7=%lx out8=%lx"
+				   " out9=%lx",
+				   opcode, ret,
+				   arg1, arg2, arg3, arg4, arg5,
+				   arg6, arg7, arg8, arg9,
+				   outs[0], outs[1], outs[2], outs[3],
+				   outs[4], outs[5], outs[6], outs[7],
+				   outs[8]);
+
+		return ret;
+	}
+
+	return H_BUSY;
+}
+
 u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
 			 const u64 qp_handle, const u64 sel_mask, void *cb_addr)
 {
-	u64 dummy;
-
-	if ((((u64)cb_addr) & (PAGE_SIZE - 1)) != 0) {
-		ehea_error("not on pageboundary");
-		return H_PARAMETER;
-	}
-
-	return ehea_hcall_9arg_9ret(H_QUERY_HEA_QP,
-				    adapter_handle,		/* R4 */
-				    qp_category,		/* R5 */
-				    qp_handle,			/* R6 */
-				    sel_mask,			/* R7 */
-				    virt_to_abs(cb_addr),	/* R8 */
-				    0, 0, 0, 0,			/* R9-R12 */
-				    &dummy,			/* R4 */
-				    &dummy,			/* R5 */
-				    &dummy,			/* R6 */
-				    &dummy,			/* R7 */
-				    &dummy,			/* R8 */
-				    &dummy,			/* R9 */
-				    &dummy,			/* R10 */
-				    &dummy,			/* R11 */
-				    &dummy);			/* R12 */
+	return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
+				       adapter_handle,		/* R4 */
+				       qp_category,		/* R5 */
+				       qp_handle,		/* R6 */
+				       sel_mask,		/* R7 */
+				       virt_to_abs(cb_addr),	/* R8 */
+				       0, 0);
 }
 
 /* input param R5 */
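Both new wrappers keep the old retry policy: if the hypervisor answers with a long-busy code, sleep for the hinted interval and try again, up to five times, before giving up with H_BUSY. A self-contained userspace sketch of that loop; h_call(), IS_LONG_BUSY() and busy_hint_ms() are hypothetical stand-ins for plpar_hcall*, H_IS_LONG_BUSY and get_longbusy_msecs:

    /* Bounded retry-on-busy with a backoff hint from the callee. */
    #include <unistd.h>

    #define MY_H_BUSY       (-2L)
    #define MY_H_LONG_BUSY  (-3L)

    static long h_call(void) { return MY_H_LONG_BUSY; }      /* stub */
    static int IS_LONG_BUSY(long r) { return r == MY_H_LONG_BUSY; }
    static unsigned busy_hint_ms(long r) { (void)r; return 10; }

    long call_with_retry(void)
    {
            long ret;
            int i;

            for (i = 0; i < 5; i++) {               /* bounded retries */
                    ret = h_call();
                    if (IS_LONG_BUSY(ret)) {
                            usleep(busy_hint_ms(ret) * 1000);
                            continue;               /* hinted backoff, retry */
                    }
                    return ret;     /* success or hard error: stop */
            }
            return MY_H_BUSY;       /* still busy after 5 attempts */
    }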
@@ -180,6 +208,7 @@ u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
 			     u64 *qp_handle, struct h_epas *h_epas)
 {
 	u64 hret;
+	u64 outs[PLPAR_HCALL9_BUFSIZE];
 
 	u64 allocate_controls =
 	     EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
@@ -219,45 +248,29 @@ u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
 	     EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
 	   | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);
 
-	u64 r5_out = 0;
-	u64 r6_out = 0;
-	u64 r7_out = 0;
-	u64 r8_out = 0;
-	u64 r9_out = 0;
-	u64 g_la_user_out = 0;
-	u64 r11_out = 0;
-	u64 r12_out = 0;
-
-	hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
-				    adapter_handle,		/* R4 */
-				    allocate_controls,		/* R5 */
-				    init_attr->send_cq_handle,	/* R6 */
-				    init_attr->recv_cq_handle,	/* R7 */
-				    init_attr->aff_eq_handle,	/* R8 */
-				    r9_reg,			/* R9 */
-				    max_r10_reg,		/* R10 */
-				    r11_in,			/* R11 */
-				    threshold,			/* R12 */
-				    qp_handle,			/* R4 */
-				    &r5_out,			/* R5 */
-				    &r6_out,			/* R6 */
-				    &r7_out,			/* R7 */
-				    &r8_out,			/* R8 */
-				    &r9_out,			/* R9 */
-				    &g_la_user_out,		/* R10 */
-				    &r11_out,			/* R11 */
-				    &r12_out);			/* R12 */
-
-	init_attr->qp_nr = (u32)r5_out;
+	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+				 outs,
+				 adapter_handle,		/* R4 */
+				 allocate_controls,		/* R5 */
+				 init_attr->send_cq_handle,	/* R6 */
+				 init_attr->recv_cq_handle,	/* R7 */
+				 init_attr->aff_eq_handle,	/* R8 */
+				 r9_reg,			/* R9 */
+				 max_r10_reg,			/* R10 */
+				 r11_in,			/* R11 */
+				 threshold);			/* R12 */
+
+	*qp_handle = outs[0];
+	init_attr->qp_nr = (u32)outs[1];
 
 	init_attr->act_nr_send_wqes =
-		(u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, r6_out);
+		(u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]);
 	init_attr->act_nr_rwqes_rq1 =
-		(u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, r6_out);
+		(u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]);
 	init_attr->act_nr_rwqes_rq2 =
-		(u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, r6_out);
+		(u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]);
 	init_attr->act_nr_rwqes_rq3 =
-		(u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, r6_out);
+		(u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]);
 
 	init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
 	init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
@@ -265,25 +278,25 @@ u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
 	init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;
 
 	init_attr->nr_sq_pages =
-		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, r8_out);
+		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]);
 	init_attr->nr_rq1_pages =
-		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, r8_out);
+		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]);
 	init_attr->nr_rq2_pages =
-		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, r9_out);
+		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]);
 	init_attr->nr_rq3_pages =
-		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, r9_out);
+		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]);
 
 	init_attr->liobn_sq =
-		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, r11_out);
+		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]);
 	init_attr->liobn_rq1 =
-		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, r11_out);
+		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]);
 	init_attr->liobn_rq2 =
-		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, r12_out);
+		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]);
 	init_attr->liobn_rq3 =
-		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, r12_out);
+		(u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]);
 
 	if (!hret)
-		hcp_epas_ctor(h_epas, g_la_user_out, g_la_user_out);
+		hcp_epas_ctor(h_epas, outs[6], outs[6]);
 
 	return hret;
 }
@@ -292,31 +305,24 @@ u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
 			     struct ehea_cq_attr *cq_attr,
 			     u64 *cq_handle, struct h_epas *epas)
 {
-	u64 hret, dummy, act_nr_of_cqes_out, act_pages_out;
-	u64 g_la_privileged_out, g_la_user_out;
-
-	hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
-				    adapter_handle,		/* R4 */
-				    H_ALL_RES_TYPE_CQ,		/* R5 */
-				    cq_attr->eq_handle,		/* R6 */
-				    cq_attr->cq_token,		/* R7 */
-				    cq_attr->max_nr_of_cqes,	/* R8 */
-				    0, 0, 0, 0,			/* R9-R12 */
-				    cq_handle,			/* R4 */
-				    &dummy,			/* R5 */
-				    &dummy,			/* R6 */
-				    &act_nr_of_cqes_out,	/* R7 */
-				    &act_pages_out,		/* R8 */
-				    &g_la_privileged_out,	/* R9 */
-				    &g_la_user_out,		/* R10 */
-				    &dummy,			/* R11 */
-				    &dummy);			/* R12 */
-
-	cq_attr->act_nr_of_cqes = act_nr_of_cqes_out;
-	cq_attr->nr_pages = act_pages_out;
+	u64 hret;
+	u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+				 outs,
+				 adapter_handle,		/* R4 */
+				 H_ALL_RES_TYPE_CQ,		/* R5 */
+				 cq_attr->eq_handle,		/* R6 */
+				 cq_attr->cq_token,		/* R7 */
+				 cq_attr->max_nr_of_cqes,	/* R8 */
+				 0, 0, 0, 0);			/* R9-R12 */
+
+	*cq_handle = outs[0];
+	cq_attr->act_nr_of_cqes = outs[3];
+	cq_attr->nr_pages = outs[4];
 
 	if (!hret)
-		hcp_epas_ctor(epas, g_la_privileged_out, g_la_user_out);
+		hcp_epas_ctor(epas, outs[5], outs[6]);
 
 	return hret;
 }
@@ -361,9 +367,8 @@ u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
 u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
 			     struct ehea_eq_attr *eq_attr, u64 *eq_handle)
 {
-	u64 hret, dummy, eq_liobn, allocate_controls;
-	u64 ist1_out, ist2_out, ist3_out, ist4_out;
-	u64 act_nr_of_eqes_out, act_pages_out;
+	u64 hret, allocate_controls;
+	u64 outs[PLPAR_HCALL9_BUFSIZE];
 
 	/* resource type */
 	allocate_controls =
@@ -372,27 +377,20 @@ u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
 	   | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
 	   | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);
 
-	hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
-				    adapter_handle,		/* R4 */
-				    allocate_controls,		/* R5 */
-				    eq_attr->max_nr_of_eqes,	/* R6 */
-				    0, 0, 0, 0, 0, 0,		/* R7-R10 */
-				    eq_handle,			/* R4 */
-				    &dummy,			/* R5 */
-				    &eq_liobn,			/* R6 */
-				    &act_nr_of_eqes_out,	/* R7 */
-				    &act_pages_out,		/* R8 */
-				    &ist1_out,			/* R9 */
-				    &ist2_out,			/* R10 */
-				    &ist3_out,			/* R11 */
-				    &ist4_out);			/* R12 */
-
-	eq_attr->act_nr_of_eqes = act_nr_of_eqes_out;
-	eq_attr->nr_pages = act_pages_out;
-	eq_attr->ist1 = ist1_out;
-	eq_attr->ist2 = ist2_out;
-	eq_attr->ist3 = ist3_out;
-	eq_attr->ist4 = ist4_out;
+	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+				 outs,
+				 adapter_handle,		/* R4 */
+				 allocate_controls,		/* R5 */
+				 eq_attr->max_nr_of_eqes,	/* R6 */
+				 0, 0, 0, 0, 0, 0);		/* R7-R10 */
+
+	*eq_handle = outs[0];
+	eq_attr->act_nr_of_eqes = outs[3];
+	eq_attr->nr_pages = outs[4];
+	eq_attr->ist1 = outs[5];
+	eq_attr->ist2 = outs[6];
+	eq_attr->ist3 = outs[7];
+	eq_attr->ist4 = outs[8];
 
 	return hret;
 }
@@ -402,31 +400,22 @@ u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
 			  void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
 			  u16 *out_swr, u16 *out_rwr)
 {
-	u64 hret, dummy, act_out_swr, act_out_rwr;
-
-	if ((((u64)cb_addr) & (PAGE_SIZE - 1)) != 0) {
-		ehea_error("not on page boundary");
-		return H_PARAMETER;
-	}
-
-	hret = ehea_hcall_9arg_9ret(H_MODIFY_HEA_QP,
-				    adapter_handle,		/* R4 */
-				    (u64) cat,			/* R5 */
-				    qp_handle,			/* R6 */
-				    sel_mask,			/* R7 */
-				    virt_to_abs(cb_addr),	/* R8 */
-				    0, 0, 0, 0,			/* R9-R12 */
-				    inv_attr_id,		/* R4 */
-				    &dummy,			/* R5 */
-				    &dummy,			/* R6 */
-				    &act_out_swr,		/* R7 */
-				    &act_out_rwr,		/* R8 */
-				    proc_mask,			/* R9 */
-				    &dummy,			/* R10 */
-				    &dummy,			/* R11 */
-				    &dummy);			/* R12 */
-	*out_swr = act_out_swr;
-	*out_rwr = act_out_rwr;
+	u64 hret;
+	u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+	hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
+				 outs,
+				 adapter_handle,		/* R4 */
+				 (u64) cat,			/* R5 */
+				 qp_handle,			/* R6 */
+				 sel_mask,			/* R7 */
+				 virt_to_abs(cb_addr),		/* R8 */
+				 0, 0, 0, 0);			/* R9-R12 */
+
+	*inv_attr_id = outs[0];
+	*out_swr = outs[3];
+	*out_rwr = outs[4];
+	*proc_mask = outs[5];
 
 	return hret;
 }
@@ -435,122 +424,81 @@ u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
 			  const u8 queue_type, const u64 resource_handle,
 			  const u64 log_pageaddr, u64 count)
 {
-	u64 dummy, reg_control;
+	u64 reg_control;
 
 	reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
 		    | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);
 
-	return ehea_hcall_9arg_9ret(H_REGISTER_HEA_RPAGES,
-				    adapter_handle,		/* R4 */
-				    reg_control,		/* R5 */
-				    resource_handle,		/* R6 */
-				    log_pageaddr,		/* R7 */
-				    count,			/* R8 */
-				    0, 0, 0, 0,			/* R9-R12 */
-				    &dummy,			/* R4 */
-				    &dummy,			/* R5 */
-				    &dummy,			/* R6 */
-				    &dummy,			/* R7 */
-				    &dummy,			/* R8 */
-				    &dummy,			/* R9 */
-				    &dummy,			/* R10 */
-				    &dummy,			/* R11 */
-				    &dummy);			/* R12 */
+	return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES,
+				       adapter_handle,		/* R4 */
+				       reg_control,		/* R5 */
+				       resource_handle,		/* R6 */
+				       log_pageaddr,		/* R7 */
+				       count,			/* R8 */
+				       0, 0);			/* R9-R10 */
 }
 
 u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
 			const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
 			struct ehea_mr *mr)
 {
-	u64 hret, dummy, lkey_out;
-
-	hret = ehea_hcall_9arg_9ret(H_REGISTER_SMR,
-				    adapter_handle,		   /* R4 */
-				    orig_mr_handle,		   /* R5 */
-				    vaddr_in,			   /* R6 */
-				    (((u64)access_ctrl) << 32ULL), /* R7 */
-				    pd,				   /* R8 */
-				    0, 0, 0, 0,			   /* R9-R12 */
-				    &mr->handle,		   /* R4 */
-				    &dummy,			   /* R5 */
-				    &lkey_out,			   /* R6 */
-				    &dummy,			   /* R7 */
-				    &dummy,			   /* R8 */
-				    &dummy,			   /* R9 */
-				    &dummy,			   /* R10 */
-				    &dummy,			   /* R11 */
-				    &dummy);			   /* R12 */
-	mr->lkey = (u32)lkey_out;
+	u64 hret;
+	u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+	hret = ehea_plpar_hcall9(H_REGISTER_SMR,
+				 outs,
+				 adapter_handle,		   /* R4 */
+				 orig_mr_handle,		   /* R5 */
+				 vaddr_in,			   /* R6 */
+				 (((u64)access_ctrl) << 32ULL),	   /* R7 */
+				 pd,				   /* R8 */
+				 0, 0, 0, 0);			   /* R9-R12 */
+
+	mr->handle = outs[0];
+	mr->lkey = (u32)outs[2];
 
 	return hret;
 }
 
 u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
 {
-	u64 hret, dummy, ladr_next_sq_wqe_out;
-	u64 ladr_next_rq1_wqe_out, ladr_next_rq2_wqe_out, ladr_next_rq3_wqe_out;
-
-	hret = ehea_hcall_9arg_9ret(H_DISABLE_AND_GET_HEA,
-				    adapter_handle,		/* R4 */
-				    H_DISABLE_GET_EHEA_WQE_P,	/* R5 */
-				    qp_handle,			/* R6 */
-				    0, 0, 0, 0, 0, 0,		/* R7-R12 */
-				    &ladr_next_sq_wqe_out,	/* R4 */
-				    &ladr_next_rq1_wqe_out,	/* R5 */
-				    &ladr_next_rq2_wqe_out,	/* R6 */
-				    &ladr_next_rq3_wqe_out,	/* R7 */
-				    &dummy,			/* R8 */
-				    &dummy,			/* R9 */
-				    &dummy,			/* R10 */
-				    &dummy,			/* R11 */
-				    &dummy);			/* R12 */
-	return hret;
+	u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+	return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
+				 outs,
+				 adapter_handle,		/* R4 */
+				 H_DISABLE_GET_EHEA_WQE_P,	/* R5 */
+				 qp_handle,			/* R6 */
+				 0, 0, 0, 0, 0, 0);		/* R7-R12 */
 }
 
 u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle)
 {
-	u64 dummy;
-
-	return ehea_hcall_9arg_9ret(H_FREE_RESOURCE,
-				    adapter_handle,	   /* R4 */
-				    res_handle,		   /* R5 */
-				    0, 0, 0, 0, 0, 0, 0,   /* R6-R12 */
-				    &dummy,		   /* R4 */
-				    &dummy,		   /* R5 */
-				    &dummy,		   /* R6 */
-				    &dummy,		   /* R7 */
-				    &dummy,		   /* R8 */
-				    &dummy,		   /* R9 */
-				    &dummy,		   /* R10 */
-				    &dummy,		   /* R11 */
-				    &dummy);		   /* R12 */
+	return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
+				       adapter_handle,	   /* R4 */
+				       res_handle,	   /* R5 */
+				       0, 0, 0, 0, 0);	   /* R6-R10 */
 }
 
 u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
 			     const u64 length, const u32 access_ctrl,
 			     const u32 pd, u64 *mr_handle, u32 *lkey)
 {
-	u64 hret, dummy, lkey_out;
-
-	hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
-				    adapter_handle,		    /* R4 */
-				    5,				    /* R5 */
-				    vaddr,			    /* R6 */
-				    length,			    /* R7 */
-				    (((u64) access_ctrl) << 32ULL), /* R8 */
-				    pd,				    /* R9 */
-				    0, 0, 0,			    /* R10-R12 */
-				    mr_handle,			    /* R4 */
-				    &dummy,			    /* R5 */
-				    &lkey_out,			    /* R6 */
-				    &dummy,			    /* R7 */
-				    &dummy,			    /* R8 */
-				    &dummy,			    /* R9 */
-				    &dummy,			    /* R10 */
-				    &dummy,			    /* R11 */
-				    &dummy);			    /* R12 */
-	*lkey = (u32) lkey_out;
-
+	u64 hret;
+	u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+				 outs,
+				 adapter_handle,		    /* R4 */
+				 5,				    /* R5 */
+				 vaddr,				    /* R6 */
+				 length,			    /* R7 */
+				 (((u64) access_ctrl) << 32ULL),    /* R8 */
+				 pd,				    /* R9 */
+				 0, 0, 0);			    /* R10-R12 */
+
+	*mr_handle = outs[0];
+	*lkey = (u32)outs[2];
+
 	return hret;
 }
 
@@ -570,23 +518,14 @@ u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
 
 u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
 {
-	u64 hret, dummy, cb_logaddr;
+	u64 hret, cb_logaddr;
 
 	cb_logaddr = virt_to_abs(cb_addr);
 
-	hret = ehea_hcall_9arg_9ret(H_QUERY_HEA,
-				    adapter_handle,		/* R4 */
-				    cb_logaddr,			/* R5 */
-				    0, 0, 0, 0, 0, 0, 0,	/* R6-R12 */
-				    &dummy,			/* R4 */
-				    &dummy,			/* R5 */
-				    &dummy,			/* R6 */
-				    &dummy,			/* R7 */
-				    &dummy,			/* R8 */
-				    &dummy,			/* R9 */
-				    &dummy,			/* R10 */
-				    &dummy,			/* R11 */
-				    &dummy);			/* R12 */
+	hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
+				       adapter_handle,		/* R4 */
+				       cb_logaddr,		/* R5 */
+				       0, 0, 0, 0, 0);		/* R6-R10 */
 #ifdef DEBUG
 	ehea_dmp(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
 #endif
@@ -597,36 +536,28 @@ u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
 			   const u8 cb_cat, const u64 select_mask,
 			   void *cb_addr)
 {
-	u64 port_info, dummy;
+	u64 port_info;
 	u64 cb_logaddr = virt_to_abs(cb_addr);
 	u64 arr_index = 0;
 
 	port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
 		  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
 
-	return ehea_hcall_9arg_9ret(H_QUERY_HEA_PORT,
-				    adapter_handle,		/* R4 */
-				    port_info,			/* R5 */
-				    select_mask,		/* R6 */
-				    arr_index,			/* R7 */
-				    cb_logaddr,			/* R8 */
-				    0, 0, 0, 0,			/* R9-R12 */
-				    &dummy,			/* R4 */
-				    &dummy,			/* R5 */
-				    &dummy,			/* R6 */
-				    &dummy,			/* R7 */
-				    &dummy,			/* R8 */
-				    &dummy,			/* R9 */
-				    &dummy,			/* R10 */
-				    &dummy,			/* R11 */
-				    &dummy);			/* R12 */
+	return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT,
+				       adapter_handle,		/* R4 */
+				       port_info,		/* R5 */
+				       select_mask,		/* R6 */
+				       arr_index,		/* R7 */
+				       cb_logaddr,		/* R8 */
+				       0, 0);			/* R9-R10 */
 }
 
 u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
 			    const u8 cb_cat, const u64 select_mask,
 			    void *cb_addr)
 {
-	u64 port_info, dummy, inv_attr_ident, proc_mask;
+	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	u64 port_info;
 	u64 arr_index = 0;
 	u64 cb_logaddr = virt_to_abs(cb_addr);
 
@@ -635,29 +566,21 @@ u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
 #ifdef DEBUG
 	ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
 #endif
-	return ehea_hcall_9arg_9ret(H_MODIFY_HEA_PORT,
-				    adapter_handle,		/* R4 */
-				    port_info,			/* R5 */
-				    select_mask,		/* R6 */
-				    arr_index,			/* R7 */
-				    cb_logaddr,			/* R8 */
-				    0, 0, 0, 0,			/* R9-R12 */
-				    &inv_attr_ident,		/* R4 */
-				    &proc_mask,			/* R5 */
-				    &dummy,			/* R6 */
-				    &dummy,			/* R7 */
-				    &dummy,			/* R8 */
-				    &dummy,			/* R9 */
-				    &dummy,			/* R10 */
-				    &dummy,			/* R11 */
-				    &dummy);			/* R12 */
+	return ehea_plpar_hcall9(H_MODIFY_HEA_PORT,
+				 outs,
+				 adapter_handle,		/* R4 */
+				 port_info,			/* R5 */
+				 select_mask,			/* R6 */
+				 arr_index,			/* R7 */
+				 cb_logaddr,			/* R8 */
+				 0, 0, 0, 0);			/* R9-R12 */
 }
 
 u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
 			  const u8 reg_type, const u64 mc_mac_addr,
 			  const u16 vlan_id, const u32 hcall_id)
 {
-	u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id, dummy;
+	u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id;
 	u64 mac_addr = mc_mac_addr >> 16;
 
 	r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
@@ -665,41 +588,21 @@ u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
 	r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
 	r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);
 
-	return ehea_hcall_9arg_9ret(hcall_id,
-				    adapter_handle,		/* R4 */
-				    r5_port_num,		/* R5 */
-				    r6_reg_type,		/* R6 */
-				    r7_mc_mac_addr,		/* R7 */
-				    r8_vlan_id,			/* R8 */
-				    0, 0, 0, 0,			/* R9-R12 */
-				    &dummy,			/* R4 */
-				    &dummy,			/* R5 */
-				    &dummy,			/* R6 */
-				    &dummy,			/* R7 */
-				    &dummy,			/* R8 */
-				    &dummy,			/* R9 */
-				    &dummy,			/* R10 */
-				    &dummy,			/* R11 */
-				    &dummy);			/* R12 */
+	return ehea_plpar_hcall_norets(hcall_id,
+				       adapter_handle,		/* R4 */
+				       r5_port_num,		/* R5 */
+				       r6_reg_type,		/* R6 */
+				       r7_mc_mac_addr,		/* R7 */
+				       r8_vlan_id,		/* R8 */
+				       0, 0);			/* R9-R12 */
 }
 
 u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
 			const u64 event_mask)
 {
-	u64 dummy;
-
-	return ehea_hcall_9arg_9ret(H_RESET_EVENTS,
-				    adapter_handle,		/* R4 */
-				    neq_handle,			/* R5 */
-				    event_mask,			/* R6 */
-				    0, 0, 0, 0, 0, 0,		/* R7-R12 */
-				    &dummy,			/* R4 */
-				    &dummy,			/* R5 */
-				    &dummy,			/* R6 */
-				    &dummy,			/* R7 */
-				    &dummy,			/* R8 */
-				    &dummy,			/* R9 */
-				    &dummy,			/* R10 */
-				    &dummy,			/* R11 */
-				    &dummy);			/* R12 */
+	return ehea_plpar_hcall_norets(H_RESET_EVENTS,
+				       adapter_handle,		/* R4 */
+				       neq_handle,		/* R5 */
+				       event_mask,		/* R6 */
+				       0, 0, 0, 0);		/* R7-R12 */
 }
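All converted callers follow one convention: hcalls that return values fill a caller-provided PLPAR_HCALL9_BUFSIZE array where slot i holds hypervisor return register R(4+i), and each wrapper copies out only the slots it needs (for example mr->handle = outs[0] and mr->lkey = (u32)outs[2] above). A trivial userspace sketch of that indexing; the values are invented for illustration:

    /* Slot i of the buffer corresponds to return register R(4 + i):
     * outs[0] is R4, outs[2] is R6, outs[8] is R12. */
    #include <stdio.h>

    #define HCALL9_BUFSIZE 9    /* mirrors PLPAR_HCALL9_BUFSIZE */

    int main(void)
    {
            unsigned long outs[HCALL9_BUFSIZE] = { 0 };

            /* a real call would be: ret = ehea_plpar_hcall9(op, outs, ...); */
            outs[0] = 0xabcd;   /* R4: e.g. the new resource handle */
            outs[2] = 0x1234;   /* R6: e.g. the lkey (low 32 bits used) */

            printf("handle=%lx lkey=%x\n", outs[0], (unsigned)outs[2]);
            return 0;
    }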
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 99b7a411db28..c5ed635bce36 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2497,6 +2497,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 	int i;
+	unsigned long flags;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
 
@@ -2508,16 +2509,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 		if (!(events & np->irqmask))
 			break;
 
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
 		nv_tx_done(dev);
-		spin_unlock_irq(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 				dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2527,7 +2528,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			break;
 		}
 
@@ -2601,6 +2602,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 	int i;
+	unsigned long flags;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
 
@@ -2614,14 +2616,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 
 		nv_rx_process(dev, dev->weight);
 		if (nv_alloc_rx(dev)) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 		}
 
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2631,7 +2633,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			break;
 		}
 	}
@@ -2648,6 +2650,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 	int i;
+	unsigned long flags;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
 
@@ -2660,14 +2663,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 			break;
 
 		if (events & NVREG_IRQ_LINK) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			nv_link_irq(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 		}
 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			nv_linkchange(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2675,7 +2678,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 				dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
 			pci_push(base);
@@ -2685,7 +2688,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			break;
 		}
 
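Every conversion in this file is the same fix: spin_unlock_irq() unconditionally re-enables local interrupts, which is wrong if the handler can be entered with them already disabled, so the saved-flags variants are used instead. A kernel-style sketch of the pattern (illustrative lock, not forcedeth code, and not a standalone program):

    /* Save/restore the caller's interrupt state instead of assuming
     * interrupts were enabled on entry. */
    static DEFINE_SPINLOCK(example_lock);

    static void safe_in_any_context(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);    /* records prior state */
            /* ... critical section ... */
            spin_unlock_irqrestore(&example_lock, flags); /* restores it */
    }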
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 4bac3cd8f235..2802db23d3cb 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -213,6 +213,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 	}
 
 	free_index = pool->consumer_index++ % pool->size;
+	pool->consumer_index = free_index;
 	index = pool->free_map[free_index];
 
 	ibmveth_assert(index != IBM_VETH_INVALID_MAP);
@@ -238,7 +239,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 	if(lpar_rc != H_SUCCESS) {
 		pool->free_map[free_index] = index;
 		pool->skbuff[index] = NULL;
-		pool->consumer_index--;
+		if (pool->consumer_index == 0)
+			pool->consumer_index = pool->size - 1;
+		else
+			pool->consumer_index--;
 		dma_unmap_single(&adapter->vdev->dev,
 				pool->dma_addr[index], pool->buff_size,
 				DMA_FROM_DEVICE);
@@ -326,6 +330,7 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
 			DMA_FROM_DEVICE);
 
 	free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size;
+	adapter->rx_buff_pool[pool].producer_index = free_index;
 	adapter->rx_buff_pool[pool].free_map[free_index] = index;
 
 	mb();
@@ -437,6 +442,31 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 					&adapter->rx_buff_pool[i]);
 }
 
+static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
+        union ibmveth_buf_desc rxq_desc, u64 mac_address)
+{
+	int rc, try_again = 1;
+
+	/* After a kexec the adapter will still be open, so our attempt to
+	 * open it will fail. So if we get a failure we free the adapter and
+	 * try again, but only once. */
+retry:
+	rc = h_register_logical_lan(adapter->vdev->unit_address,
+				    adapter->buffer_list_dma, rxq_desc.desc,
+				    adapter->filter_list_dma, mac_address);
+
+	if (rc != H_SUCCESS && try_again) {
+		do {
+			rc = h_free_logical_lan(adapter->vdev->unit_address);
+		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+
+		try_again = 0;
+		goto retry;
+	}
+
+	return rc;
+}
+
 static int ibmveth_open(struct net_device *netdev)
 {
 	struct ibmveth_adapter *adapter = netdev->priv;
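The comment in the new helper explains the retry: after a kexec the hypervisor still considers the virtual adapter open, so the first registration fails; the helper then frees the stale registration (spinning past busy returns) and retries exactly once. A self-contained userspace sketch of that retry-once idiom; register_res() and free_res() are hypothetical stand-ins for the two hcalls:

    #include <stdio.h>

    static int first_call = 1;

    static int register_res(void)
    {
            /* stub: fail the first time, as after a kexec */
            if (first_call) {
                    first_call = 0;
                    return -1;
            }
            return 0;
    }

    static int free_res(void) { return 0; }     /* stub: succeeds at once */

    static int register_with_recovery(void)
    {
            int rc, try_again = 1;
    retry:
            rc = register_res();
            if (rc != 0 && try_again) {
                    while (free_res() != 0)
                            ;               /* drain the stale registration */
                    try_again = 0;          /* allow one recovery attempt only */
                    goto retry;
            }
            return rc;
    }

    int main(void)
    {
            printf("rc=%d\n", register_with_recovery());    /* prints rc=0 */
            return 0;
    }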
@@ -502,12 +532,9 @@ static int ibmveth_open(struct net_device *netdev)
 	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
 	ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);
 
+	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
 
-	lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
-					 adapter->buffer_list_dma,
-					 rxq_desc.desc,
-					 adapter->filter_list_dma,
-					 mac_address);
+	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
 
 	if(lpar_rc != H_SUCCESS) {
 		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
@@ -905,6 +932,14 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 	return -EINVAL;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ibmveth_poll_controller(struct net_device *dev)
+{
+	ibmveth_replenish_task(dev->priv);
+	ibmveth_interrupt(dev->irq, dev);
+}
+#endif
+
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
 	int rc, i;
@@ -977,6 +1012,9 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 	netdev->ethtool_ops = &netdev_ethtool_ops;
 	netdev->change_mtu = ibmveth_change_mtu;
 	SET_NETDEV_DEV(netdev, &dev->dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	netdev->poll_controller = ibmveth_poll_controller;
+#endif
 	netdev->features |= NETIF_F_LLTX;
 	spin_lock_init(&adapter->stats_lock);
 
@@ -1132,7 +1170,9 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
 {
 	struct proc_dir_entry *entry;
 	if (ibmveth_proc_dir) {
-		entry = create_proc_entry(adapter->netdev->name, S_IFREG, ibmveth_proc_dir);
+		char u_addr[10];
+		sprintf(u_addr, "%x", adapter->vdev->unit_address);
+		entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
 		if (!entry) {
 			ibmveth_error_printk("Cannot create adapter proc entry");
 		} else {
@@ -1147,7 +1187,9 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
 {
 	if (ibmveth_proc_dir) {
-		remove_proc_entry(adapter->netdev->name, ibmveth_proc_dir);
+		char u_addr[10];
+		sprintf(u_addr, "%x", adapter->vdev->unit_address);
+		remove_proc_entry(u_addr, ibmveth_proc_dir);
 	}
 }
 
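The error-path hunk replaces a bare consumer_index-- with an explicit wraparound, since decrementing an unsigned ring index at zero underflows instead of stepping back to the last slot. A tiny self-contained sketch of that decrement:

    /* On an unsigned ring index a bare `--` underflows at 0,
     * so step back explicitly. */
    #include <assert.h>

    static unsigned int ring_prev(unsigned int idx, unsigned int size)
    {
            return idx == 0 ? size - 1 : idx - 1;
    }

    int main(void)
    {
            assert(ring_prev(0, 8) == 7);   /* wraps instead of 0xffffffff */
            assert(ring_prev(5, 8) == 4);
            return 0;
    }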
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 2ffa3a59e704..9997081c6dae 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -2155,7 +2155,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
 	for (offset = ETH_MIB_BAD_OCTETS_RECEIVED;
 			offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS;
 			offset += 4)
-		*(u32 *)((char *)p + offset) = read_mib(mp, offset);
+		*(u32 *)((char *)p + offset) += read_mib(mp, offset);
 
 	p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW);
 	p->good_octets_sent +=
@@ -2164,7 +2164,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
 	for (offset = ETH_MIB_GOOD_FRAMES_SENT;
 			offset <= ETH_MIB_LATE_COLLISION;
 			offset += 4)
-		*(u32 *)((char *)p + offset) = read_mib(mp, offset);
+		*(u32 *)((char *)p + offset) += read_mib(mp, offset);
 }
 
 /*
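These MIB registers clear on read, so assigning (=) into the software shadow discards whatever accumulated since the last poll; accumulating (+=) preserves the running total. A userspace sketch of why, with read_counter() as a hypothetical stand-in for read_mib():

    #include <stdio.h>

    static unsigned int hw_count = 42;

    static unsigned int read_counter(void) /* clears on read, like MIB regs */
    {
            unsigned int v = hw_count;
            hw_count = 0;
            return v;
    }

    int main(void)
    {
            unsigned long shadow = 0;

            shadow += read_counter();   /* 42 */
            hw_count = 8;               /* more traffic arrives */
            shadow += read_counter();   /* 50: nothing lost */
            printf("total=%lu\n", shadow);
            return 0;
    }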
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index a4a58e4e93a1..e7e414928f89 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -43,7 +43,7 @@
 #include "skge.h"
 
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"1.8"
+#define DRV_VERSION		"1.9"
 #define PFX			DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
@@ -197,8 +197,8 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
 		else if (hw->chip_id == CHIP_ID_YUKON)
 			supported &= ~SUPPORTED_1000baseT_Half;
 	} else
-		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
-			| SUPPORTED_Autoneg;
+		supported = SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half
+			| SUPPORTED_FIBRE | SUPPORTED_Autoneg;
 
 	return supported;
 }
@@ -487,31 +487,37 @@ static void skge_get_pauseparam(struct net_device *dev,
 {
 	struct skge_port *skge = netdev_priv(dev);
 
-	ecmd->tx_pause = (skge->flow_control == FLOW_MODE_LOC_SEND)
-		|| (skge->flow_control == FLOW_MODE_SYMMETRIC);
-	ecmd->rx_pause = (skge->flow_control == FLOW_MODE_REM_SEND)
-		|| (skge->flow_control == FLOW_MODE_SYMMETRIC);
+	ecmd->rx_pause = (skge->flow_control == FLOW_MODE_SYMMETRIC)
+		|| (skge->flow_control == FLOW_MODE_SYM_OR_REM);
+	ecmd->tx_pause = ecmd->rx_pause || (skge->flow_control == FLOW_MODE_LOC_SEND);
 
-	ecmd->autoneg = skge->autoneg;
+	ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;
 }
 
 static int skge_set_pauseparam(struct net_device *dev,
 			       struct ethtool_pauseparam *ecmd)
 {
 	struct skge_port *skge = netdev_priv(dev);
+	struct ethtool_pauseparam old;
 
-	skge->autoneg = ecmd->autoneg;
-	if (ecmd->rx_pause && ecmd->tx_pause)
-		skge->flow_control = FLOW_MODE_SYMMETRIC;
-	else if (ecmd->rx_pause && !ecmd->tx_pause)
-		skge->flow_control = FLOW_MODE_REM_SEND;
-	else if (!ecmd->rx_pause && ecmd->tx_pause)
-		skge->flow_control = FLOW_MODE_LOC_SEND;
-	else
-		skge->flow_control = FLOW_MODE_NONE;
+	skge_get_pauseparam(dev, &old);
+
+	if (ecmd->autoneg != old.autoneg)
+		skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC;
+	else {
+		if (ecmd->rx_pause && ecmd->tx_pause)
+			skge->flow_control = FLOW_MODE_SYMMETRIC;
+		else if (ecmd->rx_pause && !ecmd->tx_pause)
+			skge->flow_control = FLOW_MODE_SYM_OR_REM;
+		else if (!ecmd->rx_pause && ecmd->tx_pause)
+			skge->flow_control = FLOW_MODE_LOC_SEND;
+		else
+			skge->flow_control = FLOW_MODE_NONE;
+	}
 
 	if (netif_running(dev))
 		skge_phy_reset(skge);
+
 	return 0;
 }
 
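The rewritten setter first recovers the current view via skge_get_pauseparam() and only re-derives the flow-control mode from the rx/tx bits when autoneg is left unchanged; rx-only now maps to the new FLOW_MODE_SYM_OR_REM (advertise symmetric or remote-send). A sketch of that bit-to-mode mapping, with illustrative enum values:

    #include <stdio.h>

    enum flow_mode { FLOW_NONE, FLOW_SYMMETRIC, FLOW_SYM_OR_REM, FLOW_LOC_SEND };

    static enum flow_mode pick_flow_mode(int rx_pause, int tx_pause)
    {
            if (rx_pause && tx_pause)
                    return FLOW_SYMMETRIC;  /* pause in both directions */
            if (rx_pause)
                    return FLOW_SYM_OR_REM; /* rx only: symmetric or remote-send */
            if (tx_pause)
                    return FLOW_LOC_SEND;   /* tx only: local may send pause */
            return FLOW_NONE;
    }

    int main(void)
    {
            printf("%d %d %d %d\n",
                   pick_flow_mode(1, 1), pick_flow_mode(1, 0),
                   pick_flow_mode(0, 1), pick_flow_mode(0, 0));
            return 0;
    }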
@@ -854,6 +860,23 @@ static int skge_rx_fill(struct net_device *dev)
 	return 0;
 }
 
+static const char *skge_pause(enum pause_status status)
+{
+	switch(status) {
+	case FLOW_STAT_NONE:
+		return "none";
+	case FLOW_STAT_REM_SEND:
+		return "rx only";
+	case FLOW_STAT_LOC_SEND:
+		return "tx_only";
+	case FLOW_STAT_SYMMETRIC:	/* Both station may send PAUSE */
+		return "both";
+	default:
+		return "indeterminated";
+	}
+}
+
+
 static void skge_link_up(struct skge_port *skge)
 {
 	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
@@ -862,16 +885,13 @@ static void skge_link_up(struct skge_port *skge)
 	netif_carrier_on(skge->netdev);
 	netif_wake_queue(skge->netdev);
 
-	if (netif_msg_link(skge))
+	if (netif_msg_link(skge)) {
 		printk(KERN_INFO PFX
 		       "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
 		       skge->netdev->name, skge->speed,
 		       skge->duplex == DUPLEX_FULL ? "full" : "half",
-		       (skge->flow_control == FLOW_MODE_NONE) ? "none" :
-		       (skge->flow_control == FLOW_MODE_LOC_SEND) ? "tx only" :
-		       (skge->flow_control == FLOW_MODE_REM_SEND) ? "rx only" :
-		       (skge->flow_control == FLOW_MODE_SYMMETRIC) ? "tx and rx" :
-		       "unknown");
+		       skge_pause(skge->flow_status));
+	}
 }
 
 static void skge_link_down(struct skge_port *skge)
@@ -884,6 +904,29 @@ static void skge_link_down(struct skge_port *skge)
 		printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
 }
 
+
+static void xm_link_down(struct skge_hw *hw, int port)
+{
+	struct net_device *dev = hw->dev[port];
+	struct skge_port *skge = netdev_priv(dev);
+	u16 cmd, msk;
+
+	if (hw->phy_type == SK_PHY_XMAC) {
+		msk = xm_read16(hw, port, XM_IMSK);
+		msk |= XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND;
+		xm_write16(hw, port, XM_IMSK, msk);
+	}
+
+	cmd = xm_read16(hw, port, XM_MMU_CMD);
+	cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+	xm_write16(hw, port, XM_MMU_CMD, cmd);
+	/* dummy read to ensure writing */
+	(void) xm_read16(hw, port, XM_MMU_CMD);
+
+	if (netif_carrier_ok(dev))
+		skge_link_down(skge);
+}
+
 static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
 {
 	int i;
@@ -992,7 +1035,15 @@ static const u16 phy_pause_map[] = {
 	[FLOW_MODE_NONE] =	0,
 	[FLOW_MODE_LOC_SEND] =	PHY_AN_PAUSE_ASYM,
 	[FLOW_MODE_SYMMETRIC] =	PHY_AN_PAUSE_CAP,
-	[FLOW_MODE_REM_SEND] =	PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
+	[FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
+};
+
+/* special defines for FIBER (88E1011S only) */
+static const u16 fiber_pause_map[] = {
+	[FLOW_MODE_NONE] =	PHY_X_P_NO_PAUSE,
+	[FLOW_MODE_LOC_SEND] =	PHY_X_P_ASYM_MD,
+	[FLOW_MODE_SYMMETRIC] =	PHY_X_P_SYM_MD,
+	[FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD,
 };
 
 
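fiber_pause_map exists so a later hunk can replace a switch statement with a one-line table lookup, ctrl |= fiber_pause_map[skge->flow_control], using C99 designated initializers indexed by the flow-control enum. A self-contained sketch of the idiom; the bit values are invented, not the real PHY_X_P_* constants:

    #include <stdio.h>

    enum flow_mode { FLOW_NONE, FLOW_LOC_SEND, FLOW_SYMMETRIC, FLOW_SYM_OR_REM };

    static const unsigned short fiber_pause_bits[] = {
            [FLOW_NONE]       = 0x0000,
            [FLOW_LOC_SEND]   = 0x0100,
            [FLOW_SYMMETRIC]  = 0x0080,
            [FLOW_SYM_OR_REM] = 0x0180,
    };

    int main(void)
    {
            unsigned short ctrl = 0;
            ctrl |= fiber_pause_bits[FLOW_SYM_OR_REM];  /* one line, no switch */
            printf("ctrl=%04x\n", ctrl);
            return 0;
    }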
@@ -1008,14 +1059,7 @@ static void bcom_check_link(struct skge_hw *hw, int port)
 	status = xm_phy_read(hw, port, PHY_BCOM_STAT);
 
 	if ((status & PHY_ST_LSYNC) == 0) {
-		u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
-		cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
-		xm_write16(hw, port, XM_MMU_CMD, cmd);
-		/* dummy read to ensure writing */
-		(void) xm_read16(hw, port, XM_MMU_CMD);
-
-		if (netif_carrier_ok(dev))
-			skge_link_down(skge);
+		xm_link_down(hw, port);
 		return;
 	}
 
@@ -1048,20 +1092,19 @@ static void bcom_check_link(struct skge_hw *hw, int port)
 			return;
 		}
 
-
 		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
 		switch (aux & PHY_B_AS_PAUSE_MSK) {
 		case PHY_B_AS_PAUSE_MSK:
-			skge->flow_control = FLOW_MODE_SYMMETRIC;
+			skge->flow_status = FLOW_STAT_SYMMETRIC;
 			break;
 		case PHY_B_AS_PRR:
-			skge->flow_control = FLOW_MODE_REM_SEND;
+			skge->flow_status = FLOW_STAT_REM_SEND;
 			break;
 		case PHY_B_AS_PRT:
-			skge->flow_control = FLOW_MODE_LOC_SEND;
+			skge->flow_status = FLOW_STAT_LOC_SEND;
 			break;
 		default:
-			skge->flow_control = FLOW_MODE_NONE;
+			skge->flow_status = FLOW_STAT_NONE;
 		}
 		skge->speed = SPEED_1000;
 	}
@@ -1191,17 +1234,7 @@ static void xm_phy_init(struct skge_port *skge)
 	if (skge->advertising & ADVERTISED_1000baseT_Full)
 		ctrl |= PHY_X_AN_FD;
 
-	switch(skge->flow_control) {
-	case FLOW_MODE_NONE:
-		ctrl |= PHY_X_P_NO_PAUSE;
-		break;
-	case FLOW_MODE_LOC_SEND:
-		ctrl |= PHY_X_P_ASYM_MD;
-		break;
-	case FLOW_MODE_SYMMETRIC:
-		ctrl |= PHY_X_P_BOTH_MD;
-		break;
-	}
+	ctrl |= fiber_pause_map[skge->flow_control];
 
 	xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);
 
@@ -1235,14 +1268,7 @@ static void xm_check_link(struct net_device *dev)
 	status = xm_phy_read(hw, port, PHY_XMAC_STAT);
 
 	if ((status & PHY_ST_LSYNC) == 0) {
-		u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
-		cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
-		xm_write16(hw, port, XM_MMU_CMD, cmd);
-		/* dummy read to ensure writing */
-		(void) xm_read16(hw, port, XM_MMU_CMD);
-
-		if (netif_carrier_ok(dev))
-			skge_link_down(skge);
+		xm_link_down(hw, port);
 		return;
 	}
 
@@ -1276,15 +1302,20 @@ static void xm_check_link(struct net_device *dev)
1276 } 1302 }
1277 1303
1278 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 1304 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1279 if (lpa & PHY_X_P_SYM_MD) 1305 if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
1280 skge->flow_control = FLOW_MODE_SYMMETRIC; 1306 skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
1281 else if ((lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) 1307 (lpa & PHY_X_P_SYM_MD))
1282 skge->flow_control = FLOW_MODE_REM_SEND; 1308 skge->flow_status = FLOW_STAT_SYMMETRIC;
1283 else if ((lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) 1309 else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
1284 skge->flow_control = FLOW_MODE_LOC_SEND; 1310 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
1311 /* Enable PAUSE receive, disable PAUSE transmit */
1312 skge->flow_status = FLOW_STAT_REM_SEND;
1313 else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
1314 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
1315 /* Disable PAUSE receive, enable PAUSE transmit */
1316 skge->flow_status = FLOW_STAT_LOC_SEND;
1285 else 1317 else
1286 skge->flow_control = FLOW_MODE_NONE; 1318 skge->flow_status = FLOW_STAT_NONE;
1287
1288 1319
1289 skge->speed = SPEED_1000; 1320 skge->speed = SPEED_1000;
1290 } 1321 }
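
The rewritten comparison resolves the negotiated pause state from two inputs rather than the partner's bits alone: the locally requested mode and the link partner ability word, per IEEE 802.3z/D5.0 Table 37-4. A compilable sketch of that decision; the PHY_X_* encodings are assumptions (two adjacent advertisement bits), not quoted from skge.h.

    /* Assumed 1000BASE-X pause encodings in the link partner word. */
    #define PHY_X_P_NO_PAUSE (0 << 7)
    #define PHY_X_P_SYM_MD   (1 << 7)
    #define PHY_X_P_ASYM_MD  (2 << 7)
    #define PHY_X_P_BOTH_MD  (3 << 7)
    #define PHY_X_RS_PAUSE   (3 << 7)       /* mask for the pause bits */

    enum pause_control { FLOW_MODE_NONE = 1, FLOW_MODE_LOC_SEND,
                         FLOW_MODE_SYMMETRIC, FLOW_MODE_SYM_OR_REM };
    enum pause_status  { FLOW_STAT_NONE = 1, FLOW_STAT_REM_SEND,
                         FLOW_STAT_LOC_SEND, FLOW_STAT_SYMMETRIC };

    static enum pause_status resolve_pause(enum pause_control mode, unsigned lpa)
    {
        if ((mode == FLOW_MODE_SYMMETRIC || mode == FLOW_MODE_SYM_OR_REM) &&
            (lpa & PHY_X_P_SYM_MD))
            return FLOW_STAT_SYMMETRIC;     /* both ends send PAUSE */

        if (mode == FLOW_MODE_SYM_OR_REM &&
            (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
            return FLOW_STAT_REM_SEND;      /* PAUSE receive only */

        if (mode == FLOW_MODE_LOC_SEND &&
            (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
            return FLOW_STAT_LOC_SEND;      /* PAUSE transmit only */

        return FLOW_STAT_NONE;
    }

The asymmetric branches fire only when the local request permits them, which is exactly what the old code got wrong by looking at the partner's bits in isolation.
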
@@ -1568,6 +1599,10 @@ static void genesis_mac_intr(struct skge_hw *hw, int port)
1568 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n", 1599 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
1569 skge->netdev->name, status); 1600 skge->netdev->name, status);
1570 1601
1602 if (hw->phy_type == SK_PHY_XMAC &&
1603 (status & (XM_IS_INP_ASS | XM_IS_LIPA_RC)))
1604 xm_link_down(hw, port);
1605
1571 if (status & XM_IS_TXF_UR) { 1606 if (status & XM_IS_TXF_UR) {
1572 xm_write32(hw, port, XM_MODE, XM_MD_FTF); 1607 xm_write32(hw, port, XM_MODE, XM_MD_FTF);
1573 ++skge->net_stats.tx_fifo_errors; 1608 ++skge->net_stats.tx_fifo_errors;
@@ -1582,7 +1617,7 @@ static void genesis_link_up(struct skge_port *skge)
1582{ 1617{
1583 struct skge_hw *hw = skge->hw; 1618 struct skge_hw *hw = skge->hw;
1584 int port = skge->port; 1619 int port = skge->port;
1585 u16 cmd; 1620 u16 cmd, msk;
1586 u32 mode; 1621 u32 mode;
1587 1622
1588 cmd = xm_read16(hw, port, XM_MMU_CMD); 1623 cmd = xm_read16(hw, port, XM_MMU_CMD);
@@ -1591,8 +1626,8 @@ static void genesis_link_up(struct skge_port *skge)
1591 * enabling pause frame reception is required for 1000BT 1626 * enabling pause frame reception is required for 1000BT
1592 * because the XMAC is not reset if the link is going down 1627 * because the XMAC is not reset if the link is going down
1593 */ 1628 */
1594 if (skge->flow_control == FLOW_MODE_NONE || 1629 if (skge->flow_status == FLOW_STAT_NONE ||
1595 skge->flow_control == FLOW_MODE_LOC_SEND) 1630 skge->flow_status == FLOW_STAT_LOC_SEND)
1596 /* Disable Pause Frame Reception */ 1631 /* Disable Pause Frame Reception */
1597 cmd |= XM_MMU_IGN_PF; 1632 cmd |= XM_MMU_IGN_PF;
1598 else 1633 else
@@ -1602,8 +1637,8 @@ static void genesis_link_up(struct skge_port *skge)
1602 xm_write16(hw, port, XM_MMU_CMD, cmd); 1637 xm_write16(hw, port, XM_MMU_CMD, cmd);
1603 1638
1604 mode = xm_read32(hw, port, XM_MODE); 1639 mode = xm_read32(hw, port, XM_MODE);
1605 if (skge->flow_control == FLOW_MODE_SYMMETRIC || 1640 if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
1606 skge->flow_control == FLOW_MODE_LOC_SEND) { 1641 skge->flow_status == FLOW_STAT_LOC_SEND) {
1607 /* 1642 /*
1608 * Configure Pause Frame Generation 1643 * Configure Pause Frame Generation
1609 * Use internal and external Pause Frame Generation. 1644 * Use internal and external Pause Frame Generation.
@@ -1631,7 +1666,11 @@ static void genesis_link_up(struct skge_port *skge)
1631 } 1666 }
1632 1667
1633 xm_write32(hw, port, XM_MODE, mode); 1668 xm_write32(hw, port, XM_MODE, mode);
1634 xm_write16(hw, port, XM_IMSK, XM_DEF_MSK); 1669 msk = XM_DEF_MSK;
1670 if (hw->phy_type != SK_PHY_XMAC)
1671 msk |= XM_IS_INP_ASS; /* disable GP0 interrupt bit */
1672
1673 xm_write16(hw, port, XM_IMSK, msk);
1635 xm_read16(hw, port, XM_ISRC); 1674 xm_read16(hw, port, XM_ISRC);
1636 1675
1637 /* get MMU Command Reg. */ 1676 /* get MMU Command Reg. */
@@ -1779,11 +1818,17 @@ static void yukon_init(struct skge_hw *hw, int port)
1779 adv |= PHY_M_AN_10_FD; 1818 adv |= PHY_M_AN_10_FD;
1780 if (skge->advertising & ADVERTISED_10baseT_Half) 1819 if (skge->advertising & ADVERTISED_10baseT_Half)
1781 adv |= PHY_M_AN_10_HD; 1820 adv |= PHY_M_AN_10_HD;
1782 } else /* special defines for FIBER (88E1011S only) */
1783 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
1784 1821
1785 /* Set Flow-control capabilities */ 1822 /* Set Flow-control capabilities */
1786 adv |= phy_pause_map[skge->flow_control]; 1823 adv |= phy_pause_map[skge->flow_control];
1824 } else {
1825 if (skge->advertising & ADVERTISED_1000baseT_Full)
1826 adv |= PHY_M_AN_1000X_AFD;
1827 if (skge->advertising & ADVERTISED_1000baseT_Half)
1828 adv |= PHY_M_AN_1000X_AHD;
1829
1830 adv |= fiber_pause_map[skge->flow_control];
1831 }
1787 1832
1788 /* Restart Auto-negotiation */ 1833 /* Restart Auto-negotiation */
1789 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; 1834 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
@@ -1917,6 +1962,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1917 case FLOW_MODE_LOC_SEND: 1962 case FLOW_MODE_LOC_SEND:
1918 /* disable Rx flow-control */ 1963 /* disable Rx flow-control */
1919 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 1964 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1965 break;
1966 case FLOW_MODE_SYMMETRIC:
1967 case FLOW_MODE_SYM_OR_REM:
1968 /* enable Tx & Rx flow-control */
1969 break;
1920 } 1970 }
1921 1971
1922 gma_write16(hw, port, GM_GP_CTRL, reg); 1972 gma_write16(hw, port, GM_GP_CTRL, reg);
@@ -2111,13 +2161,11 @@ static void yukon_link_down(struct skge_port *skge)
2111 ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 2161 ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
2112 gma_write16(hw, port, GM_GP_CTRL, ctrl); 2162 gma_write16(hw, port, GM_GP_CTRL, ctrl);
2113 2163
2114 if (skge->flow_control == FLOW_MODE_REM_SEND) { 2164 if (skge->flow_status == FLOW_STAT_REM_SEND) {
2165 ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
2166 ctrl |= PHY_M_AN_ASP;
2115 /* restore Asymmetric Pause bit */ 2167 /* restore Asymmetric Pause bit */
2116 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, 2168 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
2117 gm_phy_read(hw, port,
2118 PHY_MARV_AUNE_ADV)
2119 | PHY_M_AN_ASP);
2120
2121 } 2169 }
2122 2170
2123 yukon_reset(hw, port); 2171 yukon_reset(hw, port);
@@ -2164,19 +2212,19 @@ static void yukon_phy_intr(struct skge_port *skge)
2164 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 2212 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
2165 switch (phystat & PHY_M_PS_PAUSE_MSK) { 2213 switch (phystat & PHY_M_PS_PAUSE_MSK) {
2166 case PHY_M_PS_PAUSE_MSK: 2214 case PHY_M_PS_PAUSE_MSK:
2167 skge->flow_control = FLOW_MODE_SYMMETRIC; 2215 skge->flow_status = FLOW_STAT_SYMMETRIC;
2168 break; 2216 break;
2169 case PHY_M_PS_RX_P_EN: 2217 case PHY_M_PS_RX_P_EN:
2170 skge->flow_control = FLOW_MODE_REM_SEND; 2218 skge->flow_status = FLOW_STAT_REM_SEND;
2171 break; 2219 break;
2172 case PHY_M_PS_TX_P_EN: 2220 case PHY_M_PS_TX_P_EN:
2173 skge->flow_control = FLOW_MODE_LOC_SEND; 2221 skge->flow_status = FLOW_STAT_LOC_SEND;
2174 break; 2222 break;
2175 default: 2223 default:
2176 skge->flow_control = FLOW_MODE_NONE; 2224 skge->flow_status = FLOW_STAT_NONE;
2177 } 2225 }
2178 2226
2179 if (skge->flow_control == FLOW_MODE_NONE || 2227 if (skge->flow_status == FLOW_STAT_NONE ||
2180 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) 2228 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
2181 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 2229 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2182 else 2230 else
@@ -3399,7 +3447,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3399 3447
3400 /* Auto speed and flow control */ 3448 /* Auto speed and flow control */
3401 skge->autoneg = AUTONEG_ENABLE; 3449 skge->autoneg = AUTONEG_ENABLE;
3402 skge->flow_control = FLOW_MODE_SYMMETRIC; 3450 skge->flow_control = FLOW_MODE_SYM_OR_REM;
3403 skge->duplex = -1; 3451 skge->duplex = -1;
3404 skge->speed = -1; 3452 skge->speed = -1;
3405 skge->advertising = skge_supported_modes(hw); 3453 skge->advertising = skge_supported_modes(hw);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index d0b47d46cf9d..537c0aaa1db8 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2195,7 +2195,8 @@ enum {
2195 XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */ 2195 XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */
2196}; 2196};
2197 2197
2198#define XM_DEF_MSK (~(XM_IS_RXC_OV | XM_IS_TXC_OV | XM_IS_RXF_OV | XM_IS_TXF_UR)) 2198#define XM_DEF_MSK (~(XM_IS_INP_ASS | XM_IS_LIPA_RC | \
2199 XM_IS_RXF_OV | XM_IS_TXF_UR))
2199 2200
2200 2201
2201/* XM_HW_CFG 16 bit r/w Hardware Config Register */ 2202/* XM_HW_CFG 16 bit r/w Hardware Config Register */
@@ -2426,13 +2427,24 @@ struct skge_hw {
2426 struct mutex phy_mutex; 2427 struct mutex phy_mutex;
2427}; 2428};
2428 2429
2429enum { 2430enum pause_control {
2430 FLOW_MODE_NONE = 0, /* No Flow-Control */ 2431 FLOW_MODE_NONE = 1, /* No Flow-Control */
2431 FLOW_MODE_LOC_SEND = 1, /* Local station sends PAUSE */ 2432 FLOW_MODE_LOC_SEND = 2, /* Local station sends PAUSE */
2432 FLOW_MODE_REM_SEND = 2, /* Symmetric or just remote */
2433 FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */ 2433 FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */
2434 FLOW_MODE_SYM_OR_REM = 4, /* Both stations may send PAUSE or
2435 * just the remote station may send PAUSE
2436 */
2437};
2438
2439enum pause_status {
2440 FLOW_STAT_INDETERMINATED=0, /* indeterminate */
2441 FLOW_STAT_NONE, /* No Flow Control */
2442 FLOW_STAT_REM_SEND, /* Remote Station sends PAUSE */
2443 FLOW_STAT_LOC_SEND, /* Local station sends PAUSE */
2444 FLOW_STAT_SYMMETRIC, /* Both stations may send PAUSE */
2434}; 2445};
2435 2446
2447
2436struct skge_port { 2448struct skge_port {
2437 u32 msg_enable; 2449 u32 msg_enable;
2438 struct skge_hw *hw; 2450 struct skge_hw *hw;
@@ -2445,9 +2457,10 @@ struct skge_port {
2445 struct net_device_stats net_stats; 2457 struct net_device_stats net_stats;
2446 2458
2447 struct work_struct link_thread; 2459 struct work_struct link_thread;
2460 enum pause_control flow_control;
2461 enum pause_status flow_status;
2448 u8 rx_csum; 2462 u8 rx_csum;
2449 u8 blink_on; 2463 u8 blink_on;
2450 u8 flow_control;
2451 u8 wol; 2464 u8 wol;
2452 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */ 2465 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
2453 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ 2466 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
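
This skge.h hunk is the heart of the rework: the single u8 flow_control is split into a typed pair, the administratively requested mode and the autonegotiated outcome. A small sketch of the split, reusing the enum shapes the patch introduces; skge_link_state and skge_link_reset are invented names for illustration.

    #include <string.h>

    enum pause_control { FLOW_MODE_NONE = 1, FLOW_MODE_LOC_SEND,
                         FLOW_MODE_SYMMETRIC, FLOW_MODE_SYM_OR_REM };
    enum pause_status  { FLOW_STAT_INDETERMINATED = 0, FLOW_STAT_NONE,
                         FLOW_STAT_REM_SEND, FLOW_STAT_LOC_SEND,
                         FLOW_STAT_SYMMETRIC };

    struct skge_link_state {
        enum pause_control flow_control; /* what ethtool asked for */
        enum pause_status  flow_status;  /* what autoneg delivered */
    };

    static void skge_link_reset(struct skge_link_state *ls)
    {
        /* Zeroing leaves flow_status indeterminate, which is why that
         * enumerator is pinned to 0 while the modes start at 1. */
        memset(ls, 0, sizeof(*ls));
        ls->flow_control = FLOW_MODE_SYM_OR_REM; /* the new default */
    }

The skge_devinit hunk earlier changes the driver default accordingly, from FLOW_MODE_SYMMETRIC to the more permissive FLOW_MODE_SYM_OR_REM.
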
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 459c845d6648..c10e7f5faa5f 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -683,7 +683,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
683 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 683 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
684 684
685 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 685 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
686 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); 686 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 512/8);
687 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); 687 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
688 if (hw->dev[port]->mtu > ETH_DATA_LEN) { 688 if (hw->dev[port]->mtu > ETH_DATA_LEN) {
689 /* set Tx GMAC FIFO Almost Empty Threshold */ 689 /* set Tx GMAC FIFO Almost Empty Threshold */
@@ -1907,7 +1907,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
1907 pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, 1907 pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
1908 length, PCI_DMA_FROMDEVICE); 1908 length, PCI_DMA_FROMDEVICE);
1909 re->skb->ip_summed = CHECKSUM_NONE; 1909 re->skb->ip_summed = CHECKSUM_NONE;
1910 __skb_put(skb, length); 1910 skb_put(skb, length);
1911 } 1911 }
1912 return skb; 1912 return skb;
1913} 1913}
@@ -1970,7 +1970,7 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
1970 if (skb_shinfo(skb)->nr_frags) 1970 if (skb_shinfo(skb)->nr_frags)
1971 skb_put_frags(skb, hdr_space, length); 1971 skb_put_frags(skb, hdr_space, length);
1972 else 1972 else
1973 skb_put(skb, hdr_space); 1973 skb_put(skb, length);
1974 return skb; 1974 return skb;
1975} 1975}
1976 1976
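
The one-word fix above matters because skb_put() both advances the tail pointer and grows skb->len; growing by hdr_space instead of the received length would hand the stack a truncated frame. A toy model of that accounting; buf and buf_put are invented stand-ins for the sk_buff machinery.

    #include <assert.h>
    #include <stddef.h>

    struct buf {
        unsigned char data[2048];
        size_t len;                     /* bytes considered valid */
    };

    /* Analogue of skb_put(): extend the valid region by n bytes. */
    static unsigned char *buf_put(struct buf *b, size_t n)
    {
        unsigned char *tail = b->data + b->len;

        assert(b->len + n <= sizeof(b->data));  /* cf. skb_over_panic */
        b->len += n;
        return tail;
    }

    int main(void)
    {
        struct buf b = { .len = 0 };
        size_t length = 1200;           /* full frame, not just headers */

        buf_put(&b, length);
        assert(b.len == length);
        return 0;
    }
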
@@ -2220,8 +2220,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2220 /* PCI-Express uncorrectable Error occurred */ 2220 /* PCI-Express uncorrectable Error occurred */
2221 u32 pex_err; 2221 u32 pex_err;
2222 2222
2223 pex_err = sky2_pci_read32(hw, 2223 pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
2224 hw->err_cap + PCI_ERR_UNCOR_STATUS);
2225 2224
2226 if (net_ratelimit()) 2225 if (net_ratelimit())
2227 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", 2226 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
@@ -2229,20 +2228,15 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2229 2228
2230 /* clear the interrupt */ 2229 /* clear the interrupt */
2231 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2230 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2232 sky2_pci_write32(hw, 2231 sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
2233 hw->err_cap + PCI_ERR_UNCOR_STATUS, 2232 0xffffffffUL);
2234 0xffffffffUL);
2235 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 2233 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2236 2234
2237 2235 if (pex_err & PEX_FATAL_ERRORS) {
2238 /* In case of fatal error mask off to keep from getting stuck */
2239 if (pex_err & (PCI_ERR_UNC_POISON_TLP | PCI_ERR_UNC_FCP
2240 | PCI_ERR_UNC_DLP)) {
2241 u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK); 2236 u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
2242 hwmsk &= ~Y2_IS_PCI_EXP; 2237 hwmsk &= ~Y2_IS_PCI_EXP;
2243 sky2_write32(hw, B0_HWE_IMSK, hwmsk); 2238 sky2_write32(hw, B0_HWE_IMSK, hwmsk);
2244 } 2239 }
2245
2246 } 2240 }
2247 2241
2248 if (status & Y2_HWE_L1_MASK) 2242 if (status & Y2_HWE_L1_MASK)
@@ -2423,7 +2417,6 @@ static int sky2_reset(struct sky2_hw *hw)
2423 u16 status; 2417 u16 status;
2424 u8 t8; 2418 u8 t8;
2425 int i; 2419 int i;
2426 u32 msk;
2427 2420
2428 sky2_write8(hw, B0_CTST, CS_RST_CLR); 2421 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2429 2422
@@ -2464,13 +2457,9 @@ static int sky2_reset(struct sky2_hw *hw)
2464 sky2_write8(hw, B0_CTST, CS_MRST_CLR); 2457 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
2465 2458
2466 /* clear any PEX errors */ 2459 /* clear any PEX errors */
2467 if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) { 2460 if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
2468 hw->err_cap = pci_find_ext_capability(hw->pdev, PCI_EXT_CAP_ID_ERR); 2461 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
2469 if (hw->err_cap) 2462
2470 sky2_pci_write32(hw,
2471 hw->err_cap + PCI_ERR_UNCOR_STATUS,
2472 0xffffffffUL);
2473 }
2474 2463
2475 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); 2464 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
2476 hw->ports = 1; 2465 hw->ports = 1;
@@ -2527,10 +2516,7 @@ static int sky2_reset(struct sky2_hw *hw)
2527 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53); 2516 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
2528 } 2517 }
2529 2518
2530 msk = Y2_HWE_ALL_MASK; 2519 sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
2531 if (!hw->err_cap)
2532 msk &= ~Y2_IS_PCI_EXP;
2533 sky2_write32(hw, B0_HWE_IMSK, msk);
2534 2520
2535 for (i = 0; i < hw->ports; i++) 2521 for (i = 0; i < hw->ports; i++)
2536 sky2_gmac_reset(hw, i); 2522 sky2_gmac_reset(hw, i);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index f66109a96d95..43d2accf60e1 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -6,15 +6,24 @@
6 6
7#define ETH_JUMBO_MTU 9000 /* Maximum MTU supported */ 7#define ETH_JUMBO_MTU 9000 /* Maximum MTU supported */
8 8
9/* PCI device specific config registers */ 9/* PCI config registers */
10enum { 10enum {
11 PCI_DEV_REG1 = 0x40, 11 PCI_DEV_REG1 = 0x40,
12 PCI_DEV_REG2 = 0x44, 12 PCI_DEV_REG2 = 0x44,
13 PCI_DEV_STATUS = 0x7c,
13 PCI_DEV_REG3 = 0x80, 14 PCI_DEV_REG3 = 0x80,
14 PCI_DEV_REG4 = 0x84, 15 PCI_DEV_REG4 = 0x84,
15 PCI_DEV_REG5 = 0x88, 16 PCI_DEV_REG5 = 0x88,
16}; 17};
17 18
19enum {
20 PEX_DEV_CAP = 0xe4,
21 PEX_DEV_CTRL = 0xe8,
22 PEX_DEV_STA = 0xea,
23 PEX_LNK_STAT = 0xf2,
24 PEX_UNC_ERR_STAT= 0x104,
25};
26
18/* Yukon-2 */ 27/* Yukon-2 */
19enum pci_dev_reg_1 { 28enum pci_dev_reg_1 {
20 PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */ 29 PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
@@ -63,6 +72,39 @@ enum pci_dev_reg_4 {
63 PCI_STATUS_REC_MASTER_ABORT | \ 72 PCI_STATUS_REC_MASTER_ABORT | \
64 PCI_STATUS_REC_TARGET_ABORT | \ 73 PCI_STATUS_REC_TARGET_ABORT | \
65 PCI_STATUS_PARITY) 74 PCI_STATUS_PARITY)
75
76enum pex_dev_ctrl {
77 PEX_DC_MAX_RRS_MSK = 7<<12, /* Bit 14..12: Max. Read Request Size */
78 PEX_DC_EN_NO_SNOOP = 1<<11,/* Enable No Snoop */
79 PEX_DC_EN_AUX_POW = 1<<10,/* Enable AUX Power */
80 PEX_DC_EN_PHANTOM = 1<<9, /* Enable Phantom Functions */
81 PEX_DC_EN_EXT_TAG = 1<<8, /* Enable Extended Tag Field */
82 PEX_DC_MAX_PLS_MSK = 7<<5, /* Bit 7.. 5: Max. Payload Size Mask */
83 PEX_DC_EN_REL_ORD = 1<<4, /* Enable Relaxed Ordering */
84 PEX_DC_EN_UNS_RQ_RP = 1<<3, /* Enable Unsupported Request Reporting */
85 PEX_DC_EN_FAT_ER_RP = 1<<2, /* Enable Fatal Error Reporting */
86 PEX_DC_EN_NFA_ER_RP = 1<<1, /* Enable Non-Fatal Error Reporting */
87 PEX_DC_EN_COR_ER_RP = 1<<0, /* Enable Correctable Error Reporting */
88};
89#define PEX_DC_MAX_RD_RQ_SIZE(x) (((x)<<12) & PEX_DC_MAX_RRS_MSK)
90
91/* PEX_UNC_ERR_STAT PEX Uncorrectable Errors Status Register (Yukon-2) */
92enum pex_err {
93 PEX_UNSUP_REQ = 1<<20, /* Unsupported Request Error */
94
95 PEX_MALFOR_TLP = 1<<18, /* Malformed TLP */
96
97 PEX_UNEXP_COMP = 1<<16, /* Unexpected Completion */
98
99 PEX_COMP_TO = 1<<14, /* Completion Timeout */
100 PEX_FLOW_CTRL_P = 1<<13, /* Flow Control Protocol Error */
101 PEX_POIS_TLP = 1<<12, /* Poisoned TLP */
102
103 PEX_DATA_LINK_P = 1<<4, /* Data Link Protocol Error */
104 PEX_FATAL_ERRORS= (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P),
105};
106
107
66enum csr_regs { 108enum csr_regs {
67 B0_RAP = 0x0000, 109 B0_RAP = 0x0000,
68 B0_CTST = 0x0004, 110 B0_CTST = 0x0004,
@@ -1836,7 +1878,6 @@ struct sky2_hw {
1836 struct net_device *dev[2]; 1878 struct net_device *dev[2];
1837 1879
1838 int pm_cap; 1880 int pm_cap;
1839 int err_cap;
1840 u8 chip_id; 1881 u8 chip_id;
1841 u8 chip_rev; 1882 u8 chip_rev;
1842 u8 pmd_type; 1883 u8 pmd_type;
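
Together with the sky2.c hunks above, these constants replace probing for the PCI-Express AER capability with the fixed Yukon-2 config offsets, and PEX_FATAL_ERRORS names the errors after which the PEX interrupt source must be masked so a latched error cannot re-fire forever. A standalone sketch of the mask-on-fatal step; the Y2_IS_PCI_EXP value is an assumption for illustration.

    #include <stdio.h>

    #define PEX_MALFOR_TLP   (1u << 18)  /* Malformed TLP */
    #define PEX_FLOW_CTRL_P  (1u << 13)  /* Flow Control Protocol Error */
    #define PEX_DATA_LINK_P  (1u << 4)   /* Data Link Protocol Error */
    #define PEX_FATAL_ERRORS (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P)

    #define Y2_IS_PCI_EXP    (1u << 31)  /* assumed bit in B0_HWE_IMSK */

    /* After a fatal PEX error, drop the PEX source from the hardware
     * error mask; clearing the status alone does not help if the link
     * keeps reporting the same condition. */
    static unsigned handle_pex_error(unsigned pex_err, unsigned hwe_mask)
    {
        if (pex_err & PEX_FATAL_ERRORS)
            hwe_mask &= ~Y2_IS_PCI_EXP;
        return hwe_mask;
    }

    int main(void)
    {
        unsigned mask = handle_pex_error(PEX_MALFOR_TLP, 0xffffffffu);

        printf("B0_HWE_IMSK = 0x%08x\n", mask);
        return 0;
    }
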
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 636dbfcdf8cb..0c9f1e7dab2e 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -398,6 +398,24 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
398 398
399#define SMC_IRQ_FLAGS (0) 399#define SMC_IRQ_FLAGS (0)
400 400
401#elif defined(CONFIG_ARCH_VERSATILE)
402
403#define SMC_CAN_USE_8BIT 1
404#define SMC_CAN_USE_16BIT 1
405#define SMC_CAN_USE_32BIT 1
406#define SMC_NOWAIT 1
407
408#define SMC_inb(a, r) readb((a) + (r))
409#define SMC_inw(a, r) readw((a) + (r))
410#define SMC_inl(a, r) readl((a) + (r))
411#define SMC_outb(v, a, r) writeb(v, (a) + (r))
412#define SMC_outw(v, a, r) writew(v, (a) + (r))
413#define SMC_outl(v, a, r) writel(v, (a) + (r))
414#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
415#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
416
417#define SMC_IRQ_FLAGS (0)
418
401#else 419#else
402 420
403#define SMC_CAN_USE_8BIT 1 421#define SMC_CAN_USE_8BIT 1
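
The new Versatile block follows the smc91x.h contract: each platform defines SMC_in*/SMC_out* accessors plus width-capability flags, and the driver core is written purely against those names. A compilable sketch of the indirection, with an array standing in for the ioremapped register window that readw()/writew() would touch on real hardware.

    #include <stdio.h>

    static unsigned short regs[16];         /* fake MMIO window */

    #define SMC_inw(a, r)      ((void)(a), regs[(r) >> 1])
    #define SMC_outw(v, a, r)  ((void)(a), (void)(regs[(r) >> 1] = (v)))
    #define SMC_CAN_USE_16BIT  1

    int main(void)
    {
        void *ioaddr = regs;                /* would come from ioremap() */

    #if SMC_CAN_USE_16BIT
        SMC_outw(0xbeef, ioaddr, 4);
        printf("reg offset 4 = 0x%04x\n", SMC_inw(ioaddr, 4));
    #endif
        return 0;
    }
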
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 46a009085f7c..418138dd6c68 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -55,12 +55,13 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
55 "<Jens.Osterkamp@de.ibm.com>"); 55 "<Jens.Osterkamp@de.ibm.com>");
56MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver"); 56MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
57MODULE_LICENSE("GPL"); 57MODULE_LICENSE("GPL");
58MODULE_VERSION(VERSION);
58 59
59static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT; 60static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
60static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT; 61static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
61 62
62module_param(rx_descriptors, int, 0644); 63module_param(rx_descriptors, int, 0444);
63module_param(tx_descriptors, int, 0644); 64module_param(tx_descriptors, int, 0444);
64 65
65MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \ 66MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
66 "in rx chains"); 67 "in rx chains");
@@ -300,7 +301,7 @@ static int
300spider_net_init_chain(struct spider_net_card *card, 301spider_net_init_chain(struct spider_net_card *card,
301 struct spider_net_descr_chain *chain, 302 struct spider_net_descr_chain *chain,
302 struct spider_net_descr *start_descr, 303 struct spider_net_descr *start_descr,
303 int direction, int no) 304 int no)
304{ 305{
305 int i; 306 int i;
306 struct spider_net_descr *descr; 307 struct spider_net_descr *descr;
@@ -315,7 +316,7 @@ spider_net_init_chain(struct spider_net_card *card,
315 316
316 buf = pci_map_single(card->pdev, descr, 317 buf = pci_map_single(card->pdev, descr,
317 SPIDER_NET_DESCR_SIZE, 318 SPIDER_NET_DESCR_SIZE,
318 direction); 319 PCI_DMA_BIDIRECTIONAL);
319 320
320 if (pci_dma_mapping_error(buf)) 321 if (pci_dma_mapping_error(buf))
321 goto iommu_error; 322 goto iommu_error;
@@ -329,11 +330,6 @@ spider_net_init_chain(struct spider_net_card *card,
329 (descr-1)->next = start_descr; 330 (descr-1)->next = start_descr;
330 start_descr->prev = descr-1; 331 start_descr->prev = descr-1;
331 332
332 descr = start_descr;
333 if (direction == PCI_DMA_FROMDEVICE)
334 for (i=0; i < no; i++, descr++)
335 descr->next_descr_addr = descr->next->bus_addr;
336
337 spin_lock_init(&chain->lock); 333 spin_lock_init(&chain->lock);
338 chain->head = start_descr; 334 chain->head = start_descr;
339 chain->tail = start_descr; 335 chain->tail = start_descr;
@@ -346,7 +342,7 @@ iommu_error:
346 if (descr->bus_addr) 342 if (descr->bus_addr)
347 pci_unmap_single(card->pdev, descr->bus_addr, 343 pci_unmap_single(card->pdev, descr->bus_addr,
348 SPIDER_NET_DESCR_SIZE, 344 SPIDER_NET_DESCR_SIZE,
349 direction); 345 PCI_DMA_BIDIRECTIONAL);
350 return -ENOMEM; 346 return -ENOMEM;
351} 347}
352 348
@@ -362,15 +358,15 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
362 struct spider_net_descr *descr; 358 struct spider_net_descr *descr;
363 359
364 descr = card->rx_chain.head; 360 descr = card->rx_chain.head;
365 while (descr->next != card->rx_chain.head) { 361 do {
366 if (descr->skb) { 362 if (descr->skb) {
367 dev_kfree_skb(descr->skb); 363 dev_kfree_skb(descr->skb);
368 pci_unmap_single(card->pdev, descr->buf_addr, 364 pci_unmap_single(card->pdev, descr->buf_addr,
369 SPIDER_NET_MAX_FRAME, 365 SPIDER_NET_MAX_FRAME,
370 PCI_DMA_FROMDEVICE); 366 PCI_DMA_BIDIRECTIONAL);
371 } 367 }
372 descr = descr->next; 368 descr = descr->next;
373 } 369 } while (descr != card->rx_chain.head);
374} 370}
375 371
376/** 372/**
@@ -645,26 +641,41 @@ static int
645spider_net_prepare_tx_descr(struct spider_net_card *card, 641spider_net_prepare_tx_descr(struct spider_net_card *card,
646 struct sk_buff *skb) 642 struct sk_buff *skb)
647{ 643{
648 struct spider_net_descr *descr = card->tx_chain.head; 644 struct spider_net_descr *descr;
649 dma_addr_t buf; 645 dma_addr_t buf;
646 unsigned long flags;
647 int length;
650 648
651 buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); 649 length = skb->len;
650 if (length < ETH_ZLEN) {
651 if (skb_pad(skb, ETH_ZLEN-length))
652 return 0;
653 length = ETH_ZLEN;
654 }
655
656 buf = pci_map_single(card->pdev, skb->data, length, PCI_DMA_TODEVICE);
652 if (pci_dma_mapping_error(buf)) { 657 if (pci_dma_mapping_error(buf)) {
653 if (netif_msg_tx_err(card) && net_ratelimit()) 658 if (netif_msg_tx_err(card) && net_ratelimit())
654 pr_err("could not iommu-map packet (%p, %i). " 659 pr_err("could not iommu-map packet (%p, %i). "
655 "Dropping packet\n", skb->data, skb->len); 660 "Dropping packet\n", skb->data, length);
656 card->spider_stats.tx_iommu_map_error++; 661 card->spider_stats.tx_iommu_map_error++;
657 return -ENOMEM; 662 return -ENOMEM;
658 } 663 }
659 664
665 spin_lock_irqsave(&card->tx_chain.lock, flags);
666 descr = card->tx_chain.head;
667 card->tx_chain.head = descr->next;
668
660 descr->buf_addr = buf; 669 descr->buf_addr = buf;
661 descr->buf_size = skb->len; 670 descr->buf_size = length;
662 descr->next_descr_addr = 0; 671 descr->next_descr_addr = 0;
663 descr->skb = skb; 672 descr->skb = skb;
664 descr->data_status = 0; 673 descr->data_status = 0;
665 674
666 descr->dmac_cmd_status = 675 descr->dmac_cmd_status =
667 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; 676 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
677 spin_unlock_irqrestore(&card->tx_chain.lock, flags);
678
668 if (skb->protocol == htons(ETH_P_IP)) 679 if (skb->protocol == htons(ETH_P_IP))
669 switch (skb->nh.iph->protocol) { 680 switch (skb->nh.iph->protocol) {
670 case IPPROTO_TCP: 681 case IPPROTO_TCP:
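
The rewritten prepare path adds two behaviors: frames shorter than ETH_ZLEN are padded before DMA mapping (skb_pad() can fail, in which case the packet is dropped), and the head descriptor is claimed under the chain lock. The padding rule alone, sketched in plain C with memset standing in for skb_pad():

    #include <string.h>

    #define ETH_ZLEN 60     /* minimum Ethernet frame length, no FCS */

    /* Return the length to DMA-map for a 'len'-byte frame in 'buf'
     * (capacity 'cap'), or -1 if it cannot be padded to the minimum. */
    static int tx_frame_len(unsigned char *buf, int len, int cap)
    {
        if (len >= ETH_ZLEN)
            return len;
        if (cap < ETH_ZLEN)
            return -1;                      /* caller drops the frame */
        memset(buf + len, 0, ETH_ZLEN - len);   /* zero the pad bytes */
        return ETH_ZLEN;
    }

The matching unmap in the release path below uses the same rule (skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len), so map and unmap lengths always agree.
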
@@ -675,32 +686,51 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
675 break; 686 break;
676 } 687 }
677 688
689 /* Chain the bus address, so that the DMA engine finds this descr. */
678 descr->prev->next_descr_addr = descr->bus_addr; 690 descr->prev->next_descr_addr = descr->bus_addr;
679 691
692 card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
680 return 0; 693 return 0;
681} 694}
682 695
683/** 696static int
684 * spider_net_release_tx_descr - processes a used tx descriptor 697spider_net_set_low_watermark(struct spider_net_card *card)
685 * @card: card structure
686 * @descr: descriptor to release
687 *
688 * releases a used tx descriptor (unmapping, freeing of skb)
689 */
690static inline void
691spider_net_release_tx_descr(struct spider_net_card *card)
692{ 698{
699 unsigned long flags;
700 int status;
701 int cnt=0;
702 int i;
693 struct spider_net_descr *descr = card->tx_chain.tail; 703 struct spider_net_descr *descr = card->tx_chain.tail;
694 struct sk_buff *skb;
695 704
696 card->tx_chain.tail = card->tx_chain.tail->next; 705 /* Measure the length of the queue. Measurement does not
697 descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; 706 * need to be precise -- does not need a lock. */
707 while (descr != card->tx_chain.head) {
708 status = descr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
709 if (status == SPIDER_NET_DESCR_NOT_IN_USE)
710 break;
711 descr = descr->next;
712 cnt++;
713 }
698 714
699 /* unmap the skb */ 715 /* If TX queue is short, don't even bother with interrupts */
700 skb = descr->skb; 716 if (cnt < card->num_tx_desc/4)
701 pci_unmap_single(card->pdev, descr->buf_addr, skb->len, 717 return cnt;
702 PCI_DMA_TODEVICE); 718
703 dev_kfree_skb_any(skb); 719 /* Set low-watermark 3/4 of the way into the queue. */
720 descr = card->tx_chain.tail;
721 cnt = (cnt*3)/4;
722 for (i=0;i<cnt; i++)
723 descr = descr->next;
724
725 /* Set the new watermark, clear the old watermark */
726 spin_lock_irqsave(&card->tx_chain.lock, flags);
727 descr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
728 if (card->low_watermark && card->low_watermark != descr)
729 card->low_watermark->dmac_cmd_status =
730 card->low_watermark->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
731 card->low_watermark = descr;
732 spin_unlock_irqrestore(&card->tx_chain.lock, flags);
733 return cnt;
704} 734}
705 735
706/** 736/**
@@ -719,21 +749,29 @@ static int
719spider_net_release_tx_chain(struct spider_net_card *card, int brutal) 749spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
720{ 750{
721 struct spider_net_descr_chain *chain = &card->tx_chain; 751 struct spider_net_descr_chain *chain = &card->tx_chain;
752 struct spider_net_descr *descr;
753 struct sk_buff *skb;
754 u32 buf_addr;
755 unsigned long flags;
722 int status; 756 int status;
723 757
724 spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR);
725
726 while (chain->tail != chain->head) { 758 while (chain->tail != chain->head) {
727 status = spider_net_get_descr_status(chain->tail); 759 spin_lock_irqsave(&chain->lock, flags);
760 descr = chain->tail;
761
762 status = spider_net_get_descr_status(descr);
728 switch (status) { 763 switch (status) {
729 case SPIDER_NET_DESCR_COMPLETE: 764 case SPIDER_NET_DESCR_COMPLETE:
730 card->netdev_stats.tx_packets++; 765 card->netdev_stats.tx_packets++;
731 card->netdev_stats.tx_bytes += chain->tail->skb->len; 766 card->netdev_stats.tx_bytes += descr->skb->len;
732 break; 767 break;
733 768
734 case SPIDER_NET_DESCR_CARDOWNED: 769 case SPIDER_NET_DESCR_CARDOWNED:
735 if (!brutal) 770 if (!brutal) {
771 spin_unlock_irqrestore(&chain->lock, flags);
736 return 1; 772 return 1;
773 }
774
737 /* fallthrough, if we release the descriptors 775 /* fallthrough, if we release the descriptors
738 * brutally (then we don't care about 776 * brutally (then we don't care about
739 * SPIDER_NET_DESCR_CARDOWNED) */ 777 * SPIDER_NET_DESCR_CARDOWNED) */
@@ -750,11 +788,25 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
750 788
751 default: 789 default:
752 card->netdev_stats.tx_dropped++; 790 card->netdev_stats.tx_dropped++;
753 return 1; 791 if (!brutal) {
792 spin_unlock_irqrestore(&chain->lock, flags);
793 return 1;
794 }
754 } 795 }
755 spider_net_release_tx_descr(card);
756 }
757 796
797 chain->tail = descr->next;
798 descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
799 skb = descr->skb;
800 buf_addr = descr->buf_addr;
801 spin_unlock_irqrestore(&chain->lock, flags);
802
803 /* unmap the skb */
804 if (skb) {
805 int len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
806 pci_unmap_single(card->pdev, buf_addr, len, PCI_DMA_TODEVICE);
807 dev_kfree_skb(skb);
808 }
809 }
758 return 0; 810 return 0;
759} 811}
760 812
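
The low-watermark scheme above is what lets the transmit path run mostly interrupt-free: one descriptor per batch, three quarters of the way from the tail into the in-flight region, carries TXDESFLG and raises the completion interrupt for the whole batch. A sketch over a plain circular ring; the flag value is copied from the patch, everything else is assumed.

    #define TXDESFLG 0x00800000     /* per-descriptor interrupt flag */

    struct desc {
        unsigned cmd_status;
        struct desc *next;          /* circular singly linked ring */
    };

    /* 'inflight' descriptors sit between tail and head. Mark the one
     * 3/4 of the way in, clear the stale mark, and skip marking
     * entirely when the queue is short enough to leave to polling. */
    static struct desc *set_low_watermark(struct desc *tail, int inflight,
                                          int ring_size, struct desc *old_mark)
    {
        struct desc *d = tail;
        int i;

        if (inflight < ring_size / 4)
            return old_mark;            /* short queue: no irq */

        for (i = 0; i < inflight * 3 / 4; i++)
            d = d->next;

        d->cmd_status |= TXDESFLG;
        if (old_mark && old_mark != d)
            old_mark->cmd_status &= ~TXDESFLG;
        return d;
    }

In the driver the flag updates happen under tx_chain.lock because the DMA engine reads the same status words concurrently; the preliminary queue-length walk, as its comment notes, deliberately does not take the lock.
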
@@ -763,8 +815,12 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
763 * @card: card structure 815 * @card: card structure
764 * @descr: descriptor address to enable TX processing at 816 * @descr: descriptor address to enable TX processing at
765 * 817 *
766 * spider_net_kick_tx_dma writes the current tx chain head as start address 818 * This routine will start the transmit DMA running if
767 * of the tx descriptor chain and enables the transmission DMA engine 819 * it is not already running. This routine need only be
820 * called when queueing a new packet to an empty tx queue.
821 * Writes the current tx chain head as start address
822 * of the tx descriptor chain and enables the transmission
823 * DMA engine.
768 */ 824 */
769static inline void 825static inline void
770spider_net_kick_tx_dma(struct spider_net_card *card) 826spider_net_kick_tx_dma(struct spider_net_card *card)
@@ -804,65 +860,43 @@ out:
804static int 860static int
805spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) 861spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
806{ 862{
863 int cnt;
807 struct spider_net_card *card = netdev_priv(netdev); 864 struct spider_net_card *card = netdev_priv(netdev);
808 struct spider_net_descr_chain *chain = &card->tx_chain; 865 struct spider_net_descr_chain *chain = &card->tx_chain;
809 struct spider_net_descr *descr = chain->head;
810 unsigned long flags;
811 int result;
812
813 spin_lock_irqsave(&chain->lock, flags);
814 866
815 spider_net_release_tx_chain(card, 0); 867 spider_net_release_tx_chain(card, 0);
816 868
817 if (chain->head->next == chain->tail->prev) { 869 if ((chain->head->next == chain->tail->prev) ||
818 card->netdev_stats.tx_dropped++; 870 (spider_net_prepare_tx_descr(card, skb) != 0)) {
819 result = NETDEV_TX_LOCKED;
820 goto out;
821 }
822 871
823 if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) {
824 card->netdev_stats.tx_dropped++; 872 card->netdev_stats.tx_dropped++;
825 result = NETDEV_TX_LOCKED; 873 netif_stop_queue(netdev);
826 goto out; 874 return NETDEV_TX_BUSY;
827 } 875 }
828 876
829 if (spider_net_prepare_tx_descr(card, skb) != 0) { 877 cnt = spider_net_set_low_watermark(card);
830 card->netdev_stats.tx_dropped++; 878 if (cnt < 5)
831 result = NETDEV_TX_BUSY; 879 spider_net_kick_tx_dma(card);
832 goto out; 880 return NETDEV_TX_OK;
833 }
834
835 result = NETDEV_TX_OK;
836
837 spider_net_kick_tx_dma(card);
838 card->tx_chain.head = card->tx_chain.head->next;
839
840out:
841 spin_unlock_irqrestore(&chain->lock, flags);
842 netif_wake_queue(netdev);
843 return result;
844} 881}
845 882
846/** 883/**
847 * spider_net_cleanup_tx_ring - cleans up the TX ring 884 * spider_net_cleanup_tx_ring - cleans up the TX ring
848 * @card: card structure 885 * @card: card structure
849 * 886 *
850 * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use 887 * spider_net_cleanup_tx_ring is called either by the tx_timer
851 * interrupts to cleanup our TX ring) and returns sent packets to the stack 888 * or by the NAPI polling routine.
852 * by freeing them 889 * This routine releases resources associated
890 * packets, including updating the queue tail pointer.
853 */ 891 */
854static void 892static void
855spider_net_cleanup_tx_ring(struct spider_net_card *card) 893spider_net_cleanup_tx_ring(struct spider_net_card *card)
856{ 894{
857 unsigned long flags;
858
859 spin_lock_irqsave(&card->tx_chain.lock, flags);
860
861 if ((spider_net_release_tx_chain(card, 0) != 0) && 895 if ((spider_net_release_tx_chain(card, 0) != 0) &&
862 (card->netdev->flags & IFF_UP)) 896 (card->netdev->flags & IFF_UP)) {
863 spider_net_kick_tx_dma(card); 897 spider_net_kick_tx_dma(card);
864 898 netif_wake_queue(card->netdev);
865 spin_unlock_irqrestore(&card->tx_chain.lock, flags); 899 }
866} 900}
867 901
868/** 902/**
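
The transmit rewrite drops the NETDEV_TX_LOCKED handling entirely: on a full ring or a failed descriptor setup it stops the queue and returns NETDEV_TX_BUSY so the core requeues the skb, and the cleanup path wakes the queue once descriptors are reclaimed. The protocol in miniature, with stubs standing in for netif_stop_queue()/netif_wake_queue():

    #include <stdbool.h>
    #include <stdio.h>

    enum { TX_OK, TX_BUSY };

    static bool queue_stopped;
    static void stop_queue(void) { queue_stopped = true; }
    static void wake_queue(void) { queue_stopped = false; }

    static int xmit(bool ring_full)
    {
        if (ring_full) {
            stop_queue();           /* stack stops feeding us */
            return TX_BUSY;         /* core retries this skb */
        }
        return TX_OK;
    }

    static void cleanup(int reclaimed)
    {
        if (reclaimed > 0 && queue_stopped)
            wake_queue();           /* room again: resume */
    }

    int main(void)
    {
        if (xmit(true) == TX_BUSY)
            cleanup(8);
        printf("queue_stopped = %d\n", queue_stopped);
        return 0;
    }
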
@@ -1053,6 +1087,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1053 int packets_to_do, packets_done = 0; 1087 int packets_to_do, packets_done = 0;
1054 int no_more_packets = 0; 1088 int no_more_packets = 0;
1055 1089
1090 spider_net_cleanup_tx_ring(card);
1056 packets_to_do = min(*budget, netdev->quota); 1091 packets_to_do = min(*budget, netdev->quota);
1057 1092
1058 while (packets_to_do) { 1093 while (packets_to_do) {
@@ -1243,12 +1278,15 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1243 case SPIDER_NET_PHYINT: 1278 case SPIDER_NET_PHYINT:
1244 case SPIDER_NET_GMAC2INT: 1279 case SPIDER_NET_GMAC2INT:
1245 case SPIDER_NET_GMAC1INT: 1280 case SPIDER_NET_GMAC1INT:
1246 case SPIDER_NET_GIPSINT:
1247 case SPIDER_NET_GFIFOINT: 1281 case SPIDER_NET_GFIFOINT:
1248 case SPIDER_NET_DMACINT: 1282 case SPIDER_NET_DMACINT:
1249 case SPIDER_NET_GSYSINT: 1283 case SPIDER_NET_GSYSINT:
1250 break; */ 1284 break; */
1251 1285
1286 case SPIDER_NET_GIPSINT:
1287 show_error = 0;
1288 break;
1289
1252 case SPIDER_NET_GPWOPCMPINT: 1290 case SPIDER_NET_GPWOPCMPINT:
1253 /* PHY write operation completed */ 1291 /* PHY write operation completed */
1254 show_error = 0; 1292 show_error = 0;
@@ -1307,9 +1345,10 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1307 case SPIDER_NET_GDTDCEINT: 1345 case SPIDER_NET_GDTDCEINT:
1308 /* chain end. If a descriptor should be sent, kick off 1346 /* chain end. If a descriptor should be sent, kick off
1309 * tx dma 1347 * tx dma
1310 if (card->tx_chain.tail == card->tx_chain.head) 1348 if (card->tx_chain.tail != card->tx_chain.head)
1311 spider_net_kick_tx_dma(card); 1349 spider_net_kick_tx_dma(card);
1312 show_error = 0; */ 1350 */
1351 show_error = 0;
1313 break; 1352 break;
1314 1353
1315 /* case SPIDER_NET_G1TMCNTINT: not used. print a message */ 1354 /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
@@ -1354,7 +1393,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1354 if (netif_msg_intr(card)) 1393 if (netif_msg_intr(card))
1355 pr_err("got descriptor chain end interrupt, " 1394 pr_err("got descriptor chain end interrupt, "
1356 "restarting DMAC %c.\n", 1395 "restarting DMAC %c.\n",
1357 'D'+i-SPIDER_NET_GDDDCEINT); 1396 'D'-(i-SPIDER_NET_GDDDCEINT)/3);
1358 spider_net_refill_rx_chain(card); 1397 spider_net_refill_rx_chain(card);
1359 spider_net_enable_rxdmac(card); 1398 spider_net_enable_rxdmac(card);
1360 show_error = 0; 1399 show_error = 0;
@@ -1423,8 +1462,9 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1423 } 1462 }
1424 1463
1425 if ((show_error) && (netif_msg_intr(card))) 1464 if ((show_error) && (netif_msg_intr(card)))
1426 pr_err("Got error interrupt, GHIINT0STS = 0x%08x, " 1465 pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
1427 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n", 1466 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
1467 card->netdev->name,
1428 status_reg, error_reg1, error_reg2); 1468 status_reg, error_reg1, error_reg2);
1429 1469
1430 /* clear interrupt sources */ 1470 /* clear interrupt sources */
@@ -1460,6 +1500,8 @@ spider_net_interrupt(int irq, void *ptr)
1460 spider_net_rx_irq_off(card); 1500 spider_net_rx_irq_off(card);
1461 netif_rx_schedule(netdev); 1501 netif_rx_schedule(netdev);
1462 } 1502 }
1503 if (status_reg & SPIDER_NET_TXINT)
1504 netif_rx_schedule(netdev);
1463 1505
1464 if (status_reg & SPIDER_NET_ERRINT ) 1506 if (status_reg & SPIDER_NET_ERRINT )
1465 spider_net_handle_error_irq(card, status_reg); 1507 spider_net_handle_error_irq(card, status_reg);
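
With TXINT now scheduling NAPI and spider_net_poll() calling spider_net_cleanup_tx_ring() first, transmit completions ride the same softirq as receive instead of depending solely on the tx timer. The rough shape of such a combined poll handler, with stub helpers and the real budget/quota bookkeeping elided:

    static int reclaim_tx(void) { return 0; }   /* stub: free sent skbs */
    static int rx_one(void)     { return 0; }   /* stub: 0 means ring empty */

    static int poll(int budget, int *done)
    {
        int work = 0;

        reclaim_tx();                   /* tx completions first */

        while (work < budget && rx_one())
            work++;                     /* then consume rx descriptors */

        *done = (work < budget);        /* ring empty: re-enable irqs */
        return work;
    }
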
@@ -1599,7 +1641,7 @@ spider_net_enable_card(struct spider_net_card *card)
1599 SPIDER_NET_INT2_MASK_VALUE); 1641 SPIDER_NET_INT2_MASK_VALUE);
1600 1642
1601 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 1643 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1602 SPIDER_NET_GDTDCEIDIS); 1644 SPIDER_NET_GDTBSTA | SPIDER_NET_GDTDCEIDIS);
1603} 1645}
1604 1646
1605/** 1647/**
@@ -1615,17 +1657,26 @@ int
1615spider_net_open(struct net_device *netdev) 1657spider_net_open(struct net_device *netdev)
1616{ 1658{
1617 struct spider_net_card *card = netdev_priv(netdev); 1659 struct spider_net_card *card = netdev_priv(netdev);
1618 int result; 1660 struct spider_net_descr *descr;
1661 int i, result;
1619 1662
1620 result = -ENOMEM; 1663 result = -ENOMEM;
1621 if (spider_net_init_chain(card, &card->tx_chain, card->descr, 1664 if (spider_net_init_chain(card, &card->tx_chain, card->descr,
1622 PCI_DMA_TODEVICE, card->tx_desc)) 1665 card->num_tx_desc))
1623 goto alloc_tx_failed; 1666 goto alloc_tx_failed;
1667
1668 card->low_watermark = NULL;
1669
1670 /* rx_chain is after tx_chain, so offset is descr + tx_count */
1624 if (spider_net_init_chain(card, &card->rx_chain, 1671 if (spider_net_init_chain(card, &card->rx_chain,
1625 card->descr + card->rx_desc, 1672 card->descr + card->num_tx_desc,
1626 PCI_DMA_FROMDEVICE, card->rx_desc)) 1673 card->num_rx_desc))
1627 goto alloc_rx_failed; 1674 goto alloc_rx_failed;
1628 1675
1676 descr = card->rx_chain.head;
1677 for (i=0; i < card->num_rx_desc; i++, descr++)
1678 descr->next_descr_addr = descr->next->bus_addr;
1679
1629 /* allocate rx skbs */ 1680 /* allocate rx skbs */
1630 if (spider_net_alloc_rx_skbs(card)) 1681 if (spider_net_alloc_rx_skbs(card))
1631 goto alloc_skbs_failed; 1682 goto alloc_skbs_failed;
@@ -1878,10 +1929,7 @@ spider_net_stop(struct net_device *netdev)
1878 spider_net_disable_rxdmac(card); 1929 spider_net_disable_rxdmac(card);
1879 1930
1880 /* release chains */ 1931 /* release chains */
1881 if (spin_trylock(&card->tx_chain.lock)) { 1932 spider_net_release_tx_chain(card, 1);
1882 spider_net_release_tx_chain(card, 1);
1883 spin_unlock(&card->tx_chain.lock);
1884 }
1885 1933
1886 spider_net_free_chain(card, &card->tx_chain); 1934 spider_net_free_chain(card, &card->tx_chain);
1887 spider_net_free_chain(card, &card->rx_chain); 1935 spider_net_free_chain(card, &card->rx_chain);
@@ -2012,8 +2060,8 @@ spider_net_setup_netdev(struct spider_net_card *card)
2012 2060
2013 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2061 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2014 2062
2015 card->tx_desc = tx_descriptors; 2063 card->num_tx_desc = tx_descriptors;
2016 card->rx_desc = rx_descriptors; 2064 card->num_rx_desc = rx_descriptors;
2017 2065
2018 spider_net_setup_netdev_ops(netdev); 2066 spider_net_setup_netdev_ops(netdev);
2019 2067
@@ -2252,6 +2300,8 @@ static struct pci_driver spider_net_driver = {
2252 */ 2300 */
2253static int __init spider_net_init(void) 2301static int __init spider_net_init(void)
2254{ 2302{
2303 printk(KERN_INFO "Spidernet version %s.\n", VERSION);
2304
2255 if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) { 2305 if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
2256 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN; 2306 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
2257 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors); 2307 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index a59deda2f95e..b3b46119b424 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -24,6 +24,8 @@
24#ifndef _SPIDER_NET_H 24#ifndef _SPIDER_NET_H
25#define _SPIDER_NET_H 25#define _SPIDER_NET_H
26 26
27#define VERSION "1.1 A"
28
27#include "sungem_phy.h" 29#include "sungem_phy.h"
28 30
29extern int spider_net_stop(struct net_device *netdev); 31extern int spider_net_stop(struct net_device *netdev);
@@ -47,7 +49,7 @@ extern char spider_net_driver_name[];
47#define SPIDER_NET_TX_DESCRIPTORS_MIN 16 49#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
48#define SPIDER_NET_TX_DESCRIPTORS_MAX 512 50#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
49 51
50#define SPIDER_NET_TX_TIMER 20 52#define SPIDER_NET_TX_TIMER (HZ/5)
51 53
52#define SPIDER_NET_RX_CSUM_DEFAULT 1 54#define SPIDER_NET_RX_CSUM_DEFAULT 1
53 55
@@ -189,7 +191,9 @@ extern char spider_net_driver_name[];
189#define SPIDER_NET_MACMODE_VALUE 0x00000001 191#define SPIDER_NET_MACMODE_VALUE 0x00000001
190#define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */ 192#define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */
191 193
192/* 1(0) enable r/tx dma 194/* DMAC control register GDMACCNTR
195 *
196 * 1(0) enable r/tx dma
193 * 0000000 fixed to 0 197 * 0000000 fixed to 0
194 * 198 *
195 * 000000 fixed to 0 199 * 000000 fixed to 0
@@ -198,6 +202,7 @@ extern char spider_net_driver_name[];
198 * 202 *
199 * 000000 fixed to 0 203 * 000000 fixed to 0
200 * 00 burst alignment: 128 bytes 204 * 00 burst alignment: 128 bytes
205 * 11 burst alignment: 1024 bytes
201 * 206 *
202 * 00000 fixed to 0 207 * 00000 fixed to 0
203 * 0 descr writeback size 32 bytes 208 * 0 descr writeback size 32 bytes
@@ -208,10 +213,13 @@ extern char spider_net_driver_name[];
208#define SPIDER_NET_DMA_RX_VALUE 0x80000000 213#define SPIDER_NET_DMA_RX_VALUE 0x80000000
209#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 214#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
210/* to set TX_DMA_EN */ 215/* to set TX_DMA_EN */
211#define SPIDER_NET_TX_DMA_EN 0x80000000 216#define SPIDER_NET_TX_DMA_EN 0x80000000
212#define SPIDER_NET_GDTDCEIDIS 0x00000002 217#define SPIDER_NET_GDTBSTA 0x00000300
213#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \ 218#define SPIDER_NET_GDTDCEIDIS 0x00000002
214 SPIDER_NET_GDTDCEIDIS 219#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
220 SPIDER_NET_GDTBSTA | \
221 SPIDER_NET_GDTDCEIDIS
222
215#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 223#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
216 224
217/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ 225/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
@@ -320,13 +328,10 @@ enum spider_net_int2_status {
320 SPIDER_NET_GRISPDNGINT 328 SPIDER_NET_GRISPDNGINT
321}; 329};
322 330
323#define SPIDER_NET_TXINT ( (1 << SPIDER_NET_GTTEDINT) | \ 331#define SPIDER_NET_TXINT ( (1 << SPIDER_NET_GDTFDCINT) )
324 (1 << SPIDER_NET_GDTDCEINT) | \
325 (1 << SPIDER_NET_GDTFDCINT) )
326 332
327/* we rely on flagged descriptor interrupts*/ 333/* We rely on flagged descriptor interrupts */
328#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \ 334#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
329 (1 << SPIDER_NET_GRMFLLINT) )
330 335
331#define SPIDER_NET_ERRINT ( 0xffffffff & \ 336#define SPIDER_NET_ERRINT ( 0xffffffff & \
332 (~SPIDER_NET_TXINT) & \ 337 (~SPIDER_NET_TXINT) & \
@@ -349,6 +354,7 @@ enum spider_net_int2_status {
349#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */ 354#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
350#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */ 355#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
351#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000 356#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
357#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
352 358
353struct spider_net_descr { 359struct spider_net_descr {
354 /* as defined by the hardware */ 360 /* as defined by the hardware */
@@ -433,6 +439,7 @@ struct spider_net_card {
433 439
434 struct spider_net_descr_chain tx_chain; 440 struct spider_net_descr_chain tx_chain;
435 struct spider_net_descr_chain rx_chain; 441 struct spider_net_descr_chain rx_chain;
442 struct spider_net_descr *low_watermark;
436 443
437 struct net_device_stats netdev_stats; 444 struct net_device_stats netdev_stats;
438 445
@@ -448,8 +455,8 @@ struct spider_net_card {
448 455
449 /* for ethtool */ 456 /* for ethtool */
450 int msg_enable; 457 int msg_enable;
451 int rx_desc; 458 int num_rx_desc;
452 int tx_desc; 459 int num_tx_desc;
453 struct spider_net_extra_stats spider_stats; 460 struct spider_net_extra_stats spider_stats;
454 461
455 struct spider_net_descr descr[0]; 462 struct spider_net_descr descr[0];
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index 589e43658dee..91b995102915 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -76,7 +76,7 @@ spider_net_ethtool_get_drvinfo(struct net_device *netdev,
76 /* clear and fill out info */ 76 /* clear and fill out info */
77 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); 77 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
78 strncpy(drvinfo->driver, spider_net_driver_name, 32); 78 strncpy(drvinfo->driver, spider_net_driver_name, 32);
79 strncpy(drvinfo->version, "0.1", 32); 79 strncpy(drvinfo->version, VERSION, 32);
80 strcpy(drvinfo->fw_version, "no information"); 80 strcpy(drvinfo->fw_version, "no information");
81 strncpy(drvinfo->bus_info, pci_name(card->pdev), 32); 81 strncpy(drvinfo->bus_info, pci_name(card->pdev), 32);
82} 82}
@@ -158,9 +158,9 @@ spider_net_ethtool_get_ringparam(struct net_device *netdev,
158 struct spider_net_card *card = netdev->priv; 158 struct spider_net_card *card = netdev->priv;
159 159
160 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX; 160 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
161 ering->tx_pending = card->tx_desc; 161 ering->tx_pending = card->num_tx_desc;
162 ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX; 162 ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
163 ering->rx_pending = card->rx_desc; 163 ering->rx_pending = card->num_rx_desc;
164} 164}
165 165
166static int spider_net_get_stats_count(struct net_device *netdev) 166static int spider_net_get_stats_count(struct net_device *netdev)
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 2cfd9634895a..f6b3a94e97bf 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1730,7 +1730,7 @@ static void __init de21040_get_media_info(struct de_private *de)
1730} 1730}
1731 1731
1732/* Note: this routine returns extra data bits for size detection. */ 1732/* Note: this routine returns extra data bits for size detection. */
1733static unsigned __init tulip_read_eeprom(void __iomem *regs, int location, int addr_len) 1733static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
1734{ 1734{
1735 int i; 1735 int i;
1736 unsigned retval = 0; 1736 unsigned retval = 0;
@@ -1926,7 +1926,7 @@ bad_srom:
1926 goto fill_defaults; 1926 goto fill_defaults;
1927} 1927}
1928 1928
1929static int __init de_init_one (struct pci_dev *pdev, 1929static int __devinit de_init_one (struct pci_dev *pdev,
1930 const struct pci_device_id *ent) 1930 const struct pci_device_id *ent)
1931{ 1931{
1932 struct net_device *dev; 1932 struct net_device *dev;
@@ -2082,7 +2082,7 @@ err_out_free:
2082 return rc; 2082 return rc;
2083} 2083}
2084 2084
2085static void __exit de_remove_one (struct pci_dev *pdev) 2085static void __devexit de_remove_one (struct pci_dev *pdev)
2086{ 2086{
2087 struct net_device *dev = pci_get_drvdata(pdev); 2087 struct net_device *dev = pci_get_drvdata(pdev);
2088 struct de_private *de = dev->priv; 2088 struct de_private *de = dev->priv;
@@ -2164,7 +2164,7 @@ static struct pci_driver de_driver = {
2164 .name = DRV_NAME, 2164 .name = DRV_NAME,
2165 .id_table = de_pci_tbl, 2165 .id_table = de_pci_tbl,
2166 .probe = de_init_one, 2166 .probe = de_init_one,
2167 .remove = __exit_p(de_remove_one), 2167 .remove = __devexit_p(de_remove_one),
2168#ifdef CONFIG_PM 2168#ifdef CONFIG_PM
2169 .suspend = de_suspend, 2169 .suspend = de_suspend,
2170 .resume = de_resume, 2170 .resume = de_resume,
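
The de2104x annotations are correctness fixes, not style: .init.text is discarded once boot finishes, so a probe routine that can run at hotplug time, and a remove routine that can run at unbind time, must use the __devinit/__devexit family instead of __init/__exit. A stand-alone sketch of the convention, with the kernel macros stubbed out so it compiles anywhere (in-tree they come from <linux/init.h>, and pci_driver_sketch stands in for the real struct pci_driver):

    #include <stddef.h>

    /* Illustrative stubs; in the kernel these section attributes decide
     * whether the code survives past boot. */
    #define __devinit
    #define __devexit
    #ifdef CONFIG_HOTPLUG
    #define __devexit_p(fn) (fn)
    #else
    #define __devexit_p(fn) NULL    /* remove path compiled away */
    #endif

    struct pci_dev;
    struct pci_device_id;

    static int __devinit de_init_one(struct pci_dev *pdev,
                                     const struct pci_device_id *ent)
    {
        (void)pdev; (void)ent;      /* probe may run long after boot */
        return 0;
    }

    static void __devexit de_remove_one(struct pci_dev *pdev)
    {
        (void)pdev;
    }

    struct pci_driver_sketch {
        int  (*probe)(struct pci_dev *, const struct pci_device_id *);
        void (*remove)(struct pci_dev *);
    };

    static struct pci_driver_sketch de_driver = {
        .probe  = de_init_one,
        .remove = __devexit_p(de_remove_one), /* NULL without hotplug */
    };
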