author		Linus Torvalds <torvalds@g5.osdl.org>	2005-11-05 15:04:02 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-05 15:04:02 -0500
commit		70d9d825e0a5a78ec1dacaaaf5c72ff5b0206fab (patch)
tree		34f0675602943161c3dc1340d6c8c449283b681c
parent		537a95d9351f41cc3c24ddb2a646aedd6debb21b (diff)
parent		f896424cbc61225e8f029fe23e5aae3e32103229 (diff)

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
-rw-r--r--  Documentation/networking/s2io.txt   | 199
-rw-r--r--  MAINTAINERS                         |   9
-rw-r--r--  drivers/net/Kconfig                 |  13
-rw-r--r--  drivers/net/fec_8xx/Kconfig         |   8
-rw-r--r--  drivers/net/fec_8xx/fec_mii.c       |  42
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c  |  21
-rw-r--r--  drivers/net/ibm_emac/ibm_emac.h     |  22
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_core.c|  20
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_mal.h |   5
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_phy.c |  12
-rw-r--r--  drivers/net/pcnet32.c               |  87
-rw-r--r--  drivers/net/phy/mdio_bus.c          |   3
-rw-r--r--  drivers/net/s2io.c                  | 762
-rw-r--r--  drivers/net/s2io.h                  |  91
-rw-r--r--  drivers/net/wireless/airo.c         |   2
-rw-r--r--  include/linux/phy.h                 |   3
16 files changed, 792 insertions(+), 507 deletions(-)
diff --git a/Documentation/networking/s2io.txt b/Documentation/networking/s2io.txt
index 6726b524ec45..bd528ffbeb4b 100644
--- a/Documentation/networking/s2io.txt
+++ b/Documentation/networking/s2io.txt
@@ -1,48 +1,153 @@
-S2IO Technologies XFrame 10 Gig adapter.
--------------------------------------------
-
-I. Module loadable parameters.
-When loaded as a module, the driver provides a host of Module loadable
-parameters, so the device can be tuned as per the users needs.
-A list of the Module params is given below.
-(i)    ring_num: This can be used to program the number of
-	 receive rings used in the driver.
-(ii)   ring_len: This defines the number of descriptors each ring
-	 can have. There can be a maximum of 8 rings.
-(iii)  frame_len: This is an array of size 8. Using this we can
-	 set the maximum size of the received frame that can
-	 be steered into the corrsponding receive ring.
-(iv)   fifo_num: This defines the number of Tx FIFOs thats used in
-	 the driver.
-(v)    fifo_len: Each element defines the number of
-	 Tx descriptors that can be associated with each
-	 corresponding FIFO. There are a maximum of 8 FIFOs.
-(vi)   tx_prio: This is a bool, if module is loaded with a non-zero
-	 value for tx_prio multi FIFO scheme is activated.
-(vii)  rx_prio: This is a bool, if module is loaded with a non-zero
-	 value for tx_prio multi RING scheme is activated.
-(viii) latency_timer: The value given against this param will be
-	 loaded into the latency timer register in PCI Config
-	 space, else the register is left with its reset value.
-
-II. Performance tuning.
-    By changing a few sysctl parameters.
-	Copy the following lines into a file and run the following command,
-	"sysctl -p <file_name>"
-### IPV4 specific settings
-net.ipv4.tcp_timestamps = 0 # turns TCP timestamp support off, default 1, reduces CPU use
-net.ipv4.tcp_sack = 0 # turn SACK support off, default on
-# on systems with a VERY fast bus -> memory interface this is the big gainer
-net.ipv4.tcp_rmem = 10000000 10000000 10000000 # sets min/default/max TCP read buffer, default 4096 87380 174760
-net.ipv4.tcp_wmem = 10000000 10000000 10000000 # sets min/pressure/max TCP write buffer, default 4096 16384 131072
-net.ipv4.tcp_mem = 10000000 10000000 10000000 # sets min/pressure/max TCP buffer space, default 31744 32256 32768
-
-### CORE settings (mostly for socket and UDP effect)
-net.core.rmem_max = 524287 # maximum receive socket buffer size, default 131071
-net.core.wmem_max = 524287 # maximum send socket buffer size, default 131071
-net.core.rmem_default = 524287 # default receive socket buffer size, default 65535
-net.core.wmem_default = 524287 # default send socket buffer size, default 65535
-net.core.optmem_max = 524287 # maximum amount of option memory buffers, default 10240
-net.core.netdev_max_backlog = 300000 # number of unprocessed input packets before kernel starts dropping them, default 300
----End of performance tuning file---
+Release notes for Neterion's (Formerly S2io) Xframe I/II PCI-X 10GbE driver.
+
+Contents
+=======
+- 1. Introduction
+- 2. Identifying the adapter/interface
+- 3. Features supported
+- 4. Command line parameters
+- 5. Performance suggestions
+- 6. Available Downloads
+
+
+1. Introduction:
+This Linux driver supports Neterion's Xframe I PCI-X 1.0 and
+Xframe II PCI-X 2.0 adapters. It supports several features
+such as jumbo frames, MSI/MSI-X, checksum offloads, TSO, UFO and so on.
+See below for complete list of features.
+All features are supported for both IPv4 and IPv6.
+
+2. Identifying the adapter/interface:
+a. Insert the adapter(s) in your system.
+b. Build and load driver
+# insmod s2io.ko
+c. View log messages
+# dmesg | tail -40
+You will see messages similar to:
+eth3: Neterion Xframe I 10GbE adapter (rev 3), Version 2.0.9.1, Intr type INTA
+eth4: Neterion Xframe II 10GbE adapter (rev 2), Version 2.0.9.1, Intr type INTA
+eth4: Device is on 64 bit 133MHz PCIX(M1) bus
+
+The above messages identify the adapter type(Xframe I/II), adapter revision,
+driver version, interface name(eth3, eth4), Interrupt type(INTA, MSI, MSI-X).
+In case of Xframe II, the PCI/PCI-X bus width and frequency are displayed
+as well.
+
+To associate an interface with a physical adapter use "ethtool -p <ethX>".
+The corresponding adapter's LED will blink multiple times.
+
+3. Features supported:
+a. Jumbo frames. Xframe I/II supports MTU upto 9600 bytes,
+modifiable using ifconfig command.
+
+b. Offloads. Supports checksum offload(TCP/UDP/IP) on transmit
+and receive, TSO.
+
+c. Multi-buffer receive mode. Scattering of packet across multiple
+buffers. Currently driver supports 2-buffer mode which yields
+significant performance improvement on certain platforms(SGI Altix,
+IBM xSeries).
+
+d. MSI/MSI-X. Can be enabled on platforms which support this feature
+(IA64, Xeon) resulting in noticeable performance improvement(upto 7%
+on certain platforms).
+
+e. NAPI. Compile-time option(CONFIG_S2IO_NAPI) for better Rx interrupt
+moderation.
+
+f. Statistics. Comprehensive MAC-level and software statistics displayed
+using "ethtool -S" option.
+
+g. Multi-FIFO/Ring. Supports up to 8 transmit queues and receive rings,
+with multiple steering options.
+
+4. Command line parameters
+a. tx_fifo_num
+Number of transmit queues
+Valid range: 1-8
+Default: 1
+
+b. rx_ring_num
+Number of receive rings
+Valid range: 1-8
+Default: 1
+
+c. tx_fifo_len
+Size of each transmit queue
+Valid range: Total length of all queues should not exceed 8192
+Default: 4096
+
+d. rx_ring_sz
+Size of each receive ring(in 4K blocks)
+Valid range: Limited by memory on system
+Default: 30
+
+e. intr_type
+Specifies interrupt type. Possible values 1(INTA), 2(MSI), 3(MSI-X)
+Valid range: 1-3
+Default: 1
+
+5. Performance suggestions
+General:
+a. Set MTU to maximum(9000 for switch setup, 9600 in back-to-back configuration)
+b. Set TCP windows size to optimal value.
+For instance, for MTU=1500 a value of 210K has been observed to result in
+good performance.
+# sysctl -w net.ipv4.tcp_rmem="210000 210000 210000"
+# sysctl -w net.ipv4.tcp_wmem="210000 210000 210000"
+For MTU=9000, TCP window size of 10 MB is recommended.
+# sysctl -w net.ipv4.tcp_rmem="10000000 10000000 10000000"
+# sysctl -w net.ipv4.tcp_wmem="10000000 10000000 10000000"
+
+Transmit performance:
+a. By default, the driver respects BIOS settings for PCI bus parameters.
+However, you may want to experiment with PCI bus parameters
+max-split-transactions(MOST) and MMRBC (use setpci command).
+A MOST value of 2 has been found optimal for Opterons and 3 for Itanium.
+It could be different for your hardware.
+Set MMRBC to 4K**.
+
+For example you can set
+For opteron
+#setpci -d 17d5:* 62=1d
+For Itanium
+#setpci -d 17d5:* 62=3d
+
+For detailed description of the PCI registers, please see Xframe User Guide.
+
+b. Ensure Transmit Checksum offload is enabled. Use ethtool to set/verify this
+parameter.
+c. Turn on TSO(using "ethtool -K")
+# ethtool -K <ethX> tso on
+
+Receive performance:
+a. By default, the driver respects BIOS settings for PCI bus parameters.
+However, you may want to set PCI latency timer to 248.
+#setpci -d 17d5:* LATENCY_TIMER=f8
+For detailed description of the PCI registers, please see Xframe User Guide.
+b. Use 2-buffer mode. This results in large performance boost on
+on certain platforms(eg. SGI Altix, IBM xSeries).
+c. Ensure Receive Checksum offload is enabled. Use "ethtool -K ethX" command to
+set/verify this option.
+d. Enable NAPI feature(in kernel configuration Device Drivers ---> Network
+device support --->  Ethernet (10000 Mbit) ---> S2IO 10Gbe Xframe NIC) to
+bring down CPU utilization.
+
+** For AMD opteron platforms with 8131 chipset, MMRBC=1 and MOST=1 are
+recommended as safe parameters.
+For more information, please review the AMD8131 errata at
+http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/26310.pdf
+
+6. Available Downloads
+Neterion "s2io" driver in Red Hat and Suse 2.6-based distributions is kept up
+to date, also the latest "s2io" code (including support for 2.4 kernels) is
+available via "Support" link on the Neterion site: http://www.neterion.com.
+
+For Xframe User Guide (Programming manual), visit ftp site ns1.s2io.com,
+user: linuxdocs password: HALdocs
+
+7. Support
+For further support please contact either your 10GbE Xframe NIC vendor (IBM,
+HP, SGI etc.) or click on the "Support" link on the Neterion site:
+http://www.neterion.com.
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 983f9e9aed61..f08a1434b217 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -910,6 +910,15 @@ L: linux-fbdev-devel@lists.sourceforge.net
 W:	http://linux-fbdev.sourceforge.net/
 S:	Maintained
 
+FREESCALE SOC FS_ENET DRIVER
+P:	Pantelis Antoniou
+M:	pantelis.antoniou@gmail.com
+P:	Vitaly Bordug
+M:	vbordug@ru.mvista.com
+L:	linuxppc-embedded@ozlabs.org
+L:	netdev@vger.kernel.org
+S:	Maintained
+
 FILE LOCKING (flock() and fcntl()/lockf())
 P:	Matthew Wilcox
 M:	matthew@wil.cx
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 57edae4790e8..1958d9e16a3a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1203,7 +1203,7 @@ config IBM_EMAC_RX_SKB_HEADROOM
 
 config IBM_EMAC_PHY_RX_CLK_FIX
 	bool "PHY Rx clock workaround"
-	depends on IBM_EMAC && (405EP || 440GX || 440EP)
+	depends on IBM_EMAC && (405EP || 440GX || 440EP || 440GR)
 	help
 	  Enable this if EMAC attached to a PHY which doesn't generate
 	  RX clock if there is no link, if this is the case, you will
@@ -2258,17 +2258,6 @@ config S2IO_NAPI
 
 	  If in doubt, say N.
 
-config 2BUFF_MODE
-	bool "Use 2 Buffer Mode on Rx side."
-	depends on S2IO
-	---help---
-	On enabling the 2 buffer mode, the received frame will be
-	split into 2 parts before being DMA'ed to the hosts memory.
-	The parts are the ethernet header and ethernet payload.
-	This is useful on systems where DMA'ing to to unaligned
-	physical memory loactions comes with a heavy price.
-	If not sure please say N.
-
 endmenu
 
 if !UML
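
The deleted 2BUFF_MODE Kconfig entry is superseded by the runtime rx_ring_mode parameter introduced in the s2io.c hunks further down. As a hedged sketch of how such a load-time knob is typically wired up (the actual module_param registration line is not visible in this diff, so treat the registration below as an assumption):

	#include <linux/module.h>

	/* Sketch only: buffer mode chosen at modprobe time instead of
	 * build time; 1 = single-buffer RxDs, 2/3 = multi-buffer RxDs. */
	static unsigned int rx_ring_mode = 1;
	module_param(rx_ring_mode, uint, 0);
	MODULE_PARM_DESC(rx_ring_mode, "RxD operation mode (1, 2 or 3)");

Loading with "modprobe s2io rx_ring_mode=2" would then select 2-buffer mode without a rebuild.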
diff --git a/drivers/net/fec_8xx/Kconfig b/drivers/net/fec_8xx/Kconfig
index db36ac3ea453..4560026ed419 100644
--- a/drivers/net/fec_8xx/Kconfig
+++ b/drivers/net/fec_8xx/Kconfig
@@ -1,6 +1,6 @@
 config FEC_8XX
 	tristate "Motorola 8xx FEC driver"
-	depends on NET_ETHERNET && 8xx && (NETTA || NETPHONE)
+	depends on NET_ETHERNET
 	select MII
 
 config FEC_8XX_GENERIC_PHY
@@ -12,3 +12,9 @@ config FEC_8XX_DM9161_PHY
 	bool "Support DM9161 PHY"
 	depends on FEC_8XX
 	default n
+
+config FEC_8XX_LXT971_PHY
+	bool "Support LXT971/LXT972 PHY"
+	depends on FEC_8XX
+	default n
+
diff --git a/drivers/net/fec_8xx/fec_mii.c b/drivers/net/fec_8xx/fec_mii.c
index 803eb095cf8e..3b44ac1a7bfe 100644
--- a/drivers/net/fec_8xx/fec_mii.c
+++ b/drivers/net/fec_8xx/fec_mii.c
@@ -203,6 +203,39 @@ static void dm9161_shutdown(struct net_device *dev)
 
 #endif
 
+#ifdef CONFIG_FEC_8XX_LXT971_PHY
+
+/* Support for LXT971/972 PHY */
+
+#define MII_LXT971_PCR		16	/* Port Control Register */
+#define MII_LXT971_SR2		17	/* Status Register 2 */
+#define MII_LXT971_IER		18	/* Interrupt Enable Register */
+#define MII_LXT971_ISR		19	/* Interrupt Status Register */
+#define MII_LXT971_LCR		20	/* LED Control Register */
+#define MII_LXT971_TCR		30	/* Transmit Control Register */
+
+static void lxt971_startup(struct net_device *dev)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+
+	fec_mii_write(dev, fep->mii_if.phy_id, MII_LXT971_IER, 0x00F2);
+}
+
+static void lxt971_ack_int(struct net_device *dev)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+
+	fec_mii_read(dev, fep->mii_if.phy_id, MII_LXT971_ISR);
+}
+
+static void lxt971_shutdown(struct net_device *dev)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+
+	fec_mii_write(dev, fep->mii_if.phy_id, MII_LXT971_IER, 0x0000);
+}
+#endif
+
 /**********************************************************************************/
 
 static const struct phy_info phy_info[] = {
@@ -215,6 +248,15 @@ static const struct phy_info phy_info[] = {
 		.shutdown = dm9161_shutdown,
 	},
 #endif
+#ifdef CONFIG_FEC_8XX_LXT971_PHY
+	{
+		.id = 0x0001378e,
+		.name = "LXT971/972",
+		.startup = lxt971_startup,
+		.ack_int = lxt971_ack_int,
+		.shutdown = lxt971_shutdown,
+	},
+#endif
 #ifdef CONFIG_FEC_8XX_GENERIC_PHY
 	{
 		.id = 0,
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 44fac7373289..9342d5bc7bb4 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -130,7 +130,7 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget)
 
 		skb = fep->rx_skbuff[curidx];
 
-		dma_unmap_single(fep->dev, skb->data,
+		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
 				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
 				DMA_FROM_DEVICE);
 
@@ -144,7 +144,7 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget)
 
 		skb = fep->rx_skbuff[curidx];
 
-		dma_unmap_single(fep->dev, skb->data,
+		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
 				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
 				DMA_FROM_DEVICE);
 
@@ -268,7 +268,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
 
 		skb = fep->rx_skbuff[curidx];
 
-		dma_unmap_single(fep->dev, skb->data,
+		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
 				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
 				DMA_FROM_DEVICE);
 
@@ -278,7 +278,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
 
 		skb = fep->rx_skbuff[curidx];
 
-		dma_unmap_single(fep->dev, skb->data,
+		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
 				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
 				DMA_FROM_DEVICE);
 
@@ -399,7 +399,8 @@ static void fs_enet_tx(struct net_device *dev)
 			fep->stats.collisions++;
 
 		/* unmap */
-		dma_unmap_single(fep->dev, skb->data, skb->len, DMA_TO_DEVICE);
+		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+				skb->len, DMA_TO_DEVICE);
 
 		/*
 		 * Free the sk buffer associated with this last transmit.
@@ -547,17 +548,19 @@ void fs_cleanup_bds(struct net_device *dev)
 {
 	struct fs_enet_private *fep = netdev_priv(dev);
 	struct sk_buff *skb;
+	cbd_t *bdp;
 	int i;
 
 	/*
 	 * Reset SKB transmit buffers.
 	 */
-	for (i = 0; i < fep->tx_ring; i++) {
+	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
 		if ((skb = fep->tx_skbuff[i]) == NULL)
 			continue;
 
 		/* unmap */
-		dma_unmap_single(fep->dev, skb->data, skb->len, DMA_TO_DEVICE);
+		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+				skb->len, DMA_TO_DEVICE);
 
 		fep->tx_skbuff[i] = NULL;
 		dev_kfree_skb(skb);
@@ -566,12 +569,12 @@ void fs_cleanup_bds(struct net_device *dev)
 	/*
 	 * Reset SKB receive buffers
 	 */
-	for (i = 0; i < fep->rx_ring; i++) {
+	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
 		if ((skb = fep->rx_skbuff[i]) == NULL)
 			continue;
 
 		/* unmap */
-		dma_unmap_single(fep->dev, skb->data,
+		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
 				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
 				DMA_FROM_DEVICE);
 
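
The fs_enet change above fixes a classic DMA API mistake: dma_unmap_single() was being handed the CPU virtual address (skb->data) instead of the bus address returned by dma_map_single() and stored in the buffer descriptor (CBDR_BUFADDR(bdp)). A minimal sketch of the invariant being restored, with a hypothetical descriptor type standing in for cbd_t:

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	struct example_bd {		/* hypothetical buffer descriptor */
		dma_addr_t buf_addr;	/* bus address programmed into HW */
	};

	static void example_map(struct device *dev, struct sk_buff *skb,
				struct example_bd *bdp)
	{
		/* remember the dma_addr_t; only it may be passed to unmap */
		bdp->buf_addr = dma_map_single(dev, skb->data, skb->len,
					       DMA_TO_DEVICE);
	}

	static void example_unmap(struct device *dev, struct sk_buff *skb,
				  struct example_bd *bdp)
	{
		/* unmap with the stored bus address, never with skb->data */
		dma_unmap_single(dev, bdp->buf_addr, skb->len, DMA_TO_DEVICE);
	}

On platforms where bus and CPU addresses differ (e.g. behind an IOMMU), unmapping with skb->data tears down the wrong mapping; that is what the CBDR_BUFADDR(bdp) conversions above correct.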
diff --git a/drivers/net/ibm_emac/ibm_emac.h b/drivers/net/ibm_emac/ibm_emac.h
index 28c476f28c20..644edbff4f94 100644
--- a/drivers/net/ibm_emac/ibm_emac.h
+++ b/drivers/net/ibm_emac/ibm_emac.h
@@ -26,7 +26,8 @@
 /* This is a simple check to prevent use of this driver on non-tested SoCs */
 #if !defined(CONFIG_405GP) && !defined(CONFIG_405GPR) && !defined(CONFIG_405EP) && \
     !defined(CONFIG_440GP) && !defined(CONFIG_440GX) && !defined(CONFIG_440SP) && \
-    !defined(CONFIG_440EP) && !defined(CONFIG_NP405H)
+    !defined(CONFIG_440EP) && !defined(CONFIG_NP405H) && !defined(CONFIG_440SPE) && \
+    !defined(CONFIG_440GR)
 #error "Unknown SoC. Please, check chip user manual and make sure EMAC defines are OK"
 #endif
 
@@ -246,6 +247,25 @@ struct emac_regs {
 #define EMAC_STACR_PCDA_SHIFT		5
 #define EMAC_STACR_PRA_MASK		0x1f
 
+/*
+ * For the 440SPe, AMCC inexplicably changed the polarity of
+ * the "operation complete" bit in the MII control register.
+ */
+#if defined(CONFIG_440SPE)
+static inline int emac_phy_done(u32 stacr)
+{
+	return !(stacr & EMAC_STACR_OC);
+};
+#define EMAC_STACR_START EMAC_STACR_OC
+
+#else /* CONFIG_440SPE */
+static inline int emac_phy_done(u32 stacr)
+{
+	return stacr & EMAC_STACR_OC;
+};
+#define EMAC_STACR_START 0
+#endif /* !CONFIG_440SPE */
+
 /* EMACx_TRTR */
 #if !defined(CONFIG_IBM_EMAC4)
 #define EMAC_TRTR_SHIFT			27
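
emac_phy_done() concentrates the 440SPe polarity quirk in one place, so every MDIO poll loop stays identical across SoCs. An illustrative caller, mirroring the __emac_mdio_read()/__emac_mdio_write() loops changed later in this diff (the -ETIMEDOUT return is my choice; the driver itself jumps to an error label on timeout):

	static int example_wait_mdio_idle(struct emac_regs __iomem *p)
	{
		int n = 100;

		/* poll the "operation complete" bit via the helper, not
		 * EMAC_STACR_OC directly, so the 440SPe inversion is hidden */
		while (!emac_phy_done(in_be32(&p->stacr))) {
			udelay(1);
			if (!--n)
				return -ETIMEDOUT;
		}
		return 0;
	}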
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 943fbd1546ff..eb7d69478715 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -87,10 +87,11 @@ MODULE_LICENSE("GPL");
  */
 static u32 busy_phy_map;
 
-#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && (defined(CONFIG_405EP) || defined(CONFIG_440EP))
+#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
+    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
 /* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
  * with PHY RX clock problem.
- * 440EP has more sane SDR0_MFR register implementation than 440GX, which
+ * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, which
  * also allows controlling each EMAC clock
  */
 static inline void EMAC_RX_CLK_TX(int idx)
@@ -100,7 +101,7 @@ static inline void EMAC_RX_CLK_TX(int idx)
 
 #if defined(CONFIG_405EP)
 	mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
-#else /* CONFIG_440EP */
+#else /* CONFIG_440EP || CONFIG_440GR */
 	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
 #endif
 
@@ -546,7 +547,7 @@ static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
 
 	/* Wait for management interface to become idle */
 	n = 10;
-	while (!(in_be32(&p->stacr) & EMAC_STACR_OC)) {
+	while (!emac_phy_done(in_be32(&p->stacr))) {
 		udelay(1);
 		if (!--n)
 			goto to;
@@ -556,11 +557,12 @@ static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
 	out_be32(&p->stacr,
 		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
 		 (reg & EMAC_STACR_PRA_MASK)
-		 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT));
+		 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
+		 | EMAC_STACR_START);
 
 	/* Wait for read to complete */
 	n = 100;
-	while (!((r = in_be32(&p->stacr)) & EMAC_STACR_OC)) {
+	while (!emac_phy_done(r = in_be32(&p->stacr))) {
 		udelay(1);
 		if (!--n)
 			goto to;
@@ -594,7 +596,7 @@ static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
 
 	/* Wait for management interface to be idle */
 	n = 10;
-	while (!(in_be32(&p->stacr) & EMAC_STACR_OC)) {
+	while (!emac_phy_done(in_be32(&p->stacr))) {
 		udelay(1);
 		if (!--n)
 			goto to;
@@ -605,11 +607,11 @@ static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
 		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
 		 (reg & EMAC_STACR_PRA_MASK) |
 		 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
-		 (val << EMAC_STACR_PHYD_SHIFT));
+		 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);
 
 	/* Wait for write to complete */
 	n = 100;
-	while (!(in_be32(&p->stacr) & EMAC_STACR_OC)) {
+	while (!emac_phy_done(in_be32(&p->stacr))) {
 		udelay(1);
 		if (!--n)
 			goto to;
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h
index 15b0bdae26ac..2a2d3b24b037 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.h
+++ b/drivers/net/ibm_emac/ibm_emac_mal.h
@@ -32,9 +32,10 @@
  * reflect the fact that 40x and 44x have slightly different MALs. --ebs
  */
 #if defined(CONFIG_405GP) || defined(CONFIG_405GPR) || defined(CONFIG_405EP) || \
-    defined(CONFIG_440EP) || defined(CONFIG_NP405H)
+    defined(CONFIG_440EP) || defined(CONFIG_440GR) || defined(CONFIG_NP405H)
 #define MAL_VERSION		1
-#elif defined(CONFIG_440GP) || defined(CONFIG_440GX) || defined(CONFIG_440SP)
+#elif defined(CONFIG_440GP) || defined(CONFIG_440GX) || defined(CONFIG_440SP) || \
+      defined(CONFIG_440SPE)
 #define MAL_VERSION		2
 #else
 #error "Unknown SoC, please check chip manual and choose MAL 'version'"
diff --git a/drivers/net/ibm_emac/ibm_emac_phy.c b/drivers/net/ibm_emac/ibm_emac_phy.c
index a27e49cfe43b..67935dd33a65 100644
--- a/drivers/net/ibm_emac/ibm_emac_phy.c
+++ b/drivers/net/ibm_emac/ibm_emac_phy.c
@@ -236,12 +236,16 @@ static struct mii_phy_def genmii_phy_def = {
 };
 
 /* CIS8201 */
+#define MII_CIS8201_10BTCSR	0x16
+#define TENBTCSR_ECHO_DISABLE	0x2000
 #define MII_CIS8201_EPCR	0x17
 #define EPCR_MODE_MASK		0x3000
 #define EPCR_GMII_MODE		0x0000
 #define EPCR_RGMII_MODE		0x1000
 #define EPCR_TBI_MODE		0x2000
 #define EPCR_RTBI_MODE		0x3000
+#define MII_CIS8201_ACSR	0x1c
+#define ACSR_PIN_PRIO_SELECT	0x0004
 
 static int cis8201_init(struct mii_phy *phy)
 {
@@ -269,6 +273,14 @@ static int cis8201_init(struct mii_phy *phy)
 	}
 
 	phy_write(phy, MII_CIS8201_EPCR, epcr);
+
+	/* MII regs override strap pins */
+	phy_write(phy, MII_CIS8201_ACSR,
+		  phy_read(phy, MII_CIS8201_ACSR) | ACSR_PIN_PRIO_SELECT);
+
+	/* Disable TX_EN -> CRS echo mode, otherwise 10/HDX doesn't work */
+	phy_write(phy, MII_CIS8201_10BTCSR,
+		  phy_read(phy, MII_CIS8201_10BTCSR) | TENBTCSR_ECHO_DISABLE);
 
 	return 0;
 }
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 70fe81a89df9..be319229f543 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,8 +22,8 @@
  *************************************************************************/
 
 #define DRV_NAME	"pcnet32"
-#define DRV_VERSION	"1.31a"
-#define DRV_RELDATE	"12.Sep.2005"
+#define DRV_VERSION	"1.31c"
+#define DRV_RELDATE	"01.Nov.2005"
 #define PFX		DRV_NAME ": "
 
 static const char *version =
@@ -260,6 +260,11 @@ static int homepna[MAX_UNITS];
  * v1.31   02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> added set_ringparam().
  * v1.31a  12 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> set min ring size to 4
  *	   to allow loopback test to work unchanged.
+ * v1.31b  06 Oct 2005 Don Fry changed alloc_ring to show name of device
+ *	   if allocation fails
+ * v1.31c  01 Nov 2005 Don Fry Allied Telesyn 2700/2701 FX are 100Mbit only.
+ *	   Force 100Mbit FD if Auto (ASEL) is selected.
+ *	   See Bugzilla 2669 and 4551.
  */
 
 
@@ -408,7 +413,7 @@ static int pcnet32_get_regs_len(struct net_device *dev);
 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	void *ptr);
 static void pcnet32_purge_tx_ring(struct net_device *dev);
-static int pcnet32_alloc_ring(struct net_device *dev);
+static int pcnet32_alloc_ring(struct net_device *dev, char *name);
 static void pcnet32_free_ring(struct net_device *dev);
 
 
@@ -669,15 +674,17 @@ static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringpara
     lp->rx_mod_mask = lp->rx_ring_size - 1;
     lp->rx_len_bits = (i << 4);
 
-    if (pcnet32_alloc_ring(dev)) {
+    if (pcnet32_alloc_ring(dev, dev->name)) {
 	pcnet32_free_ring(dev);
+	spin_unlock_irqrestore(&lp->lock, flags);
 	return -ENOMEM;
     }
 
     spin_unlock_irqrestore(&lp->lock, flags);
 
     if (pcnet32_debug & NETIF_MSG_DRV)
-	printk(KERN_INFO PFX "Ring Param Settings: RX: %d, TX: %d\n", lp->rx_ring_size, lp->tx_ring_size);
+	printk(KERN_INFO PFX "%s: Ring Param Settings: RX: %d, TX: %d\n",
+		dev->name, lp->rx_ring_size, lp->tx_ring_size);
 
     if (netif_running(dev))
 	pcnet32_open(dev);
@@ -981,7 +988,11 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
     *buff++ = a->read_csr(ioaddr, 114);
 
     /* read bus configuration registers */
-    for (i=0; i<36; i++) {
+    for (i=0; i<30; i++) {
+	*buff++ = a->read_bcr(ioaddr, i);
+    }
+    *buff++ = 0;	/* skip bcr30 so as not to hang 79C976 */
+    for (i=31; i<36; i++) {
 	*buff++ = a->read_bcr(ioaddr, i);
     }
 
@@ -1340,7 +1351,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
     }
     lp->a = *a;
 
-    if (pcnet32_alloc_ring(dev)) {
+    /* prior to register_netdev, dev->name is not yet correct */
+    if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
 	ret = -ENOMEM;
 	goto err_free_ring;
     }
@@ -1448,48 +1460,63 @@ err_release_region:
 }
 
 
-static int pcnet32_alloc_ring(struct net_device *dev)
+/* if any allocation fails, caller must also call pcnet32_free_ring */
+static int pcnet32_alloc_ring(struct net_device *dev, char *name)
 {
     struct pcnet32_private *lp = dev->priv;
 
-    if ((lp->tx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
-					    &lp->tx_ring_dma_addr)) == NULL) {
+    lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
+	    sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+	    &lp->tx_ring_dma_addr);
+    if (lp->tx_ring == NULL) {
 	if (pcnet32_debug & NETIF_MSG_DRV)
-	    printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
+	    printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n",
+		    name);
 	return -ENOMEM;
     }
 
-    if ((lp->rx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
-					    &lp->rx_ring_dma_addr)) == NULL) {
+    lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
+	    sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+	    &lp->rx_ring_dma_addr);
+    if (lp->rx_ring == NULL) {
 	if (pcnet32_debug & NETIF_MSG_DRV)
-	    printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
+	    printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n",
+		    name);
 	return -ENOMEM;
     }
 
-    if (!(lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, GFP_ATOMIC))) {
+    lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
+	    GFP_ATOMIC);
+    if (!lp->tx_dma_addr) {
 	if (pcnet32_debug & NETIF_MSG_DRV)
-	    printk(KERN_ERR PFX "Memory allocation failed.\n");
+	    printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
 	return -ENOMEM;
     }
     memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
 
-    if (!(lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, GFP_ATOMIC))) {
+    lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
+	    GFP_ATOMIC);
+    if (!lp->rx_dma_addr) {
 	if (pcnet32_debug & NETIF_MSG_DRV)
-	    printk(KERN_ERR PFX "Memory allocation failed.\n");
+	    printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
 	return -ENOMEM;
     }
     memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
 
-    if (!(lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, GFP_ATOMIC))) {
+    lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
+	    GFP_ATOMIC);
+    if (!lp->tx_skbuff) {
 	if (pcnet32_debug & NETIF_MSG_DRV)
-	    printk(KERN_ERR PFX "Memory allocation failed.\n");
+	    printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
 	return -ENOMEM;
     }
     memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
 
-    if (!(lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, GFP_ATOMIC))) {
+    lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
+	    GFP_ATOMIC);
+    if (!lp->rx_skbuff) {
 	if (pcnet32_debug & NETIF_MSG_DRV)
-	    printk(KERN_ERR PFX "Memory allocation failed.\n");
+	    printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
 	return -ENOMEM;
     }
     memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
@@ -1592,12 +1619,18 @@ pcnet32_open(struct net_device *dev)
 	    val |= 0x10;
     lp->a.write_csr (ioaddr, 124, val);
 
-    /* Allied Telesyn AT 2700/2701 FX looses the link, so skip that */
+    /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
     if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&
 	(lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
 	 lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
-	printk(KERN_DEBUG "%s: Skipping PHY selection.\n", dev->name);
-    } else {
+	if (lp->options & PCNET32_PORT_ASEL) {
+	    lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
+	    if (netif_msg_link(lp))
+		printk(KERN_DEBUG "%s: Setting 100Mb-Full Duplex.\n",
+			dev->name);
+	}
+    }
+    {
 	/*
 	 * 24 Jun 2004 according AMD, in order to change the PHY,
 	 * DANAS (or DISPM for 79C976) must be set; then select the speed,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index ad93b0da87f0..5eab9c42a111 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -61,6 +61,9 @@ int mdiobus_register(struct mii_bus *bus)
 	for (i = 0; i < PHY_MAX_ADDR; i++) {
 		struct phy_device *phydev;
 
+		if (bus->phy_mask & (1 << i))
+			continue;
+
 		phydev = get_phy_device(bus, i);
 
 		if (IS_ERR(phydev))
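
With the new phy_mask check, platform code can exclude PHY addresses that mdiobus_register() must not probe (for example, addresses that hang the bus when read). A hypothetical board-setup sketch, assuming a mii_bus already initialized elsewhere:

	static int example_mdio_setup(struct mii_bus *bus)
	{
		/* a set bit means "skip this address": probe only PHY 0 */
		bus->phy_mask = ~(1 << 0);
		return mdiobus_register(bus);
	}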
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 3f5e93aad5c7..9c4935407f26 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -30,6 +30,8 @@
  *     in the driver.
  * rx_ring_sz: This defines the number of descriptors each ring can have. This
  *     is also an array of size 8.
+ * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
+ *     values are 1, 2 and 3.
  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
  * tx_fifo_len: This too is an array of 8. Each element defines the number of
  *     Tx descriptors that can be associated with each corresponding FIFO.
@@ -65,12 +67,15 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "Version 2.0.9.1"
+#define DRV_VERSION "Version 2.0.9.3"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
 static char s2io_driver_version[] = DRV_VERSION;
 
+int rxd_size[4] = {32,48,48,64};
+int rxd_count[4] = {127,85,85,63};
+
 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
 {
 	int ret;
@@ -104,7 +109,7 @@ static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
 	mac_control = &sp->mac_control;
 	if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
 		level = LOW;
-		if (rxb_size <= MAX_RXDS_PER_BLOCK) {
+		if (rxb_size <= rxd_count[sp->rxd_mode]) {
 			level = PANIC;
 		}
 	}
@@ -296,6 +301,7 @@ static unsigned int rx_ring_sz[MAX_RX_RINGS] =
     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
 static unsigned int rts_frm_len[MAX_RX_RINGS] =
     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
+static unsigned int rx_ring_mode = 1;
 static unsigned int use_continuous_tx_intrs = 1;
 static unsigned int rmac_pause_time = 65535;
 static unsigned int mc_pause_threshold_q0q3 = 187;
@@ -304,6 +310,7 @@ static unsigned int shared_splits;
 static unsigned int tmac_util_period = 5;
 static unsigned int rmac_util_period = 5;
 static unsigned int bimodal = 0;
+static unsigned int l3l4hdr_size = 128;
 #ifndef CONFIG_S2IO_NAPI
 static unsigned int indicate_max_pkts;
 #endif
@@ -357,10 +364,8 @@ static int init_shared_mem(struct s2io_nic *nic)
 	int i, j, blk_cnt, rx_sz, tx_sz;
 	int lst_size, lst_per_page;
 	struct net_device *dev = nic->dev;
-#ifdef CONFIG_2BUFF_MODE
 	unsigned long tmp;
 	buffAdd_t *ba;
-#endif
 
 	mac_info_t *mac_control;
 	struct config_param *config;
@@ -458,7 +463,8 @@ static int init_shared_mem(struct s2io_nic *nic)
 	/* Allocation and initialization of RXDs in Rings */
 	size = 0;
 	for (i = 0; i < config->rx_ring_num; i++) {
-		if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
+		if (config->rx_cfg[i].num_rxd %
+		    (rxd_count[nic->rxd_mode] + 1)) {
 			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
 			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
 				  i);
@@ -467,11 +473,15 @@ static int init_shared_mem(struct s2io_nic *nic)
 		}
 		size += config->rx_cfg[i].num_rxd;
 		mac_control->rings[i].block_count =
-		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
-		mac_control->rings[i].pkt_cnt =
-		    config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
+			config->rx_cfg[i].num_rxd /
+			(rxd_count[nic->rxd_mode] + 1 );
+		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
+			mac_control->rings[i].block_count;
 	}
-	size = (size * (sizeof(RxD_t)));
+	if (nic->rxd_mode == RXD_MODE_1)
+		size = (size * (sizeof(RxD1_t)));
+	else
+		size = (size * (sizeof(RxD3_t)));
 	rx_sz = size;
 
 	for (i = 0; i < config->rx_ring_num; i++) {
@@ -486,15 +496,15 @@ static int init_shared_mem(struct s2io_nic *nic)
 		mac_control->rings[i].nic = nic;
 		mac_control->rings[i].ring_no = i;
 
-		blk_cnt =
-		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
+		blk_cnt = config->rx_cfg[i].num_rxd /
+				(rxd_count[nic->rxd_mode] + 1);
 		/*  Allocating all the Rx blocks */
 		for (j = 0; j < blk_cnt; j++) {
-#ifndef CONFIG_2BUFF_MODE
-			size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
-#else
-			size = SIZE_OF_BLOCK;
-#endif
+			rx_block_info_t *rx_blocks;
+			int l;
+
+			rx_blocks = &mac_control->rings[i].rx_blocks[j];
+			size = SIZE_OF_BLOCK; //size is always page size
 			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
 							  &tmp_p_addr);
 			if (tmp_v_addr == NULL) {
@@ -504,11 +514,24 @@ static int init_shared_mem(struct s2io_nic *nic)
 				 * memory that was alloced till the
 				 * failure happened.
 				 */
-				mac_control->rings[i].rx_blocks[j].block_virt_addr =
-				    tmp_v_addr;
+				rx_blocks->block_virt_addr = tmp_v_addr;
 				return -ENOMEM;
 			}
 			memset(tmp_v_addr, 0, size);
+			rx_blocks->block_virt_addr = tmp_v_addr;
+			rx_blocks->block_dma_addr = tmp_p_addr;
+			rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)*
+						  rxd_count[nic->rxd_mode],
+						  GFP_KERNEL);
+			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
+				rx_blocks->rxds[l].virt_addr =
+					rx_blocks->block_virt_addr +
+					(rxd_size[nic->rxd_mode] * l);
+				rx_blocks->rxds[l].dma_addr =
+					rx_blocks->block_dma_addr +
+					(rxd_size[nic->rxd_mode] * l);
+			}
+
 			mac_control->rings[i].rx_blocks[j].block_virt_addr =
 			    tmp_v_addr;
 			mac_control->rings[i].rx_blocks[j].block_dma_addr =
@@ -528,62 +551,58 @@ static int init_shared_mem(struct s2io_nic *nic)
 				    blk_cnt].block_dma_addr;
 
 		pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
-		pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
-							 * marker.
-							 */
-#ifndef CONFIG_2BUFF_MODE
 		pre_rxd_blk->reserved_2_pNext_RxD_block =
 		    (unsigned long) tmp_v_addr_next;
-#endif
 		pre_rxd_blk->pNext_RxD_Blk_physical =
 		    (u64) tmp_p_addr_next;
 	}
 	}
-
-#ifdef CONFIG_2BUFF_MODE
-	/*
-	 * Allocation of Storages for buffer addresses in 2BUFF mode
-	 * and the buffers as well.
-	 */
-	for (i = 0; i < config->rx_ring_num; i++) {
-		blk_cnt =
-		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
-		mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
-						   GFP_KERNEL);
-		if (!mac_control->rings[i].ba)
-			return -ENOMEM;
-		for (j = 0; j < blk_cnt; j++) {
-			int k = 0;
-			mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
-					(MAX_RXDS_PER_BLOCK + 1)),
-					GFP_KERNEL);
-			if (!mac_control->rings[i].ba[j])
-				return -ENOMEM;
-			while (k != MAX_RXDS_PER_BLOCK) {
-				ba = &mac_control->rings[i].ba[j][k];
-
-				ba->ba_0_org = (void *) kmalloc
-				    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
-				if (!ba->ba_0_org)
-					return -ENOMEM;
-				tmp = (unsigned long) ba->ba_0_org;
-				tmp += ALIGN_SIZE;
-				tmp &= ~((unsigned long) ALIGN_SIZE);
-				ba->ba_0 = (void *) tmp;
-
-				ba->ba_1_org = (void *) kmalloc
-				    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
-				if (!ba->ba_1_org)
-					return -ENOMEM;
-				tmp = (unsigned long) ba->ba_1_org;
-				tmp += ALIGN_SIZE;
-				tmp &= ~((unsigned long) ALIGN_SIZE);
-				ba->ba_1 = (void *) tmp;
-				k++;
-			}
-		}
-	}
-#endif
+	if (nic->rxd_mode >= RXD_MODE_3A) {
+		/*
+		 * Allocation of Storages for buffer addresses in 2BUFF mode
+		 * and the buffers as well.
+		 */
+		for (i = 0; i < config->rx_ring_num; i++) {
+			blk_cnt = config->rx_cfg[i].num_rxd /
+				(rxd_count[nic->rxd_mode]+ 1);
+			mac_control->rings[i].ba =
+				kmalloc((sizeof(buffAdd_t *) * blk_cnt),
+					GFP_KERNEL);
+			if (!mac_control->rings[i].ba)
+				return -ENOMEM;
+			for (j = 0; j < blk_cnt; j++) {
+				int k = 0;
+				mac_control->rings[i].ba[j] =
+					kmalloc((sizeof(buffAdd_t) *
+						(rxd_count[nic->rxd_mode] + 1)),
+						GFP_KERNEL);
+				if (!mac_control->rings[i].ba[j])
+					return -ENOMEM;
+				while (k != rxd_count[nic->rxd_mode]) {
+					ba = &mac_control->rings[i].ba[j][k];
+
+					ba->ba_0_org = (void *) kmalloc
+					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
+					if (!ba->ba_0_org)
+						return -ENOMEM;
+					tmp = (unsigned long)ba->ba_0_org;
+					tmp += ALIGN_SIZE;
+					tmp &= ~((unsigned long) ALIGN_SIZE);
+					ba->ba_0 = (void *) tmp;
+
+					ba->ba_1_org = (void *) kmalloc
+					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
+					if (!ba->ba_1_org)
+						return -ENOMEM;
+					tmp = (unsigned long) ba->ba_1_org;
+					tmp += ALIGN_SIZE;
+					tmp &= ~((unsigned long) ALIGN_SIZE);
+					ba->ba_1 = (void *) tmp;
+					k++;
+				}
+			}
+		}
+	}
 
 	/* Allocation and initialization of Statistics block */
 	size = sizeof(StatInfo_t);
@@ -669,11 +688,7 @@ static void free_shared_mem(struct s2io_nic *nic)
 		kfree(mac_control->fifos[i].list_info);
 	}
 
-#ifndef CONFIG_2BUFF_MODE
-	size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
-#else
 	size = SIZE_OF_BLOCK;
-#endif
 	for (i = 0; i < config->rx_ring_num; i++) {
 		blk_cnt = mac_control->rings[i].block_count;
 		for (j = 0; j < blk_cnt; j++) {
@@ -685,29 +700,31 @@ static void free_shared_mem(struct s2io_nic *nic)
 				break;
 			pci_free_consistent(nic->pdev, size,
 					    tmp_v_addr, tmp_p_addr);
+			kfree(mac_control->rings[i].rx_blocks[j].rxds);
 		}
 	}
 
-#ifdef CONFIG_2BUFF_MODE
-	/* Freeing buffer storage addresses in 2BUFF mode. */
-	for (i = 0; i < config->rx_ring_num; i++) {
-		blk_cnt =
-		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
-		for (j = 0; j < blk_cnt; j++) {
-			int k = 0;
-			if (!mac_control->rings[i].ba[j])
-				continue;
-			while (k != MAX_RXDS_PER_BLOCK) {
-				buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
-				kfree(ba->ba_0_org);
-				kfree(ba->ba_1_org);
-				k++;
-			}
-			kfree(mac_control->rings[i].ba[j]);
-		}
-		kfree(mac_control->rings[i].ba);
-	}
-}
-#endif
+	if (nic->rxd_mode >= RXD_MODE_3A) {
+		/* Freeing buffer storage addresses in 2BUFF mode. */
+		for (i = 0; i < config->rx_ring_num; i++) {
+			blk_cnt = config->rx_cfg[i].num_rxd /
+				(rxd_count[nic->rxd_mode] + 1);
+			for (j = 0; j < blk_cnt; j++) {
+				int k = 0;
+				if (!mac_control->rings[i].ba[j])
+					continue;
+				while (k != rxd_count[nic->rxd_mode]) {
+					buffAdd_t *ba =
+						&mac_control->rings[i].ba[j][k];
+					kfree(ba->ba_0_org);
+					kfree(ba->ba_1_org);
+					k++;
+				}
+				kfree(mac_control->rings[i].ba[j]);
+			}
+			kfree(mac_control->rings[i].ba);
+		}
+	}
 
 	if (mac_control->stats_mem) {
 		pci_free_consistent(nic->pdev,
@@ -1894,20 +1911,19 @@ static int start_nic(struct s2io_nic *nic)
 		val64 = readq(&bar0->prc_ctrl_n[i]);
 		if (nic->config.bimodal)
 			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
-#ifndef CONFIG_2BUFF_MODE
-		val64 |= PRC_CTRL_RC_ENABLED;
-#else
-		val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
-#endif
+		if (nic->rxd_mode == RXD_MODE_1)
+			val64 |= PRC_CTRL_RC_ENABLED;
+		else
+			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
 		writeq(val64, &bar0->prc_ctrl_n[i]);
 	}
 
-#ifdef CONFIG_2BUFF_MODE
-	/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
-	val64 = readq(&bar0->rx_pa_cfg);
-	val64 |= RX_PA_CFG_IGNORE_L2_ERR;
-	writeq(val64, &bar0->rx_pa_cfg);
-#endif
+	if (nic->rxd_mode == RXD_MODE_3B) {
+		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
+		val64 = readq(&bar0->rx_pa_cfg);
+		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
+		writeq(val64, &bar0->rx_pa_cfg);
+	}
 
 	/*
 	 * Enabling MC-RLDRAM. After enabling the device, we timeout
@@ -2090,6 +2106,41 @@ static void stop_nic(struct s2io_nic *nic)
 	}
 }
 
+int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
+{
+	struct net_device *dev = nic->dev;
+	struct sk_buff *frag_list;
+	u64 tmp;
+
+	/* Buffer-1 receives L3/L4 headers */
+	((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
+			(nic->pdev, skb->data, l3l4hdr_size + 4,
+			PCI_DMA_FROMDEVICE);
+
+	/* skb_shinfo(skb)->frag_list will have L4 data payload */
+	skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
+	if (skb_shinfo(skb)->frag_list == NULL) {
+		DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
+		return -ENOMEM ;
+	}
+	frag_list = skb_shinfo(skb)->frag_list;
+	frag_list->next = NULL;
+	tmp = (u64) frag_list->data;
+	tmp += ALIGN_SIZE;
+	tmp &= ~ALIGN_SIZE;
+	frag_list->data = (void *) tmp;
+	frag_list->tail = (void *) tmp;
+
+	/* Buffer-2 receives L4 data payload */
+	((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
+						frag_list->data, dev->mtu,
+						PCI_DMA_FROMDEVICE);
+	rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
+	rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
+
+	return SUCCESS;
+}
+
 /**
  *  fill_rx_buffers - Allocates the Rx side skbs
  *  @nic:  device private variable
@@ -2117,18 +2168,12 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 	struct sk_buff *skb;
 	RxD_t *rxdp;
 	int off, off1, size, block_no, block_no1;
-	int offset, offset1;
 	u32 alloc_tab = 0;
 	u32 alloc_cnt;
 	mac_info_t *mac_control;
 	struct config_param *config;
-#ifdef CONFIG_2BUFF_MODE
-	RxD_t *rxdpnext;
-	int nextblk;
 	u64 tmp;
 	buffAdd_t *ba;
-	dma_addr_t rxdpphys;
-#endif
 #ifndef CONFIG_S2IO_NAPI
 	unsigned long flags;
 #endif
@@ -2138,8 +2183,6 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 	config = &nic->config;
 	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
 	    atomic_read(&nic->rx_bufs_left[ring_no]);
-	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
-	    HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
 
 	while (alloc_tab < alloc_cnt) {
 		block_no = mac_control->rings[ring_no].rx_curr_put_info.
@@ -2148,159 +2191,145 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2148 block_index; 2191 block_index;
2149 off = mac_control->rings[ring_no].rx_curr_put_info.offset; 2192 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2150 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset; 2193 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2151#ifndef CONFIG_2BUFF_MODE
2152 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2153 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2154#else
2155 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2156 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2157#endif
2158 2194
2159 rxdp = mac_control->rings[ring_no].rx_blocks[block_no]. 2195 rxdp = mac_control->rings[ring_no].
2160 block_virt_addr + off; 2196 rx_blocks[block_no].rxds[off].virt_addr;
2161 if ((offset == offset1) && (rxdp->Host_Control)) { 2197
2162 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name); 2198 if ((block_no == block_no1) && (off == off1) &&
2199 (rxdp->Host_Control)) {
2200 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2201 dev->name);
2163 DBG_PRINT(INTR_DBG, " info equated\n"); 2202 DBG_PRINT(INTR_DBG, " info equated\n");
2164 goto end; 2203 goto end;
2165 } 2204 }
2166#ifndef CONFIG_2BUFF_MODE 2205 if (off && (off == rxd_count[nic->rxd_mode])) {
2167 if (rxdp->Control_1 == END_OF_BLOCK) {
2168 mac_control->rings[ring_no].rx_curr_put_info. 2206 mac_control->rings[ring_no].rx_curr_put_info.
2169 block_index++; 2207 block_index++;
2208 if (mac_control->rings[ring_no].rx_curr_put_info.
2209 block_index == mac_control->rings[ring_no].
2210 block_count)
2211 mac_control->rings[ring_no].rx_curr_put_info.
2212 block_index = 0;
2213 block_no = mac_control->rings[ring_no].
2214 rx_curr_put_info.block_index;
2215 if (off == rxd_count[nic->rxd_mode])
2216 off = 0;
2170 mac_control->rings[ring_no].rx_curr_put_info. 2217 mac_control->rings[ring_no].rx_curr_put_info.
2171 block_index %= mac_control->rings[ring_no].block_count; 2218 offset = off;
2172 block_no = mac_control->rings[ring_no].rx_curr_put_info. 2219 rxdp = mac_control->rings[ring_no].
2173 block_index; 2220 rx_blocks[block_no].block_virt_addr;
2174 off++;
2175 off %= (MAX_RXDS_PER_BLOCK + 1);
2176 mac_control->rings[ring_no].rx_curr_put_info.offset =
2177 off;
2178 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2179 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2221 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2180 dev->name, rxdp); 2222 dev->name, rxdp);
2181 } 2223 }
2182#ifndef CONFIG_S2IO_NAPI 2224#ifndef CONFIG_S2IO_NAPI
2183 spin_lock_irqsave(&nic->put_lock, flags); 2225 spin_lock_irqsave(&nic->put_lock, flags);
2184 mac_control->rings[ring_no].put_pos = 2226 mac_control->rings[ring_no].put_pos =
2185 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off; 2227 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2186 spin_unlock_irqrestore(&nic->put_lock, flags); 2228 spin_unlock_irqrestore(&nic->put_lock, flags);
2187#endif 2229#endif
2188#else 2230 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2189 if (rxdp->Host_Control == END_OF_BLOCK) { 2231 ((nic->rxd_mode >= RXD_MODE_3A) &&
2232 (rxdp->Control_2 & BIT(0)))) {
2190 mac_control->rings[ring_no].rx_curr_put_info. 2233 mac_control->rings[ring_no].rx_curr_put_info.
2191 block_index++; 2234 offset = off;
2192 mac_control->rings[ring_no].rx_curr_put_info.block_index
2193 %= mac_control->rings[ring_no].block_count;
2194 block_no = mac_control->rings[ring_no].rx_curr_put_info
2195 .block_index;
2196 off = 0;
2197 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2198 dev->name, block_no,
2199 (unsigned long long) rxdp->Control_1);
2200 mac_control->rings[ring_no].rx_curr_put_info.offset =
2201 off;
2202 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2203 block_virt_addr;
2204 }
2205#ifndef CONFIG_S2IO_NAPI
2206 spin_lock_irqsave(&nic->put_lock, flags);
2207 mac_control->rings[ring_no].put_pos = (block_no *
2208 (MAX_RXDS_PER_BLOCK + 1)) + off;
2209 spin_unlock_irqrestore(&nic->put_lock, flags);
2210#endif
2211#endif
2212
2213#ifndef CONFIG_2BUFF_MODE
2214 if (rxdp->Control_1 & RXD_OWN_XENA)
2215#else
2216 if (rxdp->Control_2 & BIT(0))
2217#endif
2218 {
2219 mac_control->rings[ring_no].rx_curr_put_info.
2220 offset = off;
2221 goto end; 2235 goto end;
2222 } 2236 }
2223#ifdef CONFIG_2BUFF_MODE 2237 /* calculate size of skb based on ring mode */
2224 /* 2238 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2225 * RxDs Spanning cache lines will be replenished only 2239 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2226 * if the succeeding RxD is also owned by Host. It 2240 if (nic->rxd_mode == RXD_MODE_1)
2227 * will always be the ((8*i)+3) and ((8*i)+6) 2241 size += NET_IP_ALIGN;
2228 * descriptors for the 48 byte descriptor. The offending 2242 else if (nic->rxd_mode == RXD_MODE_3B)
2229 * descriptor is of course the 3rd descriptor. 2243 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2230 */ 2244 else
2231 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no]. 2245 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2232 block_dma_addr + (off * sizeof(RxD_t));
2233 if (((u64) (rxdpphys)) % 128 > 80) {
2234 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2235 block_virt_addr + (off + 1);
2236 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2237 nextblk = (block_no + 1) %
2238 (mac_control->rings[ring_no].block_count);
2239 rxdpnext = mac_control->rings[ring_no].rx_blocks
2240 [nextblk].block_virt_addr;
2241 }
2242 if (rxdpnext->Control_2 & BIT(0))
2243 goto end;
2244 }
2245#endif
2246 2246
2247#ifndef CONFIG_2BUFF_MODE 2247 /* allocate skb */
2248 skb = dev_alloc_skb(size + NET_IP_ALIGN); 2248 skb = dev_alloc_skb(size);
2249#else 2249 if (!skb) {
2250 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2251#endif
2252 if (!skb) {
2253 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name); 2250 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2254 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n"); 2251 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2255 if (first_rxdp) { 2252 if (first_rxdp) {
2256 wmb(); 2253 wmb();
2257 first_rxdp->Control_1 |= RXD_OWN_XENA; 2254 first_rxdp->Control_1 |= RXD_OWN_XENA;
2258 } 2255 }
2259 return -ENOMEM; 2256 return -ENOMEM;
2257 }
2258 if (nic->rxd_mode == RXD_MODE_1) {
2259 /* 1 buffer mode - normal operation mode */
2260 memset(rxdp, 0, sizeof(RxD1_t));
2261 skb_reserve(skb, NET_IP_ALIGN);
2262 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
2263 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2264 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE_1);
2265 rxdp->Control_2 |= SET_BUFFER0_SIZE_1(size);
2266
2267 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2268 /*
2269 * 2 or 3 buffer mode -
2270 * Both 2 buffer mode and 3 buffer mode provide 128
2271 * byte aligned receive buffers.
2272 *
2273 * 3 buffer mode provides header separation wherein
2274 * skb->data will have L3/L4 headers whereas
2275 * skb_shinfo(skb)->frag_list will have the L4 data
2276 * payload
2277 */
2278
2279 memset(rxdp, 0, sizeof(RxD3_t));
2280 ba = &mac_control->rings[ring_no].ba[block_no][off];
2281 skb_reserve(skb, BUF0_LEN);
2282 tmp = (u64)(unsigned long) skb->data;
2283 tmp += ALIGN_SIZE;
2284 tmp &= ~ALIGN_SIZE;
2285 skb->data = (void *) (unsigned long)tmp;
2286 skb->tail = (void *) (unsigned long)tmp;
2287
2288 ((RxD3_t*)rxdp)->Buffer0_ptr =
2289 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2290 PCI_DMA_FROMDEVICE);
2291 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2292 if (nic->rxd_mode == RXD_MODE_3B) {
2293 /* Two buffer mode */
2294
2295 /*
2296 * Buffer2 will have L3/L4 header plus
2297 * L4 payload
2298 */
2299 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
2300 (nic->pdev, skb->data, dev->mtu + 4,
2301 PCI_DMA_FROMDEVICE);
2302
2303 /* Buffer-1 will be a dummy buffer, not used */
2304 ((RxD3_t*)rxdp)->Buffer1_ptr =
2305 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2306 PCI_DMA_FROMDEVICE);
2307 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2308 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2309 (dev->mtu + 4);
2310 } else {
2311 /* 3 buffer mode */
2312 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2313 dev_kfree_skb_irq(skb);
2314 if (first_rxdp) {
2315 wmb();
2316 first_rxdp->Control_1 |=
2317 RXD_OWN_XENA;
2318 }
2319 return -ENOMEM;
2320 }
2321 }
2322 rxdp->Control_2 |= BIT(0);
2260 } 2323 }
2261#ifndef CONFIG_2BUFF_MODE
2262 skb_reserve(skb, NET_IP_ALIGN);
2263 memset(rxdp, 0, sizeof(RxD_t));
2264 rxdp->Buffer0_ptr = pci_map_single
2265 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2266 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2267 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2268 rxdp->Host_Control = (unsigned long) (skb); 2324 rxdp->Host_Control = (unsigned long) (skb);
2269 if (alloc_tab & ((1 << rxsync_frequency) - 1)) 2325 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2270 rxdp->Control_1 |= RXD_OWN_XENA; 2326 rxdp->Control_1 |= RXD_OWN_XENA;
2271 off++; 2327 off++;
2272 off %= (MAX_RXDS_PER_BLOCK + 1); 2328 if (off == (rxd_count[nic->rxd_mode] + 1))
2273 mac_control->rings[ring_no].rx_curr_put_info.offset = off; 2329 off = 0;
2274#else
2275 ba = &mac_control->rings[ring_no].ba[block_no][off];
2276 skb_reserve(skb, BUF0_LEN);
2277 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2278 if (tmp)
2279 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2280
2281 memset(rxdp, 0, sizeof(RxD_t));
2282 rxdp->Buffer2_ptr = pci_map_single
2283 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2284 PCI_DMA_FROMDEVICE);
2285 rxdp->Buffer0_ptr =
2286 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2287 PCI_DMA_FROMDEVICE);
2288 rxdp->Buffer1_ptr =
2289 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2290 PCI_DMA_FROMDEVICE);
2291
2292 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2293 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2294 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2295 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2296 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2297 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2298 rxdp->Control_1 |= RXD_OWN_XENA;
2299 off++;
2300 mac_control->rings[ring_no].rx_curr_put_info.offset = off; 2330 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2301#endif
2302 rxdp->Control_2 |= SET_RXD_MARKER;
2303 2331
2332 rxdp->Control_2 |= SET_RXD_MARKER;
2304 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { 2333 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2305 if (first_rxdp) { 2334 if (first_rxdp) {
2306 wmb(); 2335 wmb();
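The sizing branch earlier in this hunk reduces to a small pure function. Below is a compilable sketch of that logic; the header and alignment constants are assumed stand-ins, with only BUF0_LEN (40) confirmed by the s2io.h hunk further down.

#include <stdio.h>

enum rxd_mode { RXD_MODE_1 = 0, RXD_MODE_3A = 1, RXD_MODE_3B = 2 };

/* Assumed stand-ins for the driver's constants. */
#define HEADER_ETHERNET_II_802_3_SIZE 14
#define HEADER_802_2_SIZE              3
#define HEADER_SNAP_SIZE               5
#define NET_IP_ALIGN                   2
#define ALIGN_SIZE                   127   /* slack for 128-byte alignment */
#define BUF0_LEN                      40   /* from the s2io.h hunk below */

static int rx_skb_size(enum rxd_mode mode, int mtu, int l3l4hdr_size)
{
	int size = mtu + HEADER_ETHERNET_II_802_3_SIZE +
		   HEADER_802_2_SIZE + HEADER_SNAP_SIZE;

	if (mode == RXD_MODE_1)          /* one buffer holds headers + payload */
		size += NET_IP_ALIGN;
	else if (mode == RXD_MODE_3B)    /* two buffers: MTU-sized data buffer */
		size = mtu + ALIGN_SIZE + BUF0_LEN + 4;
	else                             /* three buffers: L3/L4 header buffer */
		size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
	return size;
}

int main(void)
{
	printf("mode1=%d mode3B=%d mode3A=%d\n",
	       rx_skb_size(RXD_MODE_1, 1500, 128),
	       rx_skb_size(RXD_MODE_3B, 1500, 128),
	       rx_skb_size(RXD_MODE_3A, 1500, 128));
	return 0;
}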
@@ -2325,6 +2354,67 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2325 return SUCCESS; 2354 return SUCCESS;
2326} 2355}
2327 2356
2357static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2358{
2359 struct net_device *dev = sp->dev;
2360 int j;
2361 struct sk_buff *skb;
2362 RxD_t *rxdp;
2363 mac_info_t *mac_control;
2364 buffAdd_t *ba;
2365
2366 mac_control = &sp->mac_control;
2367 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2368 rxdp = mac_control->rings[ring_no].
2369 rx_blocks[blk].rxds[j].virt_addr;
2370 skb = (struct sk_buff *)
2371 ((unsigned long) rxdp->Host_Control);
2372 if (!skb) {
2373 continue;
2374 }
2375 if (sp->rxd_mode == RXD_MODE_1) {
2376 pci_unmap_single(sp->pdev, (dma_addr_t)
2377 ((RxD1_t*)rxdp)->Buffer0_ptr,
2378 dev->mtu +
2379 HEADER_ETHERNET_II_802_3_SIZE
2380 + HEADER_802_2_SIZE +
2381 HEADER_SNAP_SIZE,
2382 PCI_DMA_FROMDEVICE);
2383 memset(rxdp, 0, sizeof(RxD1_t));
2384 } else if (sp->rxd_mode == RXD_MODE_3B) {
2385 ba = &mac_control->rings[ring_no].
2386 ba[blk][j];
2387 pci_unmap_single(sp->pdev, (dma_addr_t)
2388 ((RxD3_t*)rxdp)->Buffer0_ptr,
2389 BUF0_LEN,
2390 PCI_DMA_FROMDEVICE);
2391 pci_unmap_single(sp->pdev, (dma_addr_t)
2392 ((RxD3_t*)rxdp)->Buffer1_ptr,
2393 BUF1_LEN,
2394 PCI_DMA_FROMDEVICE);
2395 pci_unmap_single(sp->pdev, (dma_addr_t)
2396 ((RxD3_t*)rxdp)->Buffer2_ptr,
2397 dev->mtu + 4,
2398 PCI_DMA_FROMDEVICE);
2399 memset(rxdp, 0, sizeof(RxD3_t));
2400 } else {
2401 pci_unmap_single(sp->pdev, (dma_addr_t)
2402 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2403 PCI_DMA_FROMDEVICE);
2404 pci_unmap_single(sp->pdev, (dma_addr_t)
2405 ((RxD3_t*)rxdp)->Buffer1_ptr,
2406 l3l4hdr_size + 4,
2407 PCI_DMA_FROMDEVICE);
2408 pci_unmap_single(sp->pdev, (dma_addr_t)
2409 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
2410 PCI_DMA_FROMDEVICE);
2411 memset(rxdp, 0, sizeof(RxD3_t));
2412 }
2413 dev_kfree_skb(skb);
2414 atomic_dec(&sp->rx_bufs_left[ring_no]);
2415 }
2416}
2417
2328/** 2418/**
2329 * free_rx_buffers - Frees all Rx buffers 2419 * free_rx_buffers - Frees all Rx buffers
2330 * @sp: device private variable. 2420 * @sp: device private variable.
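free_rxd_blk() above walks rxd_count[sp->rxd_mode] descriptors per block. Those counts follow from SIZE_OF_BLOCK (4096, see the s2io.h hunk below) and the two descriptor layouts; here is a worked check, where the rxd_count values themselves are inferred from the layouts rather than quoted from the driver.

#include <assert.h>
#include <stdint.h>

/* Layouts mirroring RxD1_t (32 bytes) and RxD3_t (48 bytes). */
struct rxd1 { uint64_t host_control, control_1, control_2, buffer0; };
struct rxd3 { uint64_t host_control, control_1, control_2,
	      buffer0, buffer1, buffer2; };

#define SIZE_OF_BLOCK 4096

int main(void)
{
	/* Mode 1: 4096/32 = 128 slots, minus one reserved for the
	 * end-of-block link, leaving 127 (MAX_RXDS_PER_BLOCK_1). */
	assert(SIZE_OF_BLOCK / sizeof(struct rxd1) - 1 == 127);

	/* Modes 3A/3B: 4096/48 = 85 descriptors; the 16-byte remainder
	 * holds the end-of-block marker and next-block pointer. */
	assert(SIZE_OF_BLOCK / sizeof(struct rxd3) == 85);
	return 0;
}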
@@ -2337,77 +2427,17 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2337static void free_rx_buffers(struct s2io_nic *sp) 2427static void free_rx_buffers(struct s2io_nic *sp)
2338{ 2428{
2339 struct net_device *dev = sp->dev; 2429 struct net_device *dev = sp->dev;
2340 int i, j, blk = 0, off, buf_cnt = 0; 2430 int i, blk = 0, buf_cnt = 0;
2341 RxD_t *rxdp;
2342 struct sk_buff *skb;
2343 mac_info_t *mac_control; 2431 mac_info_t *mac_control;
2344 struct config_param *config; 2432 struct config_param *config;
2345#ifdef CONFIG_2BUFF_MODE
2346 buffAdd_t *ba;
2347#endif
2348 2433
2349 mac_control = &sp->mac_control; 2434 mac_control = &sp->mac_control;
2350 config = &sp->config; 2435 config = &sp->config;
2351 2436
2352 for (i = 0; i < config->rx_ring_num; i++) { 2437 for (i = 0; i < config->rx_ring_num; i++) {
2353 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) { 2438 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2354 off = j % (MAX_RXDS_PER_BLOCK + 1); 2439 free_rxd_blk(sp,i,blk);
2355 rxdp = mac_control->rings[i].rx_blocks[blk].
2356 block_virt_addr + off;
2357
2358#ifndef CONFIG_2BUFF_MODE
2359 if (rxdp->Control_1 == END_OF_BLOCK) {
2360 rxdp =
2361 (RxD_t *) ((unsigned long) rxdp->
2362 Control_2);
2363 j++;
2364 blk++;
2365 }
2366#else
2367 if (rxdp->Host_Control == END_OF_BLOCK) {
2368 blk++;
2369 continue;
2370 }
2371#endif
2372 2440
2373 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2374 memset(rxdp, 0, sizeof(RxD_t));
2375 continue;
2376 }
2377
2378 skb =
2379 (struct sk_buff *) ((unsigned long) rxdp->
2380 Host_Control);
2381 if (skb) {
2382#ifndef CONFIG_2BUFF_MODE
2383 pci_unmap_single(sp->pdev, (dma_addr_t)
2384 rxdp->Buffer0_ptr,
2385 dev->mtu +
2386 HEADER_ETHERNET_II_802_3_SIZE
2387 + HEADER_802_2_SIZE +
2388 HEADER_SNAP_SIZE,
2389 PCI_DMA_FROMDEVICE);
2390#else
2391 ba = &mac_control->rings[i].ba[blk][off];
2392 pci_unmap_single(sp->pdev, (dma_addr_t)
2393 rxdp->Buffer0_ptr,
2394 BUF0_LEN,
2395 PCI_DMA_FROMDEVICE);
2396 pci_unmap_single(sp->pdev, (dma_addr_t)
2397 rxdp->Buffer1_ptr,
2398 BUF1_LEN,
2399 PCI_DMA_FROMDEVICE);
2400 pci_unmap_single(sp->pdev, (dma_addr_t)
2401 rxdp->Buffer2_ptr,
2402 dev->mtu + BUF0_LEN + 4,
2403 PCI_DMA_FROMDEVICE);
2404#endif
2405 dev_kfree_skb(skb);
2406 atomic_dec(&sp->rx_bufs_left[i]);
2407 buf_cnt++;
2408 }
2409 memset(rxdp, 0, sizeof(RxD_t));
2410 }
2411 mac_control->rings[i].rx_curr_put_info.block_index = 0; 2441 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2412 mac_control->rings[i].rx_curr_get_info.block_index = 0; 2442 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2413 mac_control->rings[i].rx_curr_put_info.offset = 0; 2443 mac_control->rings[i].rx_curr_put_info.offset = 0;
@@ -2513,7 +2543,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
2513{ 2543{
2514 nic_t *nic = ring_data->nic; 2544 nic_t *nic = ring_data->nic;
2515 struct net_device *dev = (struct net_device *) nic->dev; 2545 struct net_device *dev = (struct net_device *) nic->dev;
2516 int get_block, get_offset, put_block, put_offset, ring_bufs; 2546 int get_block, put_block, put_offset;
2517 rx_curr_get_info_t get_info, put_info; 2547 rx_curr_get_info_t get_info, put_info;
2518 RxD_t *rxdp; 2548 RxD_t *rxdp;
2519 struct sk_buff *skb; 2549 struct sk_buff *skb;
@@ -2532,21 +2562,22 @@ static void rx_intr_handler(ring_info_t *ring_data)
2532 get_block = get_info.block_index; 2562 get_block = get_info.block_index;
2533 put_info = ring_data->rx_curr_put_info; 2563 put_info = ring_data->rx_curr_put_info;
2534 put_block = put_info.block_index; 2564 put_block = put_info.block_index;
2535 ring_bufs = get_info.ring_len+1; 2565 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2536 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2537 get_info.offset;
2538 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2539 get_info.offset;
2540#ifndef CONFIG_S2IO_NAPI 2566#ifndef CONFIG_S2IO_NAPI
2541 spin_lock(&nic->put_lock); 2567 spin_lock(&nic->put_lock);
2542 put_offset = ring_data->put_pos; 2568 put_offset = ring_data->put_pos;
2543 spin_unlock(&nic->put_lock); 2569 spin_unlock(&nic->put_lock);
2544#else 2570#else
2545 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) + 2571 put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
2546 put_info.offset; 2572 put_info.offset;
2547#endif 2573#endif
2548 while (RXD_IS_UP2DT(rxdp) && 2574 while (RXD_IS_UP2DT(rxdp)) {
2549 (((get_offset + 1) % ring_bufs) != put_offset)) { 2575 /* If we are next to the put index then the ring is full */
2576 if ((get_block == put_block) &&
2577 (get_info.offset + 1) == put_info.offset) {
2578 DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
2579 break;
2580 }
2550 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); 2581 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2551 if (skb == NULL) { 2582 if (skb == NULL) {
2552 DBG_PRINT(ERR_DBG, "%s: The skb is ", 2583 DBG_PRINT(ERR_DBG, "%s: The skb is ",
@@ -2555,46 +2586,52 @@ static void rx_intr_handler(ring_info_t *ring_data)
2555 spin_unlock(&nic->rx_lock); 2586 spin_unlock(&nic->rx_lock);
2556 return; 2587 return;
2557 } 2588 }
2558#ifndef CONFIG_2BUFF_MODE 2589 if (nic->rxd_mode == RXD_MODE_1) {
2559 pci_unmap_single(nic->pdev, (dma_addr_t) 2590 pci_unmap_single(nic->pdev, (dma_addr_t)
2560 rxdp->Buffer0_ptr, 2591 ((RxD1_t*)rxdp)->Buffer0_ptr,
2561 dev->mtu + 2592 dev->mtu +
2562 HEADER_ETHERNET_II_802_3_SIZE + 2593 HEADER_ETHERNET_II_802_3_SIZE +
2563 HEADER_802_2_SIZE + 2594 HEADER_802_2_SIZE +
2564 HEADER_SNAP_SIZE, 2595 HEADER_SNAP_SIZE,
2565 PCI_DMA_FROMDEVICE); 2596 PCI_DMA_FROMDEVICE);
2566#else 2597 } else if (nic->rxd_mode == RXD_MODE_3B) {
2567 pci_unmap_single(nic->pdev, (dma_addr_t) 2598 pci_unmap_single(nic->pdev, (dma_addr_t)
2568 rxdp->Buffer0_ptr, 2599 ((RxD3_t*)rxdp)->Buffer0_ptr,
2569 BUF0_LEN, PCI_DMA_FROMDEVICE); 2600 BUF0_LEN, PCI_DMA_FROMDEVICE);
2570 pci_unmap_single(nic->pdev, (dma_addr_t) 2601 pci_unmap_single(nic->pdev, (dma_addr_t)
2571 rxdp->Buffer1_ptr, 2602 ((RxD3_t*)rxdp)->Buffer1_ptr,
2572 BUF1_LEN, PCI_DMA_FROMDEVICE); 2603 BUF1_LEN, PCI_DMA_FROMDEVICE);
2573 pci_unmap_single(nic->pdev, (dma_addr_t) 2604 pci_unmap_single(nic->pdev, (dma_addr_t)
2574 rxdp->Buffer2_ptr, 2605 ((RxD3_t*)rxdp)->Buffer2_ptr,
2575 dev->mtu + BUF0_LEN + 4, 2606 dev->mtu + 4,
2576 PCI_DMA_FROMDEVICE); 2607 PCI_DMA_FROMDEVICE);
2577#endif 2608 } else {
2609 pci_unmap_single(nic->pdev, (dma_addr_t)
2610 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2611 PCI_DMA_FROMDEVICE);
2612 pci_unmap_single(nic->pdev, (dma_addr_t)
2613 ((RxD3_t*)rxdp)->Buffer1_ptr,
2614 l3l4hdr_size + 4,
2615 PCI_DMA_FROMDEVICE);
2616 pci_unmap_single(nic->pdev, (dma_addr_t)
2617 ((RxD3_t*)rxdp)->Buffer2_ptr,
2618 dev->mtu, PCI_DMA_FROMDEVICE);
2619 }
2578 rx_osm_handler(ring_data, rxdp); 2620 rx_osm_handler(ring_data, rxdp);
2579 get_info.offset++; 2621 get_info.offset++;
2580 ring_data->rx_curr_get_info.offset = 2622 ring_data->rx_curr_get_info.offset = get_info.offset;
2581 get_info.offset; 2623 rxdp = ring_data->rx_blocks[get_block].
2582 rxdp = ring_data->rx_blocks[get_block].block_virt_addr + 2624 rxds[get_info.offset].virt_addr;
2583 get_info.offset; 2625 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2584 if (get_info.offset &&
2585 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2586 get_info.offset = 0; 2626 get_info.offset = 0;
2587 ring_data->rx_curr_get_info.offset 2627 ring_data->rx_curr_get_info.offset = get_info.offset;
2588 = get_info.offset;
2589 get_block++; 2628 get_block++;
2590 get_block %= ring_data->block_count; 2629 if (get_block == ring_data->block_count)
2591 ring_data->rx_curr_get_info.block_index 2630 get_block = 0;
2592 = get_block; 2631 ring_data->rx_curr_get_info.block_index = get_block;
2593 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 2632 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2594 } 2633 }
2595 2634
2596 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2597 get_info.offset;
2598#ifdef CONFIG_S2IO_NAPI 2635#ifdef CONFIG_S2IO_NAPI
2599 nic->pkts_to_process -= 1; 2636 nic->pkts_to_process -= 1;
2600 if (!nic->pkts_to_process) 2637 if (!nic->pkts_to_process)
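The rewritten loop drops the old (get_offset + 1) % ring_bufs arithmetic in favour of an explicit block/offset comparison against the put cursor. Below is a standalone model of those cursors; the 85 descriptors per block and 8 blocks are example numbers, not driver configuration.

#include <assert.h>
#include <stdbool.h>

struct cursor { int block; int offset; };

/* Ring is treated as full when the consumer sits immediately
 * behind the producer within the same block. */
static bool ring_full(struct cursor get, struct cursor put)
{
	return get.block == put.block && get.offset + 1 == put.offset;
}

/* Advance one descriptor, wrapping offset into the next block. */
static void advance(struct cursor *c, int rxds_per_block, int blocks)
{
	if (++c->offset == rxds_per_block) {
		c->offset = 0;
		if (++c->block == blocks)
			c->block = 0;
	}
}

int main(void)
{
	struct cursor get = { 3, 40 }, put = { 3, 41 };

	assert(ring_full(get, put));     /* consumer has caught up */
	advance(&get, 85, 8);
	advance(&put, 85, 8);
	assert(ring_full(get, put));     /* still adjacent after a step */
	return 0;
}

Note that the predicate, like the hunk above, only fires when both cursors share a block.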
@@ -3044,7 +3081,7 @@ int s2io_set_swapper(nic_t * sp)
3044 3081
3045int wait_for_msix_trans(nic_t *nic, int i) 3082int wait_for_msix_trans(nic_t *nic, int i)
3046{ 3083{
3047 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3084 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3048 u64 val64; 3085 u64 val64;
3049 int ret = 0, cnt = 0; 3086 int ret = 0, cnt = 0;
3050 3087
@@ -3065,7 +3102,7 @@ int wait_for_msix_trans(nic_t *nic, int i)
3065 3102
3066void restore_xmsi_data(nic_t *nic) 3103void restore_xmsi_data(nic_t *nic)
3067{ 3104{
3068 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3105 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3069 u64 val64; 3106 u64 val64;
3070 int i; 3107 int i;
3071 3108
@@ -3083,7 +3120,7 @@ void restore_xmsi_data(nic_t *nic)
3083 3120
3084void store_xmsi_data(nic_t *nic) 3121void store_xmsi_data(nic_t *nic)
3085{ 3122{
3086 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3123 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3087 u64 val64, addr, data; 3124 u64 val64, addr, data;
3088 int i; 3125 int i;
3089 3126
@@ -3106,7 +3143,7 @@ void store_xmsi_data(nic_t *nic)
3106 3143
3107int s2io_enable_msi(nic_t *nic) 3144int s2io_enable_msi(nic_t *nic)
3108{ 3145{
3109 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3146 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3110 u16 msi_ctrl, msg_val; 3147 u16 msi_ctrl, msg_val;
3111 struct config_param *config = &nic->config; 3148 struct config_param *config = &nic->config;
3112 struct net_device *dev = nic->dev; 3149 struct net_device *dev = nic->dev;
@@ -3156,7 +3193,7 @@ int s2io_enable_msi(nic_t *nic)
3156 3193
3157int s2io_enable_msi_x(nic_t *nic) 3194int s2io_enable_msi_x(nic_t *nic)
3158{ 3195{
3159 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3196 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3160 u64 tx_mat, rx_mat; 3197 u64 tx_mat, rx_mat;
3161 u16 msi_control; /* Temp variable */ 3198 u16 msi_control; /* Temp variable */
3162 int ret, i, j, msix_indx = 1; 3199 int ret, i, j, msix_indx = 1;
@@ -5537,16 +5574,7 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5537 ((unsigned long) rxdp->Host_Control); 5574 ((unsigned long) rxdp->Host_Control);
5538 int ring_no = ring_data->ring_no; 5575 int ring_no = ring_data->ring_no;
5539 u16 l3_csum, l4_csum; 5576 u16 l3_csum, l4_csum;
5540#ifdef CONFIG_2BUFF_MODE 5577
5541 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5542 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5543 int get_block = ring_data->rx_curr_get_info.block_index;
5544 int get_off = ring_data->rx_curr_get_info.offset;
5545 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
5546 unsigned char *buff;
5547#else
5548 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
5549#endif
5550 skb->dev = dev; 5578 skb->dev = dev;
5551 if (rxdp->Control_1 & RXD_T_CODE) { 5579 if (rxdp->Control_1 & RXD_T_CODE) {
5552 unsigned long long err = rxdp->Control_1 & RXD_T_CODE; 5580 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
@@ -5563,19 +5591,36 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5563 rxdp->Host_Control = 0; 5591 rxdp->Host_Control = 0;
5564 sp->rx_pkt_count++; 5592 sp->rx_pkt_count++;
5565 sp->stats.rx_packets++; 5593 sp->stats.rx_packets++;
5566#ifndef CONFIG_2BUFF_MODE 5594 if (sp->rxd_mode == RXD_MODE_1) {
5567 sp->stats.rx_bytes += len; 5595 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
5568#else
5569 sp->stats.rx_bytes += buf0_len + buf2_len;
5570#endif
5571 5596
5572#ifndef CONFIG_2BUFF_MODE 5597 sp->stats.rx_bytes += len;
5573 skb_put(skb, len); 5598 skb_put(skb, len);
5574#else 5599
5575 buff = skb_push(skb, buf0_len); 5600 } else if (sp->rxd_mode >= RXD_MODE_3A) {
5576 memcpy(buff, ba->ba_0, buf0_len); 5601 int get_block = ring_data->rx_curr_get_info.block_index;
5577 skb_put(skb, buf2_len); 5602 int get_off = ring_data->rx_curr_get_info.offset;
5578#endif 5603 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
5604 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
5605 unsigned char *buff = skb_push(skb, buf0_len);
5606
5607 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
5608 sp->stats.rx_bytes += buf0_len + buf2_len;
5609 memcpy(buff, ba->ba_0, buf0_len);
5610
5611 if (sp->rxd_mode == RXD_MODE_3A) {
5612 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
5613
5614 skb_put(skb, buf1_len);
5615 skb->len += buf2_len;
5616 skb->data_len += buf2_len;
5617 skb->truesize += buf2_len;
5618 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
5619 sp->stats.rx_bytes += buf1_len;
5620
5621 } else
5622 skb_put(skb, buf2_len);
5623 }
5579 5624
5580 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && 5625 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5581 (sp->rx_csum)) { 5626 (sp->rx_csum)) {
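In RXD_MODE_3A receive, the copied header bytes (buf0) and the one-byte dummy buffer (buf1) land in the skb's linear area while the payload (buf2) stays on skb_shinfo(skb)->frag_list, so len, data_len and truesize are adjusted by hand in the hunk above. Here is a userspace model of that accounting; the field names mirror sk_buff, but none of this is kernel code.

#include <assert.h>

struct skb_model {
	int len;       /* total bytes: linear area plus frag_list */
	int data_len;  /* bytes living outside the linear area */
	int truesize;  /* rough allocation footprint */
};

static void account_mode_3a(struct skb_model *skb,
			    int buf0_len, int buf1_len, int buf2_len)
{
	skb->len      += buf0_len + buf1_len; /* skb_push/skb_put, linear */
	skb->len      += buf2_len;            /* payload on frag_list */
	skb->data_len += buf2_len;
	skb->truesize += buf2_len;
}

int main(void)
{
	struct skb_model skb = { 0, 0, 0 };

	account_mode_3a(&skb, 40, 1, 1460);
	assert(skb.len == 1501);      /* headers + dummy + payload */
	assert(skb.data_len == 1460); /* only the payload is non-linear */
	return 0;
}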
@@ -5711,6 +5756,7 @@ MODULE_VERSION(DRV_VERSION);
5711 5756
5712module_param(tx_fifo_num, int, 0); 5757module_param(tx_fifo_num, int, 0);
5713module_param(rx_ring_num, int, 0); 5758module_param(rx_ring_num, int, 0);
5759module_param(rx_ring_mode, int, 0);
5714module_param_array(tx_fifo_len, uint, NULL, 0); 5760module_param_array(tx_fifo_len, uint, NULL, 0);
5715module_param_array(rx_ring_sz, uint, NULL, 0); 5761module_param_array(rx_ring_sz, uint, NULL, 0);
5716module_param_array(rts_frm_len, uint, NULL, 0); 5762module_param_array(rts_frm_len, uint, NULL, 0);
@@ -5722,6 +5768,7 @@ module_param(shared_splits, int, 0);
5722module_param(tmac_util_period, int, 0); 5768module_param(tmac_util_period, int, 0);
5723module_param(rmac_util_period, int, 0); 5769module_param(rmac_util_period, int, 0);
5724module_param(bimodal, bool, 0); 5770module_param(bimodal, bool, 0);
5771module_param(l3l4hdr_size, int , 0);
5725#ifndef CONFIG_S2IO_NAPI 5772#ifndef CONFIG_S2IO_NAPI
5726module_param(indicate_max_pkts, int, 0); 5773module_param(indicate_max_pkts, int, 0);
5727#endif 5774#endif
@@ -5843,6 +5890,13 @@ Defaulting to INTA\n");
5843 sp->pdev = pdev; 5890 sp->pdev = pdev;
5844 sp->high_dma_flag = dma_flag; 5891 sp->high_dma_flag = dma_flag;
5845 sp->device_enabled_once = FALSE; 5892 sp->device_enabled_once = FALSE;
5893 if (rx_ring_mode == 1)
5894 sp->rxd_mode = RXD_MODE_1;
5895 if (rx_ring_mode == 2)
5896 sp->rxd_mode = RXD_MODE_3B;
5897 if (rx_ring_mode == 3)
5898 sp->rxd_mode = RXD_MODE_3A;
5899
5846 sp->intr_type = dev_intr_type; 5900 sp->intr_type = dev_intr_type;
5847 5901
5848 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) || 5902 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
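The new rx_ring_mode parameter selects the descriptor layout; loading with rx_ring_mode=2, for example, picks the two-buffer mode announced later in the probe output. The mapping reduces to the switch below. Treating out-of-range values as RXD_MODE_1 is an inference from the zero-filled private area that alloc_etherdev() hands out, not a documented guarantee.

enum rxd_mode { RXD_MODE_1 = 0, RXD_MODE_3A = 1, RXD_MODE_3B = 2 };

static enum rxd_mode map_rx_ring_mode(int rx_ring_mode)
{
	switch (rx_ring_mode) {
	case 1:	 return RXD_MODE_1;   /* classic one-buffer descriptors  */
	case 2:	 return RXD_MODE_3B;  /* two buffers: header/data split  */
	case 3:	 return RXD_MODE_3A;  /* three buffers: full separation  */
	default: return RXD_MODE_1;   /* inferred fallback, see above    */
	}
}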
@@ -5895,7 +5949,7 @@ Defaulting to INTA\n");
5895 config->rx_ring_num = rx_ring_num; 5949 config->rx_ring_num = rx_ring_num;
5896 for (i = 0; i < MAX_RX_RINGS; i++) { 5950 for (i = 0; i < MAX_RX_RINGS; i++) {
5897 config->rx_cfg[i].num_rxd = rx_ring_sz[i] * 5951 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5898 (MAX_RXDS_PER_BLOCK + 1); 5952 (rxd_count[sp->rxd_mode] + 1);
5899 config->rx_cfg[i].ring_priority = i; 5953 config->rx_cfg[i].ring_priority = i;
5900 } 5954 }
5901 5955
@@ -6090,9 +6144,6 @@ Defaulting to INTA\n");
6090 DBG_PRINT(ERR_DBG, "(rev %d), Version %s", 6144 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6091 get_xena_rev_id(sp->pdev), 6145 get_xena_rev_id(sp->pdev),
6092 s2io_driver_version); 6146 s2io_driver_version);
6093#ifdef CONFIG_2BUFF_MODE
6094 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6095#endif
6096 switch(sp->intr_type) { 6147 switch(sp->intr_type) {
6097 case INTA: 6148 case INTA:
6098 DBG_PRINT(ERR_DBG, ", Intr type INTA"); 6149 DBG_PRINT(ERR_DBG, ", Intr type INTA");
@@ -6125,9 +6176,6 @@ Defaulting to INTA\n");
6125 DBG_PRINT(ERR_DBG, "(rev %d), Version %s", 6176 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6126 get_xena_rev_id(sp->pdev), 6177 get_xena_rev_id(sp->pdev),
6127 s2io_driver_version); 6178 s2io_driver_version);
6128#ifdef CONFIG_2BUFF_MODE
6129 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6130#endif
6131 switch(sp->intr_type) { 6179 switch(sp->intr_type) {
6132 case INTA: 6180 case INTA:
6133 DBG_PRINT(ERR_DBG, ", Intr type INTA"); 6181 DBG_PRINT(ERR_DBG, ", Intr type INTA");
@@ -6148,6 +6196,12 @@ Defaulting to INTA\n");
6148 sp->def_mac_addr[0].mac_addr[4], 6196 sp->def_mac_addr[0].mac_addr[4],
6149 sp->def_mac_addr[0].mac_addr[5]); 6197 sp->def_mac_addr[0].mac_addr[5]);
6150 } 6198 }
6199 if (sp->rxd_mode == RXD_MODE_3B)
6200 DBG_PRINT(ERR_DBG, "%s: 2-Buffer mode support has been "
6201 "enabled\n",dev->name);
6202 if (sp->rxd_mode == RXD_MODE_3A)
6203 DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been "
6204 "enabled\n",dev->name);
6151 6205
6152 /* Initialize device name */ 6206 /* Initialize device name */
6153 strcpy(sp->name, dev->name); 6207 strcpy(sp->name, dev->name);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 1cc24b56760e..419aad7f10e7 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -418,7 +418,7 @@ typedef struct list_info_hold {
418 void *list_virt_addr; 418 void *list_virt_addr;
419} list_info_hold_t; 419} list_info_hold_t;
420 420
421/* Rx descriptor structure */ 421/* Rx descriptor structure for 1 buffer mode */
422typedef struct _RxD_t { 422typedef struct _RxD_t {
423 u64 Host_Control; /* reserved for host */ 423 u64 Host_Control; /* reserved for host */
424 u64 Control_1; 424 u64 Control_1;
@@ -439,49 +439,54 @@ typedef struct _RxD_t {
439#define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2) 439#define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2)
440#define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62) 440#define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62)
441 441
442#ifndef CONFIG_2BUFF_MODE
443#define MASK_BUFFER0_SIZE vBIT(0x3FFF,2,14)
444#define SET_BUFFER0_SIZE(val) vBIT(val,2,14)
445#else
446#define MASK_BUFFER0_SIZE vBIT(0xFF,2,14)
447#define MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
448#define MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
449#define SET_BUFFER0_SIZE(val) vBIT(val,8,8)
450#define SET_BUFFER1_SIZE(val) vBIT(val,16,16)
451#define SET_BUFFER2_SIZE(val) vBIT(val,32,16)
452#endif
453
454#define MASK_VLAN_TAG vBIT(0xFFFF,48,16) 442#define MASK_VLAN_TAG vBIT(0xFFFF,48,16)
455#define SET_VLAN_TAG(val) vBIT(val,48,16) 443#define SET_VLAN_TAG(val) vBIT(val,48,16)
456#define SET_NUM_TAG(val) vBIT(val,16,32) 444#define SET_NUM_TAG(val) vBIT(val,16,32)
457 445
458#ifndef CONFIG_2BUFF_MODE 446
459#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0x3FFF,2,14))) 447} RxD_t;
460#else 448/* Rx descriptor structure for 1 buffer mode */
461#define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \ 449typedef struct _RxD1_t {
462 >> 48) 450 struct _RxD_t h;
463#define RXD_GET_BUFFER1_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER1_SIZE) \ 451
464 >> 32) 452#define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14)
465#define RXD_GET_BUFFER2_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER2_SIZE) \ 453#define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14)
466 >> 16) 454#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \
455 (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48)
456 u64 Buffer0_ptr;
457} RxD1_t;
458/* Rx descriptor structure for 3 or 2 buffer mode */
459
460typedef struct _RxD3_t {
461 struct _RxD_t h;
462
463#define MASK_BUFFER0_SIZE_3 vBIT(0xFF,2,14)
464#define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16)
465#define MASK_BUFFER2_SIZE_3 vBIT(0xFFFF,32,16)
466#define SET_BUFFER0_SIZE_3(val) vBIT(val,8,8)
467#define SET_BUFFER1_SIZE_3(val) vBIT(val,16,16)
468#define SET_BUFFER2_SIZE_3(val) vBIT(val,32,16)
469#define RXD_GET_BUFFER0_SIZE_3(Control_2) \
470 (u8)((Control_2 & MASK_BUFFER0_SIZE_3) >> 48)
471#define RXD_GET_BUFFER1_SIZE_3(Control_2) \
472 (u16)((Control_2 & MASK_BUFFER1_SIZE_3) >> 32)
473#define RXD_GET_BUFFER2_SIZE_3(Control_2) \
474 (u16)((Control_2 & MASK_BUFFER2_SIZE_3) >> 16)
467#define BUF0_LEN 40 475#define BUF0_LEN 40
468#define BUF1_LEN 1 476#define BUF1_LEN 1
469#endif
470 477
471 u64 Buffer0_ptr; 478 u64 Buffer0_ptr;
472#ifdef CONFIG_2BUFF_MODE
473 u64 Buffer1_ptr; 479 u64 Buffer1_ptr;
474 u64 Buffer2_ptr; 480 u64 Buffer2_ptr;
475#endif 481} RxD3_t;
476} RxD_t; 482
477 483
478/* Structure that represents the Rx descriptor block which contains 484/* Structure that represents the Rx descriptor block which contains
479 * 128 Rx descriptors. 485 * 128 Rx descriptors.
480 */ 486 */
481#ifndef CONFIG_2BUFF_MODE
482typedef struct _RxD_block { 487typedef struct _RxD_block {
483#define MAX_RXDS_PER_BLOCK 127 488#define MAX_RXDS_PER_BLOCK_1 127
484 RxD_t rxd[MAX_RXDS_PER_BLOCK]; 489 RxD1_t rxd[MAX_RXDS_PER_BLOCK_1];
485 490
486 u64 reserved_0; 491 u64 reserved_0;
487#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL 492#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
@@ -492,18 +497,13 @@ typedef struct _RxD_block {
492 * the upper 32 bits should 497 * the upper 32 bits should
493 * be 0 */ 498 * be 0 */
494} RxD_block_t; 499} RxD_block_t;
495#else
496typedef struct _RxD_block {
497#define MAX_RXDS_PER_BLOCK 85
498 RxD_t rxd[MAX_RXDS_PER_BLOCK];
499 500
500#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
501 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd
502 * in this blk */
503 u64 pNext_RxD_Blk_physical; /* Phy pointer to next blk. */
504} RxD_block_t;
505#define SIZE_OF_BLOCK 4096 501#define SIZE_OF_BLOCK 4096
506 502
503#define RXD_MODE_1 0
504#define RXD_MODE_3A 1
505#define RXD_MODE_3B 2
506
507/* Structure to hold virtual addresses of Buf0 and Buf1 in 507/* Structure to hold virtual addresses of Buf0 and Buf1 in
508 * 2buf mode. */ 508 * 2buf mode. */
509typedef struct bufAdd { 509typedef struct bufAdd {
@@ -512,7 +512,6 @@ typedef struct bufAdd {
512 void *ba_0; 512 void *ba_0;
513 void *ba_1; 513 void *ba_1;
514} buffAdd_t; 514} buffAdd_t;
515#endif
516 515
517/* Structure which stores all the MAC control parameters */ 516/* Structure which stores all the MAC control parameters */
518 517
@@ -539,10 +538,17 @@ typedef struct {
539 538
540typedef tx_curr_get_info_t tx_curr_put_info_t; 539typedef tx_curr_get_info_t tx_curr_put_info_t;
541 540
541
542typedef struct rxd_info {
543 void *virt_addr;
544 dma_addr_t dma_addr;
545}rxd_info_t;
546
542/* Structure that holds the Phy and virt addresses of the Blocks */ 547/* Structure that holds the Phy and virt addresses of the Blocks */
543typedef struct rx_block_info { 548typedef struct rx_block_info {
544 RxD_t *block_virt_addr; 549 void *block_virt_addr;
545 dma_addr_t block_dma_addr; 550 dma_addr_t block_dma_addr;
551 rxd_info_t *rxds;
546} rx_block_info_t; 552} rx_block_info_t;
547 553
548/* pre declaration of the nic structure */ 554/* pre declaration of the nic structure */
@@ -578,10 +584,8 @@ typedef struct ring_info {
578 int put_pos; 584 int put_pos;
579#endif 585#endif
580 586
581#ifdef CONFIG_2BUFF_MODE
582 /* Buffer Address store. */ 587 /* Buffer Address store. */
583 buffAdd_t **ba; 588 buffAdd_t **ba;
584#endif
585 nic_t *nic; 589 nic_t *nic;
586} ring_info_t; 590} ring_info_t;
587 591
@@ -647,8 +651,6 @@ typedef struct {
647 651
648/* Default Tunable parameters of the NIC. */ 652/* Default Tunable parameters of the NIC. */
649#define DEFAULT_FIFO_LEN 4096 653#define DEFAULT_FIFO_LEN 4096
650#define SMALL_RXD_CNT 30 * (MAX_RXDS_PER_BLOCK+1)
651#define LARGE_RXD_CNT 100 * (MAX_RXDS_PER_BLOCK+1)
652#define SMALL_BLK_CNT 30 654#define SMALL_BLK_CNT 30
653#define LARGE_BLK_CNT 100 655#define LARGE_BLK_CNT 100
654 656
@@ -678,6 +680,7 @@ struct msix_info_st {
678 680
679/* Structure representing one instance of the NIC */ 681/* Structure representing one instance of the NIC */
680struct s2io_nic { 682struct s2io_nic {
683 int rxd_mode;
681#ifdef CONFIG_S2IO_NAPI 684#ifdef CONFIG_S2IO_NAPI
682 /* 685 /*
683 * Count of packets to be processed in a given iteration, it will be indicated 686 * Count of packets to be processed in a given iteration, it will be indicated
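Because the descriptor stride now depends on the mode (32-byte RxD1_t versus 48-byte RxD3_t), typed pointer arithmetic on a single RxD_t can no longer address every layout; that is what the new per-descriptor rxds[] table solves, recording each descriptor's virtual and DMA addresses once at setup time. Below is a standalone sketch of filling such a table; the helper and its malloc bookkeeping are illustrative, not the driver's.

#include <stdint.h>
#include <stdlib.h>

typedef struct rxd_info {
	void	 *virt_addr;
	uint64_t  dma_addr;          /* stand-in for dma_addr_t */
} rxd_info_t;

typedef struct rx_block_info {
	void	   *block_virt_addr;
	uint64_t    block_dma_addr;
	rxd_info_t *rxds;
} rx_block_info_t;

/* Record every descriptor's addresses for one block, given the
 * mode's stride (rxd_size) and per-block count (rxd_cnt). */
static int index_block(rx_block_info_t *blk, size_t rxd_size, int rxd_cnt)
{
	int off;

	blk->rxds = malloc(sizeof(*blk->rxds) * rxd_cnt);
	if (!blk->rxds)
		return -1;
	for (off = 0; off < rxd_cnt; off++) {
		blk->rxds[off].virt_addr =
			(char *)blk->block_virt_addr + off * rxd_size;
		blk->rxds[off].dma_addr =
			blk->block_dma_addr + off * rxd_size;
	}
	return 0;
}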
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 750c0167539c..849ac88bcccc 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2040,7 +2040,7 @@ static int mpi_send_packet (struct net_device *dev)
2040 return 1; 2040 return 1;
2041} 2041}
2042 2042
2043static void get_tx_error(struct airo_info *ai, u32 fid) 2043static void get_tx_error(struct airo_info *ai, s32 fid)
2044{ 2044{
2045 u16 status; 2045 u16 status;
2046 2046
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 72cb67b66e0c..92a9696fdebe 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -72,6 +72,9 @@ struct mii_bus {
72 /* list of all PHYs on bus */ 72 /* list of all PHYs on bus */
73 struct phy_device *phy_map[PHY_MAX_ADDR]; 73 struct phy_device *phy_map[PHY_MAX_ADDR];
74 74
75 /* Phy addresses to be ignored when probing */
76 u32 phy_mask;
77
75 /* Pointer to an array of interrupts, each PHY's 78 /* Pointer to an array of interrupts, each PHY's
76 * interrupt at the index matching its address */ 79 * interrupt at the index matching its address */
77 int *irq; 80 int *irq;
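The phy_mask field added to struct mii_bus gives MAC drivers a way to exclude MDIO addresses from the bus scan. Below is a standalone sketch of the implied contract; the skip-on-set-bit probe behaviour is assumed from the field's comment, not quoted from mdio_bus.c.

#include <stdint.h>
#include <stdio.h>

#define PHY_MAX_ADDR 32

/* Walk the bus, skipping every address whose bit is set in phy_mask. */
static void scan_bus(uint32_t phy_mask)
{
	int addr;

	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		if (phy_mask & (1u << addr))
			continue;            /* masked: leave unprobed */
		printf("probing PHY @%d\n", addr);
	}
}

int main(void)
{
	scan_bus(~(uint32_t)(1u << 5));      /* probe only address 5 */
	return 0;
}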