Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/8139too.c               |     2
-rw-r--r--  drivers/net/Kconfig                 |    18
-rw-r--r--  drivers/net/Makefile                |     3
-rw-r--r--  drivers/net/cxgb3/l2t.c             |     2
-rw-r--r--  drivers/net/cxgb3/sge.c             |    35
-rw-r--r--  drivers/net/dm9000.c                |   654
-rw-r--r--  drivers/net/e1000/e1000_main.c      |    18
-rw-r--r--  drivers/net/forcedeth.c             |   132
-rw-r--r--  drivers/net/mlx4/mr.c               |    21
-rw-r--r--  drivers/net/netconsole.c            |     4
-rw-r--r--  drivers/net/ni52.c                  |  1142
-rw-r--r--  drivers/net/ni52.h                  |   158
-rw-r--r--  drivers/net/pcnet32.c               |    48
-rw-r--r--  drivers/net/phy/fixed.c             |     4
-rw-r--r--  drivers/net/ps3_gelic_net.c         |  1215
-rw-r--r--  drivers/net/ps3_gelic_net.h         |   415
-rw-r--r--  drivers/net/ps3_gelic_wireless.c    |  2753
-rw-r--r--  drivers/net/ps3_gelic_wireless.h    |   329
-rw-r--r--  drivers/net/r6040.c                 |   233
-rw-r--r--  drivers/net/sis190.c                |     3
20 files changed, 5407 insertions(+), 1782 deletions(-)
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index eef6fecfff2a..be6e918456d9 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -168,7 +168,7 @@ static int debug = -1;
  * Warning: 64K ring has hardware issues and may lock up.
  */
 #if defined(CONFIG_SH_DREAMCAST)
-#define RX_BUF_IDX 1	/* 16K ring */
+#define RX_BUF_IDX 0	/* 8K ring */
 #else
 #define RX_BUF_IDX 2	/* 32K ring */
 #endif
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 50c2b60e1fee..f337800076c0 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -931,6 +931,14 @@ config ENC28J60_WRITEVERIFY
 	  Enable the verify after the buffer write useful for debugging purpose.
 	  If unsure, say N.
 
+config DM9000_DEBUGLEVEL
+	int "DM9000 maximum debug level"
+	depends on DM9000
+	default 4
+	help
+	  The maximum level of debugging code compiled into the DM9000
+	  driver.
+
 config SMC911X
 	tristate "SMSC LAN911[5678] support"
 	select CRC32
@@ -2352,6 +2360,16 @@ config GELIC_NET
 	  To compile this driver as a module, choose M here: the
 	  module will be called ps3_gelic.
 
+config GELIC_WIRELESS
+	bool "PS3 Wireless support"
+	depends on GELIC_NET
+	help
+	  This option adds the support for the wireless feature of PS3.
+	  If you have the wireless-less model of PS3 or have no plan to
+	  use wireless feature, disabling this option saves memory.  As
+	  the driver automatically distinguishes the models, you can
+	  safely enable this option even if you have a wireless-less model.
+
 config GIANFAR
 	tristate "Gianfar Ethernet"
 	depends on FSL_SOC
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 9fc7794e88ea..3b1ea321dc05 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -70,7 +70,8 @@ obj-$(CONFIG_BNX2X) += bnx2x.o
 spidernet-y += spider_net.o spider_net_ethtool.o
 obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o
 obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
-ps3_gelic-objs += ps3_gelic_net.o
+gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
+ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
 obj-$(CONFIG_TC35815) += tc35815.o
 obj-$(CONFIG_SKGE) += skge.o
 obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 17ed4c3527b7..865faee53e17 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -404,7 +404,7 @@ found:
 		if (neigh->nud_state & NUD_FAILED) {
 			arpq = e->arpq_head;
 			e->arpq_head = e->arpq_tail = NULL;
-		} else if (neigh_is_connected(neigh))
+		} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
 			setup_l2e_send_pending(dev, NULL, e);
 	} else {
 		e->state = neigh_is_connected(neigh) ?
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 9ca8c66abd16..979f3fc5e765 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1059,6 +1059,14 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 		       htonl(V_WR_TID(q->token)));
 }
 
+static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs,
+				 struct sge_txq *q)
+{
+	netif_stop_queue(dev);
+	set_bit(TXQ_ETH, &qs->txq_stopped);
+	q->stops++;
+}
+
 /**
  *	eth_xmit - add a packet to the Ethernet Tx queue
  *	@skb: the packet
@@ -1090,31 +1098,18 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	ndesc = calc_tx_descs(skb);
 
 	if (unlikely(credits < ndesc)) {
-		if (!netif_queue_stopped(dev)) {
-			netif_stop_queue(dev);
-			set_bit(TXQ_ETH, &qs->txq_stopped);
-			q->stops++;
-			dev_err(&adap->pdev->dev,
-				"%s: Tx ring %u full while queue awake!\n",
-				dev->name, q->cntxt_id & 7);
-		}
+		t3_stop_queue(dev, qs, q);
+		dev_err(&adap->pdev->dev,
+			"%s: Tx ring %u full while queue awake!\n",
+			dev->name, q->cntxt_id & 7);
 		spin_unlock(&q->lock);
 		return NETDEV_TX_BUSY;
 	}
 
 	q->in_use += ndesc;
-	if (unlikely(credits - ndesc < q->stop_thres)) {
-		q->stops++;
-		netif_stop_queue(dev);
-		set_bit(TXQ_ETH, &qs->txq_stopped);
-#if !USE_GTS
-		if (should_restart_tx(q) &&
-		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
-			q->restarts++;
-			netif_wake_queue(dev);
-		}
-#endif
-	}
+	if (unlikely(credits - ndesc < q->stop_thres))
+		if (USE_GTS || !should_restart_tx(q))
+			t3_stop_queue(dev, qs, q);
 
 	gen = q->gen;
 	q->unacked += ndesc;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 6a20a5491a96..1fe305ca2cf0 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1,7 +1,5 @@
 /*
- * dm9000.c: Version 1.2 03/18/2003
- *
- * A Davicom DM9000 ISA NIC fast Ethernet driver for Linux.
+ * Davicom DM9000 Fast Ethernet driver for Linux.
  * Copyright (C) 1997  Sten Wang
  *
  * 	This program is free software; you can redistribute it and/or
@@ -14,44 +12,11 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
- *
- * V0.11	06/20/2001	REG_0A bit3=1, default enable BP with DA match
- *	06/22/2001	Support DM9801 progrmming
- *		E3: R25 = ((R24 + NF) & 0x00ff) | 0xf000
- *		E4: R25 = ((R24 + NF) & 0x00ff) | 0xc200
- *		     R17 = (R17 & 0xfff0) | NF + 3
- *		E5: R25 = ((R24 + NF - 3) & 0x00ff) | 0xc200
- *		     R17 = (R17 & 0xfff0) | NF
- *
- * v1.00	modify by simon 2001.9.5
- *		change for kernel 2.4.x
- *
- * v1.1		11/09/2001	fix force mode bug
- *
- * v1.2		03/18/2003	Weilun Huang <weilun_huang@davicom.com.tw>:
- *			Fixed phy reset.
- *			Added tx/rx 32 bit mode.
- *			Cleaned up for kernel merge.
- *
- *	  03/03/2004	Sascha Hauer <s.hauer@pengutronix.de>
- *			Port to 2.6 kernel
- *
- *	  24-Sep-2004	Ben Dooks <ben@simtec.co.uk>
- *			Cleanup of code to remove ifdefs
- *			Allowed platform device data to influence access width
- *			Reformatting areas of code
- *
- *	  17-Mar-2005	Sascha Hauer <s.hauer@pengutronix.de>
- *			* removed 2.4 style module parameters
- *			* removed removed unused stat counter and fixed
- *			  net_device_stats
- *			* introduced tx_timeout function
- *			* reworked locking
+ * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
  *
- *	  01-Jul-2005	Ben Dooks <ben@simtec.co.uk>
- *			* fixed spinlock call without pointer
- *			* ensure spinlock is initialised
+ * Additional updates, Copyright:
+ *	Ben Dooks <ben@simtec.co.uk>
+ *	Sascha Hauer <s.hauer@pengutronix.de>
  */
 
 #include <linux/module.h>
@@ -63,6 +28,7 @@
 #include <linux/spinlock.h>
 #include <linux/crc32.h>
 #include <linux/mii.h>
+#include <linux/ethtool.h>
 #include <linux/dm9000.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
@@ -80,30 +46,7 @@
 
 #define CARDNAME "dm9000"
 #define PFX CARDNAME ": "
-
-#define DM9000_TIMER_WUT  jiffies+(HZ*2)	/* timer wakeup time : 2 second */
-
-#define DM9000_DEBUG 0
-
-#if DM9000_DEBUG > 2
-#define PRINTK3(args...)  printk(CARDNAME ": " args)
-#else
-#define PRINTK3(args...)  do { } while(0)
-#endif
-
-#if DM9000_DEBUG > 1
-#define PRINTK2(args...)  printk(CARDNAME ": " args)
-#else
-#define PRINTK2(args...)  do { } while(0)
-#endif
-
-#if DM9000_DEBUG > 0
-#define PRINTK1(args...)  printk(CARDNAME ": " args)
-#define PRINTK(args...)   printk(CARDNAME ": " args)
-#else
-#define PRINTK1(args...)  do { } while(0)
-#define PRINTK(args...)   printk(KERN_DEBUG args)
-#endif
+#define DRV_VERSION	"1.30"
 
 #ifdef CONFIG_BLACKFIN
 #define readsb	insb
@@ -112,9 +55,9 @@
 #define writesb	outsb
 #define writesw	outsw
 #define writesl	outsl
-#define DM9000_IRQ_FLAGS	(IRQF_SHARED | IRQF_TRIGGER_HIGH)
+#define DEFAULT_TRIGGER IRQF_TRIGGER_HIGH
 #else
-#define DM9000_IRQ_FLAGS	(IRQF_SHARED | IRQT_RISING)
+#define DEFAULT_TRIGGER (0)
 #endif
 
 /*
@@ -124,6 +67,24 @@ static int watchdog = 5000;
 module_param(watchdog, int, 0400);
 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
 
+/* DM9000 register address locking.
+ *
+ * The DM9000 uses an address register to control where data written
+ * to the data register goes. This means that the address register
+ * must be preserved over interrupts or similar calls.
+ *
+ * During interrupt and other critical calls, a spinlock is used to
+ * protect the system, but the calls themselves save the address
+ * in the address register in case they are interrupting another
+ * access to the device.
+ *
+ * For general accesses a lock is provided so that calls which are
+ * allowed to sleep are serialised so that the address register does
+ * not need to be saved. This lock also serves to serialise access
+ * to the EEPROM and PHY access registers which are shared between
+ * these two devices.
+ */
+
 /* Structure/enum declaration ------------------------------- */
 typedef struct board_info {
 
@@ -137,33 +98,52 @@ typedef struct board_info {
 	u16 dbug_cnt;
 	u8 io_mode;		/* 0:word, 2:byte */
 	u8 phy_addr;
+	unsigned int flags;
+	unsigned int in_suspend :1;
+
+	int debug_level;
 
 	void (*inblk)(void __iomem *port, void *data, int length);
 	void (*outblk)(void __iomem *port, void *data, int length);
 	void (*dumpblk)(void __iomem *port, int length);
 
+	struct device	*dev;	     /* parent device */
+
 	struct resource	*addr_res;   /* resources found */
 	struct resource *data_res;
 	struct resource	*addr_req;   /* resources requested */
 	struct resource *data_req;
 	struct resource *irq_res;
 
-	struct timer_list timer;
-	unsigned char srom[128];
+	struct mutex	 addr_lock;	/* phy and eeprom access lock */
+
 	spinlock_t	lock;
 
 	struct mii_if_info mii;
 	u32 msg_enable;
 } board_info_t;
 
+/* debug code */
+
+#define dm9000_dbg(db, lev, msg...) do {		\
+	if ((lev) < CONFIG_DM9000_DEBUGLEVEL &&		\
+	    (lev) < db->debug_level) {			\
+		dev_dbg(db->dev, msg);			\
+	}						\
+} while (0)
+
+static inline board_info_t *to_dm9000_board(struct net_device *dev)
+{
+	return dev->priv;
+}
+
 /* function declaration ------------------------------------- */
 static int dm9000_probe(struct platform_device *);
 static int dm9000_open(struct net_device *);
 static int dm9000_start_xmit(struct sk_buff *, struct net_device *);
 static int dm9000_stop(struct net_device *);
+static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
 
-
-static void dm9000_timer(unsigned long);
 static void dm9000_init_dm9000(struct net_device *);
 
 static irqreturn_t dm9000_interrupt(int, void *);
@@ -171,20 +151,19 @@ static irqreturn_t dm9000_interrupt(int, void *);
 static int dm9000_phy_read(struct net_device *dev, int phyaddr_unsused, int reg);
 static void dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg,
 			   int value);
-static u16 read_srom_word(board_info_t *, int);
+
+static void dm9000_read_eeprom(board_info_t *, int addr, u8 *to);
+static void dm9000_write_eeprom(board_info_t *, int addr, u8 *dp);
 static void dm9000_rx(struct net_device *);
 static void dm9000_hash_table(struct net_device *);
 
-//#define DM9000_PROGRAM_EEPROM
-#ifdef DM9000_PROGRAM_EEPROM
-static void program_eeprom(board_info_t * db);
-#endif
 /* DM9000 network board routine ---------------------------- */
 
 static void
 dm9000_reset(board_info_t * db)
 {
-	PRINTK1("dm9000x: resetting\n");
+	dev_dbg(db->dev, "resetting device\n");
+
 	/* RESET device */
 	writeb(DM9000_NCR, db->io_addr);
 	udelay(200);
@@ -300,14 +279,10 @@ static void dm9000_set_io(struct board_info *db, int byte_width)
 		db->inblk   = dm9000_inblk_8bit;
 		break;
 
-	case 2:
-		db->dumpblk = dm9000_dumpblk_16bit;
-		db->outblk  = dm9000_outblk_16bit;
-		db->inblk   = dm9000_inblk_16bit;
-		break;
 
 	case 3:
-		printk(KERN_ERR PFX ": 3 byte IO, falling back to 16bit\n");
+		dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
+	case 2:
 		db->dumpblk = dm9000_dumpblk_16bit;
 		db->outblk  = dm9000_outblk_16bit;
 		db->inblk   = dm9000_inblk_16bit;
@@ -358,6 +333,139 @@ static void dm9000_poll_controller(struct net_device *dev)
 }
 #endif
 
+static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	board_info_t *dm = to_dm9000_board(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
+}
+
+/* ethtool ops */
+
+static void dm9000_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
+{
+	board_info_t *dm = to_dm9000_board(dev);
+
+	strcpy(info->driver, CARDNAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->bus_info, to_platform_device(dm->dev)->name);
+}
+
+static u32 dm9000_get_msglevel(struct net_device *dev)
+{
+	board_info_t *dm = to_dm9000_board(dev);
+
+	return dm->msg_enable;
+}
+
+static void dm9000_set_msglevel(struct net_device *dev, u32 value)
+{
+	board_info_t *dm = to_dm9000_board(dev);
+
+	dm->msg_enable = value;
+}
+
+static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	board_info_t *dm = to_dm9000_board(dev);
+
+	mii_ethtool_gset(&dm->mii, cmd);
+	return 0;
+}
+
+static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	board_info_t *dm = to_dm9000_board(dev);
+
+	return mii_ethtool_sset(&dm->mii, cmd);
+}
+
+static int dm9000_nway_reset(struct net_device *dev)
+{
+	board_info_t *dm = to_dm9000_board(dev);
+	return mii_nway_restart(&dm->mii);
+}
+
+static u32 dm9000_get_link(struct net_device *dev)
+{
+	board_info_t *dm = to_dm9000_board(dev);
+	return mii_link_ok(&dm->mii);
+}
+
+#define DM_EEPROM_MAGIC	(0x444D394B)
+
+static int dm9000_get_eeprom_len(struct net_device *dev)
+{
+	return 128;
+}
+
+static int dm9000_get_eeprom(struct net_device *dev,
+			     struct ethtool_eeprom *ee, u8 *data)
+{
+	board_info_t *dm = to_dm9000_board(dev);
+	int offset = ee->offset;
+	int len = ee->len;
+	int i;
+
+	/* EEPROM access is aligned to two bytes */
+
+	if ((len & 1) != 0 || (offset & 1) != 0)
+		return -EINVAL;
+
+	if (dm->flags & DM9000_PLATF_NO_EEPROM)
+		return -ENOENT;
+
+	ee->magic = DM_EEPROM_MAGIC;
+
+	for (i = 0; i < len; i += 2)
+		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
+
+	return 0;
+}
+
+static int dm9000_set_eeprom(struct net_device *dev,
+			     struct ethtool_eeprom *ee, u8 *data)
+{
+	board_info_t *dm = to_dm9000_board(dev);
+	int offset = ee->offset;
+	int len = ee->len;
+	int i;
+
+	/* EEPROM access is aligned to two bytes */
+
+	if ((len & 1) != 0 || (offset & 1) != 0)
+		return -EINVAL;
+
+	if (dm->flags & DM9000_PLATF_NO_EEPROM)
+		return -ENOENT;
+
+	if (ee->magic != DM_EEPROM_MAGIC)
+		return -EINVAL;
+
+	for (i = 0; i < len; i += 2)
+		dm9000_write_eeprom(dm, (offset + i) / 2, data + i);
+
+	return 0;
+}
+
+static const struct ethtool_ops dm9000_ethtool_ops = {
+	.get_drvinfo		= dm9000_get_drvinfo,
+	.get_settings		= dm9000_get_settings,
+	.set_settings		= dm9000_set_settings,
+	.get_msglevel		= dm9000_get_msglevel,
+	.set_msglevel		= dm9000_set_msglevel,
+	.nway_reset		= dm9000_nway_reset,
+	.get_link		= dm9000_get_link,
+	.get_eeprom_len		= dm9000_get_eeprom_len,
+	.get_eeprom		= dm9000_get_eeprom,
+	.set_eeprom		= dm9000_set_eeprom,
+};
+
+
 /* dm9000_release_board
  *
  * release a board, and any mapped resources
@@ -401,6 +509,7 @@ dm9000_probe(struct platform_device *pdev)
 	struct dm9000_plat_data *pdata = pdev->dev.platform_data;
 	struct board_info *db;	/* Point a board information structure */
 	struct net_device *ndev;
+	const unsigned char *mac_src;
 	unsigned long base;
 	int ret = 0;
 	int iosize;
@@ -410,19 +519,22 @@ dm9000_probe(struct platform_device *pdev)
 	/* Init network device */
 	ndev = alloc_etherdev(sizeof (struct board_info));
 	if (!ndev) {
-		printk("%s: could not allocate device.\n", CARDNAME);
+		dev_err(&pdev->dev, "could not allocate device.\n");
 		return -ENOMEM;
 	}
 
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
-	PRINTK2("dm9000_probe()");
+	dev_dbg(&pdev->dev, "dm9000_probe()");
 
 	/* setup board info structure */
 	db = (struct board_info *) ndev->priv;
 	memset(db, 0, sizeof (*db));
 
+	db->dev = &pdev->dev;
+
 	spin_lock_init(&db->lock);
+	mutex_init(&db->addr_lock);
 
 	if (pdev->num_resources < 2) {
 		ret = -ENODEV;
@@ -450,7 +562,7 @@ dm9000_probe(struct platform_device *pdev)
 
 	if (db->addr_res == NULL || db->data_res == NULL ||
 	    db->irq_res == NULL) {
-		printk(KERN_ERR PFX "insufficient resources\n");
+		dev_err(db->dev, "insufficient resources\n");
 		ret = -ENOENT;
 		goto out;
 	}
@@ -460,7 +572,7 @@ dm9000_probe(struct platform_device *pdev)
 				  pdev->name);
 
 	if (db->addr_req == NULL) {
-		printk(KERN_ERR PFX "cannot claim address reg area\n");
+		dev_err(db->dev, "cannot claim address reg area\n");
 		ret = -EIO;
 		goto out;
 	}
@@ -468,7 +580,7 @@ dm9000_probe(struct platform_device *pdev)
 	db->io_addr = ioremap(db->addr_res->start, i);
 
 	if (db->io_addr == NULL) {
-		printk(KERN_ERR "failed to ioremap address reg\n");
+		dev_err(db->dev, "failed to ioremap address reg\n");
 		ret = -EINVAL;
 		goto out;
 	}
@@ -478,7 +590,7 @@ dm9000_probe(struct platform_device *pdev)
 				  pdev->name);
 
 	if (db->data_req == NULL) {
-		printk(KERN_ERR PFX "cannot claim data reg area\n");
+		dev_err(db->dev, "cannot claim data reg area\n");
 		ret = -EIO;
 		goto out;
 	}
@@ -486,7 +598,7 @@ dm9000_probe(struct platform_device *pdev)
 	db->io_data = ioremap(db->data_res->start, iosize);
 
 	if (db->io_data == NULL) {
-		printk(KERN_ERR "failed to ioremap data reg\n");
+		dev_err(db->dev,"failed to ioremap data reg\n");
 		ret = -EINVAL;
 		goto out;
 	}
@@ -525,12 +637,14 @@ dm9000_probe(struct platform_device *pdev)
 
 		if (pdata->dumpblk != NULL)
 			db->dumpblk = pdata->dumpblk;
+
+		db->flags = pdata->flags;
 	}
 
 	dm9000_reset(db);
 
 	/* try two times, DM9000 sometimes gets the first read wrong */
-	for (i = 0; i < 2; i++) {
+	for (i = 0; i < 8; i++) {
 		id_val  = ior(db, DM9000_VIDL);
 		id_val |= (u32)ior(db, DM9000_VIDH) << 8;
 		id_val |= (u32)ior(db, DM9000_PIDL) << 16;
@@ -538,11 +652,11 @@ dm9000_probe(struct platform_device *pdev)
 
 		if (id_val == DM9000_ID)
 			break;
-		printk("%s: read wrong id 0x%08x\n", CARDNAME, id_val);
+		dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
 	}
 
 	if (id_val != DM9000_ID) {
-		printk("%s: wrong id: 0x%08x\n", CARDNAME, id_val);
+		dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
 		ret = -ENODEV;
 		goto out;
 	}
@@ -558,13 +672,13 @@ dm9000_probe(struct platform_device *pdev)
 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
 	ndev->stop		 = &dm9000_stop;
 	ndev->set_multicast_list = &dm9000_hash_table;
+	ndev->ethtool_ops	 = &dm9000_ethtool_ops;
+	ndev->do_ioctl		 = &dm9000_ioctl;
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	ndev->poll_controller	 = &dm9000_poll_controller;
 #endif
 
-#ifdef DM9000_PROGRAM_EEPROM
-	program_eeprom(db);
-#endif
 	db->msg_enable       = NETIF_MSG_LINK;
 	db->mii.phy_id_mask  = 0x1f;
 	db->mii.reg_num_mask = 0x1f;
@@ -574,38 +688,37 @@ dm9000_probe(struct platform_device *pdev)
 	db->mii.mdio_read    = dm9000_phy_read;
 	db->mii.mdio_write   = dm9000_phy_write;
 
-	/* Read SROM content */
-	for (i = 0; i < 64; i++)
-		((u16 *) db->srom)[i] = read_srom_word(db, i);
+	mac_src = "eeprom";
 
-	/* Set Node Address */
-	for (i = 0; i < 6; i++)
-		ndev->dev_addr[i] = db->srom[i];
+	/* try reading the node address from the attached EEPROM */
+	for (i = 0; i < 6; i += 2)
+		dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
 
 	if (!is_valid_ether_addr(ndev->dev_addr)) {
 		/* try reading from mac */
 
+		mac_src = "chip";
 		for (i = 0; i < 6; i++)
 			ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
 	}
 
 	if (!is_valid_ether_addr(ndev->dev_addr))
-		printk("%s: Invalid ethernet MAC address. Please "
+		dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
 		       "set using ifconfig\n", ndev->name);
 
 	platform_set_drvdata(pdev, ndev);
 	ret = register_netdev(ndev);
 
 	if (ret == 0) {
 		DECLARE_MAC_BUF(mac);
-		printk("%s: dm9000 at %p,%p IRQ %d MAC: %s\n",
+		printk("%s: dm9000 at %p,%p IRQ %d MAC: %s (%s)\n",
 		       ndev->name, db->io_addr, db->io_data, ndev->irq,
-		       print_mac(mac, ndev->dev_addr));
+		       print_mac(mac, ndev->dev_addr), mac_src);
 	}
 	return 0;
 
 out:
-	printk("%s: not found (%d).\n", CARDNAME, ret);
+	dev_err(db->dev, "not found (%d).\n", ret);
 
 	dm9000_release_board(pdev, db);
 	free_netdev(ndev);
@@ -621,10 +734,22 @@ static int
 dm9000_open(struct net_device *dev)
 {
 	board_info_t *db = (board_info_t *) dev->priv;
+	unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
 
-	PRINTK2("entering dm9000_open\n");
+	if (netif_msg_ifup(db))
+		dev_dbg(db->dev, "enabling %s\n", dev->name);
 
-	if (request_irq(dev->irq, &dm9000_interrupt, DM9000_IRQ_FLAGS, dev->name, dev))
+	/* If there is no IRQ type specified, default to something that
+	 * may work, and tell the user that this is a problem */
+
+	if (irqflags == IRQF_TRIGGER_NONE) {
+		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
+		irqflags = DEFAULT_TRIGGER;
+	}
+
+	irqflags |= IRQF_SHARED;
+
+	if (request_irq(dev->irq, &dm9000_interrupt, irqflags, dev->name, dev))
 		return -EAGAIN;
 
 	/* Initialize DM9000 board */
@@ -634,13 +759,6 @@ dm9000_open(struct net_device *dev)
 	/* Init driver variable */
 	db->dbug_cnt = 0;
 
-	/* set and active a timer process */
-	init_timer(&db->timer);
-	db->timer.expires  = DM9000_TIMER_WUT;
-	db->timer.data     = (unsigned long) dev;
-	db->timer.function = &dm9000_timer;
-	add_timer(&db->timer);
-
 	mii_check_media(&db->mii, netif_msg_link(db), 1);
 	netif_start_queue(dev);
 
@@ -655,7 +773,7 @@ dm9000_init_dm9000(struct net_device *dev)
 {
 	board_info_t *db = (board_info_t *) dev->priv;
 
-	PRINTK1("entering %s\n",__FUNCTION__);
+	dm9000_dbg(db, 1, "entering %s\n", __func__);
 
 	/* I/O mode */
 	db->io_mode = ior(db, DM9000_ISR) >> 6;	/* ISR bit7:6 keeps I/O mode */
@@ -665,6 +783,9 @@ dm9000_init_dm9000(struct net_device *dev)
 	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */
 	iow(db, DM9000_GPR, 0);	/* Enable PHY */
 
+	if (db->flags & DM9000_PLATF_EXT_PHY)
+		iow(db, DM9000_NCR, NCR_EXT_PHY);
+
 	/* Program operating register */
 	iow(db, DM9000_TCR, 0);	        /* TX Polling clear */
 	iow(db, DM9000_BPTR, 0x3f);	/* Less 3Kb, 200us */
@@ -698,7 +819,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags;
 	board_info_t *db = (board_info_t *) dev->priv;
 
-	PRINTK3("dm9000_start_xmit\n");
+	dm9000_dbg(db, 3, "%s:\n", __func__);
 
 	if (db->tx_pkt_cnt > 1)
 		return 1;
@@ -715,8 +836,8 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* TX control: First packet immediately send, second packet queue */
 	if (db->tx_pkt_cnt == 1) {
 		/* Set TX length to DM9000 */
-		iow(db, DM9000_TXPLL, skb->len & 0xff);
-		iow(db, DM9000_TXPLH, (skb->len >> 8) & 0xff);
+		iow(db, DM9000_TXPLL, skb->len);
+		iow(db, DM9000_TXPLH, skb->len >> 8);
 
 		/* Issue TX polling command */
 		iow(db, DM9000_TCR, TCR_TXREQ);	/* Cleared after TX complete */
@@ -757,10 +878,8 @@ dm9000_stop(struct net_device *ndev)
 {
 	board_info_t *db = (board_info_t *) ndev->priv;
 
-	PRINTK1("entering %s\n",__FUNCTION__);
-
-	/* deleted timer */
-	del_timer(&db->timer);
+	if (netif_msg_ifdown(db))
+		dev_dbg(db->dev, "shutting down %s\n", ndev->name);
 
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
@@ -788,10 +907,13 @@ dm9000_tx_done(struct net_device *dev, board_info_t * db)
 		db->tx_pkt_cnt--;
 		dev->stats.tx_packets++;
 
+		if (netif_msg_tx_done(db))
+			dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
+
 		/* Queue packet check & send */
 		if (db->tx_pkt_cnt > 0) {
-			iow(db, DM9000_TXPLL, db->queue_pkt_len & 0xff);
-			iow(db, DM9000_TXPLH, (db->queue_pkt_len >> 8) & 0xff);
+			iow(db, DM9000_TXPLL, db->queue_pkt_len);
+			iow(db, DM9000_TXPLH, db->queue_pkt_len >> 8);
 			iow(db, DM9000_TCR, TCR_TXREQ);
 			dev->trans_start = jiffies;
 		}
@@ -803,19 +925,14 @@ static irqreturn_t
 dm9000_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
-	board_info_t *db;
+	board_info_t *db = (board_info_t *) dev->priv;
 	int int_status;
 	u8 reg_save;
 
-	PRINTK3("entering %s\n",__FUNCTION__);
-
-	if (!dev) {
-		PRINTK1("dm9000_interrupt() without DEVICE arg\n");
-		return IRQ_HANDLED;
-	}
+	dm9000_dbg(db, 3, "entering %s\n", __func__);
 
 	/* A real interrupt coming */
-	db = (board_info_t *) dev->priv;
+
 	spin_lock(&db->lock);
 
 	/* Save previous register address */
@@ -828,6 +945,9 @@ dm9000_interrupt(int irq, void *dev_id)
 	int_status = ior(db, DM9000_ISR);	/* Got ISR */
 	iow(db, DM9000_ISR, int_status);	/* Clear ISR status */
 
+	if (netif_msg_intr(db))
+		dev_dbg(db->dev, "interrupt status %02x\n", int_status);
+
 	/* Received the coming packet */
 	if (int_status & ISR_PRS)
 		dm9000_rx(dev);
@@ -847,27 +967,9 @@ dm9000_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-/*
- *  A periodic timer routine
- *  Dynamic media sense, allocated Rx buffer...
- */
-static void
-dm9000_timer(unsigned long data)
-{
-	struct net_device *dev = (struct net_device *) data;
-	board_info_t *db = (board_info_t *) dev->priv;
-
-	PRINTK3("dm9000_timer()\n");
-
-	mii_check_media(&db->mii, netif_msg_link(db), 0);
-
-	/* Set timer again */
-	db->timer.expires = DM9000_TIMER_WUT;
-	add_timer(&db->timer);
-}
-
 struct dm9000_rxhdr {
-	u16	RxStatus;
+	u8	RxPktReady;
+	u8	RxStatus;
 	u16	RxLen;
 } __attribute__((__packed__));
 
@@ -893,7 +995,7 @@ dm9000_rx(struct net_device *dev)
 
 		/* Status check: this byte must be 0 or 1 */
 		if (rxbyte > DM9000_PKT_RDY) {
-			printk("status check failed: %d\n", rxbyte);
+			dev_warn(db->dev, "status check fail: %d\n", rxbyte);
 			iow(db, DM9000_RCR, 0x00);	/* Stop Device */
 			iow(db, DM9000_ISR, IMR_PAR);	/* Stop INT request */
 			return;
@@ -908,30 +1010,38 @@ dm9000_rx(struct net_device *dev)
 
 		(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
 
-		RxLen = rxhdr.RxLen;
+		RxLen = le16_to_cpu(rxhdr.RxLen);
+
+		if (netif_msg_rx_status(db))
+			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
+				rxhdr.RxStatus, RxLen);
 
 		/* Packet Status check */
 		if (RxLen < 0x40) {
 			GoodPacket = false;
-			PRINTK1("Bad Packet received (runt)\n");
+			if (netif_msg_rx_err(db))
+				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
 		}
 
 		if (RxLen > DM9000_PKT_MAX) {
-			PRINTK1("RST: RX Len:%x\n", RxLen);
+			dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
 		}
 
-		if (rxhdr.RxStatus & 0xbf00) {
+		if (rxhdr.RxStatus & 0xbf) {
 			GoodPacket = false;
-			if (rxhdr.RxStatus & 0x100) {
-				PRINTK1("fifo error\n");
+			if (rxhdr.RxStatus & 0x01) {
+				if (netif_msg_rx_err(db))
+					dev_dbg(db->dev, "fifo error\n");
 				dev->stats.rx_fifo_errors++;
 			}
-			if (rxhdr.RxStatus & 0x200) {
-				PRINTK1("crc error\n");
+			if (rxhdr.RxStatus & 0x02) {
+				if (netif_msg_rx_err(db))
+					dev_dbg(db->dev, "crc error\n");
 				dev->stats.rx_crc_errors++;
 			}
-			if (rxhdr.RxStatus & 0x8000) {
-				PRINTK1("length error\n");
+			if (rxhdr.RxStatus & 0x80) {
+				if (netif_msg_rx_err(db))
+					dev_dbg(db->dev, "length error\n");
 				dev->stats.rx_length_errors++;
 			}
 		}
@@ -960,72 +1070,119 @@ dm9000_rx(struct net_device *dev)
 	} while (rxbyte == DM9000_PKT_RDY);
 }
 
-/*
- *  Read a word data from SROM
- */
-static u16
-read_srom_word(board_info_t * db, int offset)
+static unsigned int
+dm9000_read_locked(board_info_t *db, int reg)
 {
-	iow(db, DM9000_EPAR, offset);
-	iow(db, DM9000_EPCR, EPCR_ERPRR);
-	mdelay(8);		/* according to the datasheet 200us should be enough,
-				   but it doesn't work */
-	iow(db, DM9000_EPCR, 0x0);
-	return (ior(db, DM9000_EPDRL) + (ior(db, DM9000_EPDRH) << 8));
+	unsigned long flags;
+	unsigned int ret;
+
+	spin_lock_irqsave(&db->lock, flags);
+	ret = ior(db, reg);
+	spin_unlock_irqrestore(&db->lock, flags);
+
+	return ret;
+}
+
+static int dm9000_wait_eeprom(board_info_t *db)
+{
+	unsigned int status;
+	int timeout = 8;	/* wait max 8msec */
+
+	/* The DM9000 data sheets say we should be able to
+	 * poll the ERRE bit in EPCR to wait for the EEPROM
+	 * operation. From testing several chips, this bit
+	 * does not seem to work.
+	 *
+	 * We attempt to use the bit, but fall back to the
+	 * timeout (which is why we do not return an error
+	 * on expiry) to say that the EEPROM operation has
+	 * completed.
+	 */
+
+	while (1) {
+		status = dm9000_read_locked(db, DM9000_EPCR);
+
+		if ((status & EPCR_ERRE) == 0)
+			break;
+
+		if (timeout-- < 0) {
+			dev_dbg(db->dev, "timeout waiting EEPROM\n");
+			break;
+		}
+	}
+
+	return 0;
 }
 
-#ifdef DM9000_PROGRAM_EEPROM
 /*
- * Write a word data to SROM
+ * Read a word data from EEPROM
  */
 static void
-write_srom_word(board_info_t * db, int offset, u16 val)
+dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
 {
+	unsigned long flags;
+
+	if (db->flags & DM9000_PLATF_NO_EEPROM) {
+		to[0] = 0xff;
+		to[1] = 0xff;
+		return;
+	}
+
+	mutex_lock(&db->addr_lock);
+
+	spin_lock_irqsave(&db->lock, flags);
+
 	iow(db, DM9000_EPAR, offset);
-	iow(db, DM9000_EPDRH, ((val >> 8) & 0xff));
-	iow(db, DM9000_EPDRL, (val & 0xff));
-	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
-	mdelay(8);		/* same shit */
-	iow(db, DM9000_EPCR, 0);
+	iow(db, DM9000_EPCR, EPCR_ERPRR);
+
+	spin_unlock_irqrestore(&db->lock, flags);
+
+	dm9000_wait_eeprom(db);
+
+	/* delay for at-least 150uS */
+	msleep(1);
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	iow(db, DM9000_EPCR, 0x0);
+
+	to[0] = ior(db, DM9000_EPDRL);
+	to[1] = ior(db, DM9000_EPDRH);
+
+	spin_unlock_irqrestore(&db->lock, flags);
+
+	mutex_unlock(&db->addr_lock);
 }
 
 /*
- * Only for development:
- * Here we write static data to the eeprom in case
- * we don't have valid content on a new board
+ * Write a word data to SROM
  */
 static void
-program_eeprom(board_info_t * db)
+dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
 {
-	u16 eeprom[] = { 0x0c00, 0x007f, 0x1300,	/* MAC Address */
-		0x0000,	/* Autoload: accept nothing */
-		0x0a46, 0x9000,	/* Vendor / Product ID */
-		0x0000,	/* pin control */
-		0x0000,
-	};			/* Wake-up mode control */
-	int i;
-	for (i = 0; i < 8; i++)
-		write_srom_word(db, i, eeprom[i]);
-}
-#endif
+	unsigned long flags;
 
+	if (db->flags & DM9000_PLATF_NO_EEPROM)
+		return;
 
-/*
- * Calculate the CRC valude of the Rx packet
- * flag = 1 : return the reverse CRC (for the received packet CRC)
- *        0 : return the normal CRC (for Hash Table index)
- */
+	mutex_lock(&db->addr_lock);
 
-static unsigned long
-cal_CRC(unsigned char *Data, unsigned int Len, u8 flag)
-{
+	spin_lock_irqsave(&db->lock, flags);
+	iow(db, DM9000_EPAR, offset);
+	iow(db, DM9000_EPDRH, data[1]);
+	iow(db, DM9000_EPDRL, data[0]);
+	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
+	spin_unlock_irqrestore(&db->lock, flags);
+
+	dm9000_wait_eeprom(db);
 
-	u32 crc = ether_crc_le(Len, Data);
+	mdelay(1);	/* wait at least 150uS to clear */
 
-	if (flag)
-		return ~crc;
+	spin_lock_irqsave(&db->lock, flags);
+	iow(db, DM9000_EPCR, 0);
+	spin_unlock_irqrestore(&db->lock, flags);
 
-	return crc;
+	mutex_unlock(&db->addr_lock);
 }
 
 /*
@@ -1037,15 +1194,16 @@ dm9000_hash_table(struct net_device *dev)
 	board_info_t *db = (board_info_t *) dev->priv;
 	struct dev_mc_list *mcptr = dev->mc_list;
 	int mc_cnt = dev->mc_count;
+	int i, oft;
 	u32 hash_val;
-	u16 i, oft, hash_table[4];
+	u16 hash_table[4];
 	unsigned long flags;
 
-	PRINTK2("dm9000_hash_table()\n");
+	dm9000_dbg(db, 1, "entering %s\n", __func__);
 
-	spin_lock_irqsave(&db->lock,flags);
+	spin_lock_irqsave(&db->lock, flags);
 
-	for (i = 0, oft = 0x10; i < 6; i++, oft++)
+	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
 		iow(db, oft, dev->dev_addr[i]);
 
 	/* Clear Hash Table */
@@ -1057,21 +1215,33 @@ dm9000_hash_table(struct net_device *dev)
 
 	/* the multicast address in Hash Table : 64 bits */
 	for (i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
-		hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f;
+		hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f;
 		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
 	}
 
 	/* Write the hash table to MAC MD table */
-	for (i = 0, oft = 0x16; i < 4; i++) {
-		iow(db, oft++, hash_table[i] & 0xff);
-		iow(db, oft++, (hash_table[i] >> 8) & 0xff);
+	for (i = 0, oft = DM9000_MAR; i < 4; i++) {
+		iow(db, oft++, hash_table[i]);
+		iow(db, oft++, hash_table[i] >> 8);
 	}
 
-	spin_unlock_irqrestore(&db->lock,flags);
+	spin_unlock_irqrestore(&db->lock, flags);
 }
 
 
 /*
+ * Sleep, either by using msleep() or if we are suspending, then
+ * use mdelay() to sleep.
+ */
+static void dm9000_msleep(board_info_t *db, unsigned int ms)
+{
+	if (db->in_suspend)
+		mdelay(ms);
+	else
+		msleep(ms);
+}
+
+/*
  *   Read a word from phyxcer
  */
 static int
@@ -1082,6 +1252,8 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
 	unsigned int reg_save;
 	int ret;
 
+	mutex_lock(&db->addr_lock);
+
 	spin_lock_irqsave(&db->lock,flags);
 
 	/* Save previous register address */
@@ -1091,7 +1263,15 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
 	iow(db, DM9000_EPAR, DM9000_PHY | reg);
 
 	iow(db, DM9000_EPCR, 0xc);	/* Issue phyxcer read command */
-	udelay(100);		/* Wait read complete */
+
+	writeb(reg_save, db->io_addr);
+	spin_unlock_irqrestore(&db->lock,flags);
+
+	dm9000_msleep(db, 1);		/* Wait read complete */
+
+	spin_lock_irqsave(&db->lock,flags);
+	reg_save = readb(db->io_addr);
+
 	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */
 
 	/* The read data keeps on REG_0D & REG_0E */
@@ -1099,9 +1279,9 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
 
 	/* restore the previous address */
 	writeb(reg_save, db->io_addr);
-
 	spin_unlock_irqrestore(&db->lock,flags);
 
+	mutex_unlock(&db->addr_lock);
 	return ret;
 }
 
@@ -1115,6 +1295,8 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value)
 	unsigned long flags;
 	unsigned long reg_save;
 
+	mutex_lock(&db->addr_lock);
+
 	spin_lock_irqsave(&db->lock,flags);
 
 	/* Save previous register address */
@@ -1124,25 +1306,38 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value)
 	iow(db, DM9000_EPAR, DM9000_PHY | reg);
 
 	/* Fill the written data into REG_0D & REG_0E */
-	iow(db, DM9000_EPDRL, (value & 0xff));
-	iow(db, DM9000_EPDRH, ((value >> 8) & 0xff));
+	iow(db, DM9000_EPDRL, value);
+	iow(db, DM9000_EPDRH, value >> 8);
 
 	iow(db, DM9000_EPCR, 0xa);	/* Issue phyxcer write command */
-	udelay(500);		/* Wait write complete */
+
+	writeb(reg_save, db->io_addr);
+	spin_unlock_irqrestore(&db->lock, flags);
+
+	dm9000_msleep(db, 1);		/* Wait write complete */
+
+	spin_lock_irqsave(&db->lock,flags);
+	reg_save = readb(db->io_addr);
+
 	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */
 
 	/* restore the previous address */
 	writeb(reg_save, db->io_addr);
 
-	spin_unlock_irqrestore(&db->lock,flags);
+	spin_unlock_irqrestore(&db->lock, flags);
+	mutex_unlock(&db->addr_lock);
 }
 
 static int
 dm9000_drv_suspend(struct platform_device *dev, pm_message_t state)
 {
 	struct net_device *ndev = platform_get_drvdata(dev);
+	board_info_t *db;
 
 	if (ndev) {
+		db = (board_info_t *) ndev->priv;
+		db->in_suspend = 1;
+
 		if (netif_running(ndev)) {
 			netif_device_detach(ndev);
 			dm9000_shutdown(ndev);
@@ -1165,6 +1360,8 @@ dm9000_drv_resume(struct platform_device *dev)
 
 			netif_device_attach(ndev);
 		}
+
+		db->in_suspend = 0;
 	}
 	return 0;
 }
@@ -1180,8 +1377,7 @@ dm9000_drv_remove(struct platform_device *pdev)
 	dm9000_release_board(pdev, (board_info_t *) ndev->priv);
 	free_netdev(ndev);		/* free device structure */
 
-	PRINTK1("clean_module() exit\n");
-
+	dev_dbg(&pdev->dev, "released and freed device\n");
 	return 0;
 }
 
@@ -1199,7 +1395,7 @@ static struct platform_driver dm9000_driver = {
 static int __init
 dm9000_init(void)
 {
-	printk(KERN_INFO "%s Ethernet Driver\n", CARDNAME);
+	printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);
 
 	return platform_driver_register(&dm9000_driver);	/* search board and register */
 }
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 7c5b05a82f0e..d4ee8ec34b56 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -926,8 +926,6 @@ e1000_probe(struct pci_dev *pdev,
 {
 	struct net_device *netdev;
 	struct e1000_adapter *adapter;
-	unsigned long mmio_start, mmio_len;
-	unsigned long flash_start, flash_len;
 
 	static int cards_found = 0;
 	static int global_quad_port_a = 0; /* global ksp3 port a indication */
@@ -970,11 +968,9 @@ e1000_probe(struct pci_dev *pdev,
 	adapter->hw.back = adapter;
 	adapter->msg_enable = (1 << debug) - 1;
 
-	mmio_start = pci_resource_start(pdev, BAR_0);
-	mmio_len = pci_resource_len(pdev, BAR_0);
-
 	err = -EIO;
-	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
+				      pci_resource_len(pdev, BAR_0));
 	if (!adapter->hw.hw_addr)
 		goto err_ioremap;
 
@@ -1009,10 +1005,6 @@ e1000_probe(struct pci_dev *pdev,
 #endif
 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
-	netdev->mem_start = mmio_start;
-	netdev->mem_end = mmio_start + mmio_len;
-	netdev->base_addr = adapter->hw.io_base;
-
 	adapter->bd_number = cards_found;
 
 	/* setup the private structure */
@@ -1025,9 +1017,9 @@ e1000_probe(struct pci_dev *pdev,
 	 * because it depends on mac_type */
 	if ((adapter->hw.mac_type == e1000_ich8lan) &&
 	   (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
-		flash_start = pci_resource_start(pdev, 1);
-		flash_len = pci_resource_len(pdev, 1);
-		adapter->hw.flash_address = ioremap(flash_start, flash_len);
+		adapter->hw.flash_address =
+			ioremap(pci_resource_start(pdev, 1),
+				pci_resource_len(pdev, 1));
 		if (!adapter->hw.flash_address)
 			goto err_flashmap;
 	}
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index d4843d014bc9..801b4d9cd972 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -166,21 +166,24 @@
  * Hardware access:
  */
 
-#define DEV_NEED_TIMERIRQ	   0x0001  /* set the timer irq flag in the irq mask */
-#define DEV_NEED_LINKTIMER	   0x0002  /* poll link settings. Relies on the timer irq */
-#define DEV_HAS_LARGEDESC	   0x0004  /* device supports jumbo frames and needs packet format 2 */
-#define DEV_HAS_HIGH_DMA	   0x0008  /* device supports 64bit dma */
-#define DEV_HAS_CHECKSUM	   0x0010  /* device supports tx and rx checksum offloads */
-#define DEV_HAS_VLAN		   0x0020  /* device supports vlan tagging and striping */
-#define DEV_HAS_MSI		   0x0040  /* device supports MSI */
-#define DEV_HAS_MSI_X		   0x0080  /* device supports MSI-X */
-#define DEV_HAS_POWER_CNTRL	   0x0100  /* device supports power savings */
-#define DEV_HAS_PAUSEFRAME_TX	   0x0200  /* device supports tx pause frames */
-#define DEV_HAS_STATISTICS_V1	   0x0400  /* device supports hw statistics version 1 */
-#define DEV_HAS_STATISTICS_V2	   0x0800  /* device supports hw statistics version 2 */
-#define DEV_HAS_TEST_EXTENDED	   0x1000  /* device supports extended diagnostic test */
-#define DEV_HAS_MGMT_UNIT	   0x2000  /* device supports management unit */
-#define DEV_HAS_CORRECT_MACADDR	   0x4000  /* device supports correct mac address order */
+#define DEV_NEED_TIMERIRQ	   0x00001  /* set the timer irq flag in the irq mask */
+#define DEV_NEED_LINKTIMER	   0x00002  /* poll link settings. Relies on the timer irq */
+#define DEV_HAS_LARGEDESC	   0x00004  /* device supports jumbo frames and needs packet format 2 */
+#define DEV_HAS_HIGH_DMA	   0x00008  /* device supports 64bit dma */
+#define DEV_HAS_CHECKSUM	   0x00010  /* device supports tx and rx checksum offloads */
+#define DEV_HAS_VLAN		   0x00020  /* device supports vlan tagging and striping */
+#define DEV_HAS_MSI		   0x00040  /* device supports MSI */
+#define DEV_HAS_MSI_X		   0x00080  /* device supports MSI-X */
+#define DEV_HAS_POWER_CNTRL	   0x00100  /* device supports power savings */
+#define DEV_HAS_STATISTICS_V1	   0x00200  /* device supports hw statistics version 1 */
+#define DEV_HAS_STATISTICS_V2	   0x00400  /* device supports hw statistics version 2 */
+#define DEV_HAS_TEST_EXTENDED	   0x00800  /* device supports extended diagnostic test */
+#define DEV_HAS_MGMT_UNIT	   0x01000  /* device supports management unit */
+#define DEV_HAS_CORRECT_MACADDR	   0x02000  /* device supports correct mac address order */
+#define DEV_HAS_COLLISION_FIX	   0x04000  /* device supports tx collision fix */
+#define DEV_HAS_PAUSEFRAME_TX_V1   0x08000  /* device supports tx pause frames version 1 */
+#define DEV_HAS_PAUSEFRAME_TX_V2   0x10000  /* device supports tx pause frames version 2 */
+#define DEV_HAS_PAUSEFRAME_TX_V3   0x20000  /* device supports tx pause frames version 3 */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -266,9 +269,12 @@ enum {
 #define NVREG_RNDSEED_FORCE3	0x7400
 
 	NvRegTxDeferral = 0xA0,
 #define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
 #define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
 #define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
+#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
+#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
+#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
 	NvRegRxDeferral = 0xA4,
 #define NVREG_RX_DEFERRAL_DEFAULT	0x16
 	NvRegMacAddrA = 0xA8,
@@ -318,8 +324,10 @@ enum {
 	NvRegTxRingPhysAddrHigh = 0x148,
 	NvRegRxRingPhysAddrHigh = 0x14C,
 	NvRegTxPauseFrame = 0x170,
-#define NVREG_TX_PAUSEFRAME_DISABLE	0x01ff0080
-#define NVREG_TX_PAUSEFRAME_ENABLE	0x01800010
+#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
+#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
+#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
+#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
 	NvRegMIIStatus = 0x180,
 #define NVREG_MIISTAT_ERROR		0x0001
 #define NVREG_MIISTAT_LINKCHANGE	0x0008
@@ -2751,7 +2759,12 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
2751 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 2759 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
2752 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 2760 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
2753 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 2761 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
2754 writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame); 2762 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
2763 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
2764 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
2765 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)
2766 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
2767 writel(pause_enable, base + NvRegTxPauseFrame);
2755 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 2768 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
2756 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2769 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2757 } else { 2770 } else {
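nv_update_pause() now chooses the pause-enable value by capability: version 1 is the default and is upgraded to the V2 or V3 constant when the corresponding flag is set, before the single writel(). The selection logic on its own, as a plain userspace sketch; constant values are taken from the diff, names are shortened and the register write is omitted:

#include <stdio.h>

#define DEV_HAS_PAUSEFRAME_TX_V2	0x10000
#define DEV_HAS_PAUSEFRAME_TX_V3	0x20000

#define TX_PAUSEFRAME_ENABLE_V1		0x01800010
#define TX_PAUSEFRAME_ENABLE_V2		0x056003f0
#define TX_PAUSEFRAME_ENABLE_V3		0x09f00880

static unsigned int pick_pause_enable(unsigned long driver_data)
{
	unsigned int pause_enable = TX_PAUSEFRAME_ENABLE_V1;	/* default: version 1 */

	if (driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
		pause_enable = TX_PAUSEFRAME_ENABLE_V2;
	if (driver_data & DEV_HAS_PAUSEFRAME_TX_V3)
		pause_enable = TX_PAUSEFRAME_ENABLE_V3;
	return pause_enable;
}

int main(void)
{
	printf("0x%08x\n", pick_pause_enable(DEV_HAS_PAUSEFRAME_TX_V3)); /* prints 09f00880 */
	return 0;
}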
@@ -2785,6 +2798,7 @@ static int nv_update_linkspeed(struct net_device *dev)
2785 int retval = 0; 2798 int retval = 0;
2786 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 2799 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
2787 u32 txrxFlags = 0; 2800 u32 txrxFlags = 0;
2801 u32 phy_exp;
2788 2802
2789 /* BMSR_LSTATUS is latched, read it twice: 2803 /* BMSR_LSTATUS is latched, read it twice:
2790 * we want the current value. 2804 * we want the current value.
@@ -2912,13 +2926,25 @@ set_speed:
2912 phyreg |= PHY_1000; 2926 phyreg |= PHY_1000;
2913 writel(phyreg, base + NvRegPhyInterface); 2927 writel(phyreg, base + NvRegPhyInterface);
2914 2928
2929 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
2915 if (phyreg & PHY_RGMII) { 2930 if (phyreg & PHY_RGMII) {
2916 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2931 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
2917 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 2932 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
2918 else 2933 } else {
2919 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 2934 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
2935 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
2936 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
2937 else
2938 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
2939 } else {
2940 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
2941 }
2942 }
2920 } else { 2943 } else {
2921 txreg = NVREG_TX_DEFERRAL_DEFAULT; 2944 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
2945 txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
2946 else
2947 txreg = NVREG_TX_DEFERRAL_DEFAULT;
2922 } 2948 }
2923 writel(txreg, base + NvRegTxDeferral); 2949 writel(txreg, base + NvRegTxDeferral);
2924 2950
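The deferral change above is a small decision tree: gigabit RGMII keeps its own value, while half-duplex links on a PHY that does not report NWAY autonegotiation get a "stretched" deferral value when the chip has the collision fix. A self-contained sketch of that decision, with the constant values copied from the hunk and the MII/register plumbing replaced by plain parameters:

#include <stdio.h>

/* values as defined in the NVREG_TX_DEFERRAL_* additions above */
enum {
	DEFERRAL_DEFAULT           = 0x15050f,
	DEFERRAL_RGMII_10_100      = 0x16070f,
	DEFERRAL_RGMII_1000        = 0x14050f,
	DEFERRAL_RGMII_STRETCH_10  = 0x16190f,
	DEFERRAL_RGMII_STRETCH_100 = 0x16300f,
	DEFERRAL_MII_STRETCH       = 0x152000,
};

static unsigned int pick_tx_deferral(int rgmii, int speed_mbit, int full_duplex,
				     int phy_has_nway, int has_collision_fix)
{
	int stretch = !phy_has_nway && !full_duplex && has_collision_fix;

	if (rgmii) {
		if (speed_mbit == 1000)
			return DEFERRAL_RGMII_1000;
		if (stretch)
			return speed_mbit == 10 ? DEFERRAL_RGMII_STRETCH_10
						: DEFERRAL_RGMII_STRETCH_100;
		return DEFERRAL_RGMII_10_100;
	}
	return stretch ? DEFERRAL_MII_STRETCH : DEFERRAL_DEFAULT;
}

int main(void)
{
	/* RGMII, 100 Mbit, half duplex, no NWAY, collision fix present */
	printf("0x%x\n", pick_tx_deferral(1, 100, 0, 0, 1)); /* prints 16300f */
	return 0;
}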
@@ -5155,7 +5181,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5155 } 5181 }
5156 5182
5157 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; 5183 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5158 if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) { 5184 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5185 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5186 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5159 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; 5187 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5160 } 5188 }
5161 5189
@@ -5559,107 +5587,107 @@ static struct pci_device_id pci_tbl[] = {
5559 }, 5587 },
5560 { /* MCP55 Ethernet Controller */ 5588 { /* MCP55 Ethernet Controller */
5561 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 5589 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
5562 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5590 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5563 }, 5591 },
5564 { /* MCP55 Ethernet Controller */ 5592 { /* MCP55 Ethernet Controller */
5565 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 5593 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
5566 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5594 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5567 }, 5595 },
5568 { /* MCP61 Ethernet Controller */ 5596 { /* MCP61 Ethernet Controller */
5569 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), 5597 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
5570 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5598 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5571 }, 5599 },
5572 { /* MCP61 Ethernet Controller */ 5600 { /* MCP61 Ethernet Controller */
5573 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), 5601 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
5574 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5602 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5575 }, 5603 },
5576 { /* MCP61 Ethernet Controller */ 5604 { /* MCP61 Ethernet Controller */
5577 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), 5605 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
5578 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5606 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5579 }, 5607 },
5580 { /* MCP61 Ethernet Controller */ 5608 { /* MCP61 Ethernet Controller */
5581 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), 5609 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
5582 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5610 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5583 }, 5611 },
5584 { /* MCP65 Ethernet Controller */ 5612 { /* MCP65 Ethernet Controller */
5585 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), 5613 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
5586 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5614 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5587 }, 5615 },
5588 { /* MCP65 Ethernet Controller */ 5616 { /* MCP65 Ethernet Controller */
5589 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), 5617 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
5590 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5618 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5591 }, 5619 },
5592 { /* MCP65 Ethernet Controller */ 5620 { /* MCP65 Ethernet Controller */
5593 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), 5621 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
5594 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5622 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5595 }, 5623 },
5596 { /* MCP65 Ethernet Controller */ 5624 { /* MCP65 Ethernet Controller */
5597 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), 5625 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
5598 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5626 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5599 }, 5627 },
5600 { /* MCP67 Ethernet Controller */ 5628 { /* MCP67 Ethernet Controller */
5601 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), 5629 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
5602 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5630 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5603 }, 5631 },
5604 { /* MCP67 Ethernet Controller */ 5632 { /* MCP67 Ethernet Controller */
5605 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), 5633 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
5606 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5634 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5607 }, 5635 },
5608 { /* MCP67 Ethernet Controller */ 5636 { /* MCP67 Ethernet Controller */
5609 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), 5637 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
5610 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5638 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5611 }, 5639 },
5612 { /* MCP67 Ethernet Controller */ 5640 { /* MCP67 Ethernet Controller */
5613 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), 5641 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
5614 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5642 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5615 }, 5643 },
5616 { /* MCP73 Ethernet Controller */ 5644 { /* MCP73 Ethernet Controller */
5617 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28), 5645 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
5618 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5646 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5619 }, 5647 },
5620 { /* MCP73 Ethernet Controller */ 5648 { /* MCP73 Ethernet Controller */
5621 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29), 5649 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
5622 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5650 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5623 }, 5651 },
5624 { /* MCP73 Ethernet Controller */ 5652 { /* MCP73 Ethernet Controller */
5625 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30), 5653 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
5626 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5654 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5627 }, 5655 },
5628 { /* MCP73 Ethernet Controller */ 5656 { /* MCP73 Ethernet Controller */
5629 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31), 5657 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
5630 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5658 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5631 }, 5659 },
5632 { /* MCP77 Ethernet Controller */ 5660 { /* MCP77 Ethernet Controller */
5633 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), 5661 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
5634 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5662 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5635 }, 5663 },
5636 { /* MCP77 Ethernet Controller */ 5664 { /* MCP77 Ethernet Controller */
5637 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), 5665 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
5638 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5666 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5639 }, 5667 },
5640 { /* MCP77 Ethernet Controller */ 5668 { /* MCP77 Ethernet Controller */
5641 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), 5669 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
5642 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5670 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5643 }, 5671 },
5644 { /* MCP77 Ethernet Controller */ 5672 { /* MCP77 Ethernet Controller */
5645 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), 5673 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
5646 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5674 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5647 }, 5675 },
5648 { /* MCP79 Ethernet Controller */ 5676 { /* MCP79 Ethernet Controller */
5649 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), 5677 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
5650 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5678 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5651 }, 5679 },
5652 { /* MCP79 Ethernet Controller */ 5680 { /* MCP79 Ethernet Controller */
5653 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), 5681 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
5654 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5682 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5655 }, 5683 },
5656 { /* MCP79 Ethernet Controller */ 5684 { /* MCP79 Ethernet Controller */
5657 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), 5685 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
5658 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5686 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5659 }, 5687 },
5660 { /* MCP79 Ethernet Controller */ 5688 { /* MCP79 Ethernet Controller */
5661 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), 5689 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
5662 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5690 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5663 }, 5691 },
5664 {0,}, 5692 {0,},
5665}; 5693};
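Each pci_tbl entry above just pairs a PCI device ID with the driver_data bitmask that nv_probe() later consults. A small mock of that table-plus-lookup pattern in plain C; the IDs, flag names and lookup() helper are hypothetical, not the kernel's pci_device_id machinery:

#include <stdio.h>

#define F_PAUSE_V1	0x1
#define F_PAUSE_V2	0x2
#define F_COLLISION_FIX	0x4

struct dev_entry {
	unsigned short device;		/* hypothetical device ID */
	unsigned long driver_data;	/* capability flags for that chip */
};

static const struct dev_entry tbl[] = {
	{ 0x0001, F_PAUSE_V1 },
	{ 0x0002, F_PAUSE_V2 | F_COLLISION_FIX },
	{ 0, 0 },			/* terminator, like the {0,} entry above */
};

static const struct dev_entry *lookup(unsigned short device)
{
	const struct dev_entry *e;

	for (e = tbl; e->device; e++)
		if (e->device == device)
			return e;
	return NULL;
}

int main(void)
{
	const struct dev_entry *e = lookup(0x0002);

	if (e && (e->driver_data & F_COLLISION_FIX))
		printf("device 0x%04x: collision fix enabled\n", e->device);
	return 0;
}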
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 679dfdb6807f..79b317b88c86 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -578,13 +578,6 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
578 goto err_free; 578 goto err_free;
579 } 579 }
580 580
581 fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
582 key_to_hw_index(fmr->mr.key), NULL);
583 if (!fmr->mpt) {
584 err = -ENOMEM;
585 goto err_free;
586 }
587
588 return 0; 581 return 0;
589 582
590err_free: 583err_free:
@@ -595,7 +588,19 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
595 588
596int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) 589int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
597{ 590{
598 return mlx4_mr_enable(dev, &fmr->mr); 591 struct mlx4_priv *priv = mlx4_priv(dev);
592 int err;
593
594 err = mlx4_mr_enable(dev, &fmr->mr);
595 if (err)
596 return err;
597
598 fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
599 key_to_hw_index(fmr->mr.key), NULL);
600 if (!fmr->mpt)
601 return -ENOMEM;
602
603 return 0;
599} 604}
600EXPORT_SYMBOL_GPL(mlx4_fmr_enable); 605EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
601 606
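The mlx4 change moves the dMPT lookup out of mlx4_fmr_alloc() and into mlx4_fmr_enable(), after mlx4_mr_enable() has succeeded, so the table entry is only resolved once the MR is actually live and a failed lookup needs no extra cleanup. A generic sketch of that "enable first, then resolve the dependent handle" ordering; enable_resource() and find_table_entry() are stand-ins, not mlx4 functions:

#include <stdio.h>
#include <errno.h>

/* stand-ins for mlx4_mr_enable() and mlx4_table_find() */
static int enable_resource(int key) { return key > 0 ? 0 : -EINVAL; }
static void *find_table_entry(int key) { static int entry; return key == 42 ? &entry : NULL; }

struct fmr { int key; void *mpt; };

static int fmr_enable(struct fmr *f)
{
	int err = enable_resource(f->key);

	if (err)
		return err;			/* nothing allocated here yet, nothing to undo */

	f->mpt = find_table_entry(f->key);	/* only resolved after enable succeeded */
	if (!f->mpt)
		return -ENOMEM;
	return 0;
}

int main(void)
{
	struct fmr f = { .key = 42, .mpt = NULL };
	int err = fmr_enable(&f);

	printf("fmr_enable: %d, mpt %s\n", err, f.mpt ? "set" : "unset");
	return 0;
}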
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 31e047dd7bb3..501e451be911 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -309,8 +309,8 @@ static ssize_t show_local_mac(struct netconsole_target *nt, char *buf)
309 struct net_device *dev = nt->np.dev; 309 struct net_device *dev = nt->np.dev;
310 310
311 DECLARE_MAC_BUF(mac); 311 DECLARE_MAC_BUF(mac);
312 return snprintf(buf, PAGE_SIZE, "%s\n", 312 return snprintf(buf, PAGE_SIZE, "%s\n", dev ?
313 print_mac(mac, dev->dev_addr)); 313 print_mac(mac, dev->dev_addr) : "ff:ff:ff:ff:ff:ff");
314} 314}
315 315
316static ssize_t show_remote_mac(struct netconsole_target *nt, char *buf) 316static ssize_t show_remote_mac(struct netconsole_target *nt, char *buf)
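The netconsole fix guards against nt->np.dev being NULL and falls back to the broadcast address string. A userspace sketch of the same guard using plain snprintf; the struct and helper here are simplified stand-ins (the real code goes through DECLARE_MAC_BUF/print_mac):

#include <stdio.h>

struct net_device { unsigned char dev_addr[6]; };

static int show_local_mac(char *buf, size_t len, const struct net_device *dev)
{
	if (!dev)	/* no device bound yet: report the broadcast address */
		return snprintf(buf, len, "%s\n", "ff:ff:ff:ff:ff:ff");
	return snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x\n",
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
}

int main(void)
{
	char buf[64];

	show_local_mac(buf, sizeof(buf), NULL);
	fputs(buf, stdout);	/* prints ff:ff:ff:ff:ff:ff */
	return 0;
}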
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 6b3384a24f07..26aa8fe1fb2d 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -33,20 +33,20 @@
33 * I have also done a look in the following sources: (mail me if you need them) 33 * I have also done a look in the following sources: (mail me if you need them)
34 * crynwr-packet-driver by Russ Nelson 34 * crynwr-packet-driver by Russ Nelson
35 * Garret A. Wollman's (fourth) i82586-driver for BSD 35 * Garret A. Wollman's (fourth) i82586-driver for BSD
36 * (before getting an i82596 (yes 596 not 586) manual, the existing drivers helped 36 * (before getting an i82596 (yes 596 not 586) manual, the existing drivers
37 * me a lot to understand this tricky chip.) 37 * helped me a lot to understand this tricky chip.)
38 * 38 *
39 * Known Problems: 39 * Known Problems:
40 * The internal sysbus seems to be slow. So we often lose packets because of 40 * The internal sysbus seems to be slow. So we often lose packets because of
41 * overruns while receiving from a fast remote host. 41 * overruns while receiving from a fast remote host.
42 * This can slow down TCP connections. Maybe the newer ni5210 cards are better. 42 * This can slow down TCP connections. Maybe the newer ni5210 cards are
43 * my experience is, that if a machine sends with more than about 500-600K/s 43 * better. My experience is, that if a machine sends with more than about
44 * the fifo/sysbus overflows. 44 * 500-600K/s the fifo/sysbus overflows.
45 * 45 *
46 * IMPORTANT NOTE: 46 * IMPORTANT NOTE:
47 * On fast networks, it's a (very) good idea to have 16K shared memory. With 47 * On fast networks, it's a (very) good idea to have 16K shared memory. With
48 * 8K, we can store only 4 receive frames, so it can (easily) happen that a remote 48 * 8K, we can store only 4 receive frames, so it can (easily) happen that a
49 * machine 'overruns' our system. 49 * remote machine 'overruns' our system.
50 * 50 *
51 * Known i82586/card problems (I'm sure, there are many more!): 51 * Known i82586/card problems (I'm sure, there are many more!):
52 * Running the NOP-mode, the i82586 sometimes seems to forget to report 52 * Running the NOP-mode, the i82586 sometimes seems to forget to report
@@ -60,7 +60,8 @@
60 * 60 *
61 * results from ftp performance tests with Linux 1.2.5 61 * results from ftp performance tests with Linux 1.2.5
62 * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s) 62 * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s)
63 * sending in NOP-mode: peak performance up to 530K/s (but better don't run this mode) 63 * sending in NOP-mode: peak performance up to 530K/s (but better don't
64 * run this mode)
64 */ 65 */
65 66
66/* 67/*
@@ -94,7 +95,8 @@
94 * 95 *
95 * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH) 96 * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH)
96 * 97 *
97 * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff, too (MH) 98 * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff,
99 * too (MH)
98 * 100 *
99 * < 30.Sep.93: first versions 101 * < 30.Sep.93: first versions
100 */ 102 */
@@ -102,7 +104,7 @@
102static int debuglevel; /* debug-printk 0: off 1: a few 2: more */ 104static int debuglevel; /* debug-printk 0: off 1: a few 2: more */
103static int automatic_resume; /* experimental .. better should be zero */ 105static int automatic_resume; /* experimental .. better should be zero */
104static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */ 106static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */
105static int fifo=0x8; /* don't change */ 107static int fifo = 0x8; /* don't change */
106 108
107#include <linux/module.h> 109#include <linux/module.h>
108#include <linux/kernel.h> 110#include <linux/kernel.h>
@@ -127,14 +129,15 @@ static int fifo=0x8; /* don't change */
127#define DEBUG /* debug on */ 129#define DEBUG /* debug on */
128#define SYSBUSVAL 1 /* 8 Bit */ 130#define SYSBUSVAL 1 /* 8 Bit */
129 131
130#define ni_attn586() {outb(0,dev->base_addr+NI52_ATTENTION);} 132#define ni_attn586() { outb(0, dev->base_addr + NI52_ATTENTION); }
131#define ni_reset586() {outb(0,dev->base_addr+NI52_RESET);} 133#define ni_reset586() { outb(0, dev->base_addr + NI52_RESET); }
132#define ni_disint() {outb(0,dev->base_addr+NI52_INTDIS);} 134#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); }
133#define ni_enaint() {outb(0,dev->base_addr+NI52_INTENA);} 135#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); }
134 136
135#define make32(ptr16) (p->memtop + (short) (ptr16) ) 137#define make32(ptr16) (p->memtop + (short) (ptr16))
136#define make24(ptr32) ( ((char *) (ptr32)) - p->base) 138#define make24(ptr32) ((unsigned long)(ptr32)) - p->base
137#define make16(ptr32) ((unsigned short) ((unsigned long)(ptr32) - (unsigned long) p->memtop )) 139#define make16(ptr32) ((unsigned short) ((unsigned long)(ptr32)\
140 - (unsigned long) p->memtop))
138 141
139/******************* how to calculate the buffers ***************************** 142/******************* how to calculate the buffers *****************************
140 143
@@ -159,96 +162,112 @@ sizeof(nop_cmd) = 8;
159 162
160/**************************************************************************/ 163/**************************************************************************/
161 164
162/* different DELAYs */
163#define DELAY(x) mdelay(32 * x);
164#define DELAY_16(); { udelay(16); }
165#define DELAY_18(); { udelay(4); }
166
167/* wait for command with timeout: */
168#define WAIT_4_SCB_CMD() \
169{ int i; \
170 for(i=0;i<16384;i++) { \
171 if(!p->scb->cmd_cuc) break; \
172 DELAY_18(); \
173 if(i == 16383) { \
174 printk("%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \
175 if(!p->reseted) { p->reseted = 1; ni_reset586(); } } } }
176
177#define WAIT_4_SCB_CMD_RUC() { int i; \
178 for(i=0;i<16384;i++) { \
179 if(!p->scb->cmd_ruc) break; \
180 DELAY_18(); \
181 if(i == 16383) { \
182 printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \
183 if(!p->reseted) { p->reseted = 1; ni_reset586(); } } } }
184
185#define WAIT_4_STAT_COMPL(addr) { int i; \
186 for(i=0;i<32767;i++) { \
187 if((addr)->cmd_status & STAT_COMPL) break; \
188 DELAY_16(); DELAY_16(); } }
189 165
190#define NI52_TOTAL_SIZE 16 166#define NI52_TOTAL_SIZE 16
191#define NI52_ADDR0 0x02 167#define NI52_ADDR0 0x02
192#define NI52_ADDR1 0x07 168#define NI52_ADDR1 0x07
193#define NI52_ADDR2 0x01 169#define NI52_ADDR2 0x01
194 170
195static int ni52_probe1(struct net_device *dev,int ioaddr); 171static int ni52_probe1(struct net_device *dev, int ioaddr);
196static irqreturn_t ni52_interrupt(int irq,void *dev_id); 172static irqreturn_t ni52_interrupt(int irq, void *dev_id);
197static int ni52_open(struct net_device *dev); 173static int ni52_open(struct net_device *dev);
198static int ni52_close(struct net_device *dev); 174static int ni52_close(struct net_device *dev);
199static int ni52_send_packet(struct sk_buff *,struct net_device *); 175static int ni52_send_packet(struct sk_buff *, struct net_device *);
200static struct net_device_stats *ni52_get_stats(struct net_device *dev); 176static struct net_device_stats *ni52_get_stats(struct net_device *dev);
201static void set_multicast_list(struct net_device *dev); 177static void set_multicast_list(struct net_device *dev);
202static void ni52_timeout(struct net_device *dev); 178static void ni52_timeout(struct net_device *dev);
203#if 0
204static void ni52_dump(struct net_device *,void *);
205#endif
206 179
207/* helper-functions */ 180/* helper-functions */
208static int init586(struct net_device *dev); 181static int init586(struct net_device *dev);
209static int check586(struct net_device *dev,char *where,unsigned size); 182static int check586(struct net_device *dev, char *where, unsigned size);
210static void alloc586(struct net_device *dev); 183static void alloc586(struct net_device *dev);
211static void startrecv586(struct net_device *dev); 184static void startrecv586(struct net_device *dev);
212static void *alloc_rfa(struct net_device *dev,void *ptr); 185static void *alloc_rfa(struct net_device *dev, void *ptr);
213static void ni52_rcv_int(struct net_device *dev); 186static void ni52_rcv_int(struct net_device *dev);
214static void ni52_xmt_int(struct net_device *dev); 187static void ni52_xmt_int(struct net_device *dev);
215static void ni52_rnr_int(struct net_device *dev); 188static void ni52_rnr_int(struct net_device *dev);
216 189
217struct priv 190struct priv {
218{
219 struct net_device_stats stats; 191 struct net_device_stats stats;
220 unsigned long base; 192 unsigned long base;
221 char *memtop; 193 char *memtop;
222 long int lock; 194 spinlock_t spinlock;
223 int reseted; 195 int reset;
224 volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first; 196 struct rfd_struct *rfd_last, *rfd_top, *rfd_first;
225 volatile struct scp_struct *scp; /* volatile is important */ 197 struct scp_struct *scp;
226 volatile struct iscp_struct *iscp; /* volatile is important */ 198 struct iscp_struct *iscp;
227 volatile struct scb_struct *scb; /* volatile is important */ 199 struct scb_struct *scb;
228 volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; 200 struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
229#if (NUM_XMIT_BUFFS == 1) 201#if (NUM_XMIT_BUFFS == 1)
230 volatile struct transmit_cmd_struct *xmit_cmds[2]; 202 struct transmit_cmd_struct *xmit_cmds[2];
231 volatile struct nop_cmd_struct *nop_cmds[2]; 203 struct nop_cmd_struct *nop_cmds[2];
232#else 204#else
233 volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS]; 205 struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
234 volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS]; 206 struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
235#endif 207#endif
236 volatile int nop_point,num_recv_buffs; 208 int nop_point, num_recv_buffs;
237 volatile char *xmit_cbuffs[NUM_XMIT_BUFFS]; 209 char *xmit_cbuffs[NUM_XMIT_BUFFS];
238 volatile int xmit_count,xmit_last; 210 int xmit_count, xmit_last;
239}; 211};
240 212
213/* wait for command with timeout: */
214static void wait_for_scb_cmd(struct net_device *dev)
215{
216 struct priv *p = dev->priv;
217 int i;
218 for (i = 0; i < 16384; i++) {
219 if (readb(&p->scb->cmd_cuc) == 0)
220 break;
221 udelay(4);
222 if (i == 16383) {
223 printk(KERN_ERR "%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",
224 dev->name, readb(&p->scb->cmd_cuc), readb(&p->scb->cus));
225 if (!p->reset) {
226 p->reset = 1;
227 ni_reset586();
228 }
229 }
230 }
231}
232
233static void wait_for_scb_cmd_ruc(struct net_device *dev)
234{
235 struct priv *p = dev->priv;
236 int i;
237 for (i = 0; i < 16384; i++) {
238 if (readb(&p->scb->cmd_ruc) == 0)
239 break;
240 udelay(4);
241 if (i == 16383) {
242 printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",
243 dev->name, p->scb->cmd_ruc, p->scb->rus);
244 if (!p->reset) {
245 p->reset = 1;
246 ni_reset586();
247 }
248 }
249 }
250}
251
252static void wait_for_stat_compl(void *p)
253{
254 struct nop_cmd_struct *addr = p;
255 int i;
256 for (i = 0; i < 32767; i++) {
257 if (readw(&((addr)->cmd_status)) & STAT_COMPL)
258 break;
259 udelay(32);
260 }
261}
262
241/********************************************** 263/**********************************************
242 * close device 264 * close device
243 */ 265 */
244static int ni52_close(struct net_device *dev) 266static int ni52_close(struct net_device *dev)
245{ 267{
246 free_irq(dev->irq, dev); 268 free_irq(dev->irq, dev);
247
248 ni_reset586(); /* the hard way to stop the receiver */ 269 ni_reset586(); /* the hard way to stop the receiver */
249
250 netif_stop_queue(dev); 270 netif_stop_queue(dev);
251
252 return 0; 271 return 0;
253} 272}
254 273
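Turning WAIT_4_SCB_CMD() and friends from statement macros into the wait_for_scb_cmd*() functions above gives one copy of the loop, type checking and something a debugger can step into. The shape of such a bounded-poll helper, sketched in plain C with a dummy status source standing in for the SCB register and the udelay() left out:

#include <stdio.h>

/* dummy "register": pretend the command field clears after a few polls */
static int polls_left = 5;
static unsigned char read_cmd_status(void)
{
	return polls_left-- > 0 ? 1 : 0;
}

/* bounded poll: returns 0 on completion, -1 on timeout */
static int wait_for_cmd_clear(int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++) {
		if (read_cmd_status() == 0)
			return 0;
		/* the driver calls udelay(4) between polls; omitted here */
	}
	fprintf(stderr, "cmd timed out after %d polls\n", max_polls);
	return -1;
}

int main(void)
{
	printf("wait result: %d\n", wait_for_cmd_clear(16384));
	return 0;
}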
@@ -265,55 +284,53 @@ static int ni52_open(struct net_device *dev)
265 startrecv586(dev); 284 startrecv586(dev);
266 ni_enaint(); 285 ni_enaint();
267 286
268 ret = request_irq(dev->irq, &ni52_interrupt,0,dev->name,dev); 287 ret = request_irq(dev->irq, &ni52_interrupt, 0, dev->name, dev);
269 if (ret) 288 if (ret) {
270 {
271 ni_reset586(); 289 ni_reset586();
272 return ret; 290 return ret;
273 } 291 }
274
275 netif_start_queue(dev); 292 netif_start_queue(dev);
276
277 return 0; /* most done by init */ 293 return 0; /* most done by init */
278} 294}
279 295
280/********************************************** 296/**********************************************
281 * Check to see if there's an 82586 out there. 297 * Check to see if there's an 82586 out there.
282 */ 298 */
283static int check586(struct net_device *dev,char *where,unsigned size) 299static int check586(struct net_device *dev, char *where, unsigned size)
284{ 300{
285 struct priv pb; 301 struct priv pb;
286 struct priv *p = /* (struct priv *) dev->priv*/ &pb; 302 struct priv *p = /* (struct priv *) dev->priv*/ &pb;
287 char *iscp_addrs[2]; 303 char *iscp_addrs[2];
288 int i; 304 int i;
289 305
290 p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) + size - 0x01000000; 306 p->base = (unsigned long) isa_bus_to_virt((unsigned long)where)
307 + size - 0x01000000;
291 p->memtop = isa_bus_to_virt((unsigned long)where) + size; 308 p->memtop = isa_bus_to_virt((unsigned long)where) + size;
292 p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS); 309 p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
293 memset((char *)p->scp,0, sizeof(struct scp_struct)); 310 memset_io((char *)p->scp, 0, sizeof(struct scp_struct));
294 for(i=0;i<sizeof(struct scp_struct);i++) /* memory was writeable? */ 311 for (i = 0; i < sizeof(struct scp_struct); i++)
295 if(((char *)p->scp)[i]) 312 /* memory was writeable? */
313 if (readb((char *)p->scp + i))
296 return 0; 314 return 0;
297 p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */ 315 writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */
298 if(p->scp->sysbus != SYSBUSVAL) 316 if (readb(&p->scp->sysbus) != SYSBUSVAL)
299 return 0; 317 return 0;
300 318
301 iscp_addrs[0] = isa_bus_to_virt((unsigned long)where); 319 iscp_addrs[0] = isa_bus_to_virt((unsigned long)where);
302 iscp_addrs[1]= (char *) p->scp - sizeof(struct iscp_struct); 320 iscp_addrs[1] = (char *) p->scp - sizeof(struct iscp_struct);
303 321
304 for(i=0;i<2;i++) 322 for (i = 0; i < 2; i++) {
305 {
306 p->iscp = (struct iscp_struct *) iscp_addrs[i]; 323 p->iscp = (struct iscp_struct *) iscp_addrs[i];
307 memset((char *)p->iscp,0, sizeof(struct iscp_struct)); 324 memset_io((char *)p->iscp, 0, sizeof(struct iscp_struct));
308 325
309 p->scp->iscp = make24(p->iscp); 326 writel(make24(p->iscp), &p->scp->iscp);
310 p->iscp->busy = 1; 327 writeb(1, &p->iscp->busy);
311 328
312 ni_reset586(); 329 ni_reset586();
313 ni_attn586(); 330 ni_attn586();
314 DELAY(1); /* wait a while... */ 331 mdelay(32); /* wait a while... */
315 332 /* i82586 clears 'busy' after successful init */
316 if(p->iscp->busy) /* i82586 clears 'busy' after successful init */ 333 if (readb(&p->iscp->busy))
317 return 0; 334 return 0;
318 } 335 }
319 return 1; 336 return 1;
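A second theme of the ni52 conversion is replacing direct dereferences of the shared ISA window with readb()/writeb()/memset_io(), so every access goes through the I/O-memory accessors instead of relying on volatile struct fields. A userspace mock of that discipline; the mock_* accessors here just wrap a byte array and only imitate the shape of the kernel helpers:

#include <stdio.h>
#include <string.h>

static unsigned char fake_window[64];	/* stands in for the mapped card memory */

/* mock accessors with the same shape as readb()/writeb()/memset_io() */
static unsigned char mock_readb(const void *addr)	{ return *(const unsigned char *)addr; }
static void mock_writeb(unsigned char v, void *addr)	{ *(unsigned char *)addr = v; }
static void mock_memset_io(void *addr, int c, size_t n)	{ memset(addr, c, n); }

int main(void)
{
	unsigned char *sysbus = &fake_window[6];

	mock_memset_io(fake_window, 0, sizeof(fake_window));
	mock_writeb(1, sysbus);		/* like writeb(SYSBUSVAL, &p->scp->sysbus) */
	if (mock_readb(sysbus) != 1)	/* like the read-back check in check586() */
		fprintf(stderr, "memory check failed\n");
	else
		printf("memory check passed\n");
	return 0;
}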
@@ -327,36 +344,39 @@ static void alloc586(struct net_device *dev)
327 struct priv *p = (struct priv *) dev->priv; 344 struct priv *p = (struct priv *) dev->priv;
328 345
329 ni_reset586(); 346 ni_reset586();
330 DELAY(1); 347 mdelay(32);
348
349 spin_lock_init(&p->spinlock);
331 350
332 p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS); 351 p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
333 p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start); 352 p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start);
334 p->iscp = (struct iscp_struct *) ((char *)p->scp - sizeof(struct iscp_struct)); 353 p->iscp = (struct iscp_struct *)
354 ((char *)p->scp - sizeof(struct iscp_struct));
335 355
336 memset((char *) p->iscp,0,sizeof(struct iscp_struct)); 356 memset_io(p->iscp, 0, sizeof(struct iscp_struct));
337 memset((char *) p->scp ,0,sizeof(struct scp_struct)); 357 memset_io(p->scp , 0, sizeof(struct scp_struct));
338 358
339 p->scp->iscp = make24(p->iscp); 359 writel(make24(p->iscp), &p->scp->iscp);
340 p->scp->sysbus = SYSBUSVAL; 360 writeb(SYSBUSVAL, &p->scp->sysbus);
341 p->iscp->scb_offset = make16(p->scb); 361 writew(make16(p->scb), &p->iscp->scb_offset);
342 362
343 p->iscp->busy = 1; 363 writeb(1, &p->iscp->busy);
344 ni_reset586(); 364 ni_reset586();
345 ni_attn586(); 365 ni_attn586();
346 366
347 DELAY(1); 367 mdelay(32);
348 368
349 if(p->iscp->busy) 369 if (readb(&p->iscp->busy))
350 printk("%s: Init-Problems (alloc).\n",dev->name); 370 printk(KERN_ERR "%s: Init-Problems (alloc).\n", dev->name);
351 371
352 p->reseted = 0; 372 p->reset = 0;
353 373
354 memset((char *)p->scb,0,sizeof(struct scb_struct)); 374 memset_io((char *)p->scb, 0, sizeof(struct scb_struct));
355} 375}
356 376
357/* set: io,irq,memstart,memend or set it when calling insmod */ 377/* set: io,irq,memstart,memend or set it when calling insmod */
358static int irq=9; 378static int irq = 9;
359static int io=0x300; 379static int io = 0x300;
360static long memstart; /* e.g 0xd0000 */ 380static long memstart; /* e.g 0xd0000 */
361static long memend; /* e.g 0xd4000 */ 381static long memend; /* e.g 0xd4000 */
362 382
@@ -413,7 +433,7 @@ out:
413 return ERR_PTR(err); 433 return ERR_PTR(err);
414} 434}
415 435
416static int __init ni52_probe1(struct net_device *dev,int ioaddr) 436static int __init ni52_probe1(struct net_device *dev, int ioaddr)
417{ 437{
418 int i, size, retval; 438 int i, size, retval;
419 439
@@ -425,90 +445,96 @@ static int __init ni52_probe1(struct net_device *dev,int ioaddr)
425 if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME)) 445 if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME))
426 return -EBUSY; 446 return -EBUSY;
427 447
428 if( !(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) || 448 if (!(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) ||
429 !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2)) { 449 !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2)) {
430 retval = -ENODEV; 450 retval = -ENODEV;
431 goto out; 451 goto out;
432 } 452 }
433 453
434 for(i=0;i<ETH_ALEN;i++) 454 for (i = 0; i < ETH_ALEN; i++)
435 dev->dev_addr[i] = inb(dev->base_addr+i); 455 dev->dev_addr[i] = inb(dev->base_addr+i);
436 456
437 if(dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1 457 if (dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1
438 || dev->dev_addr[2] != NI52_ADDR2) { 458 || dev->dev_addr[2] != NI52_ADDR2) {
439 retval = -ENODEV; 459 retval = -ENODEV;
440 goto out; 460 goto out;
441 } 461 }
442 462
443 printk(KERN_INFO "%s: NI5210 found at %#3lx, ",dev->name,dev->base_addr); 463 printk(KERN_INFO "%s: NI5210 found at %#3lx, ",
464 dev->name, dev->base_addr);
444 465
445 /* 466 /*
446 * check (or search) IO-Memory, 8K and 16K 467 * check (or search) IO-Memory, 8K and 16K
447 */ 468 */
448#ifdef MODULE 469#ifdef MODULE
449 size = dev->mem_end - dev->mem_start; 470 size = dev->mem_end - dev->mem_start;
450 if(size != 0x2000 && size != 0x4000) { 471 if (size != 0x2000 && size != 0x4000) {
451 printk("\n%s: Illegal memory size %d. Allowed is 0x2000 or 0x4000 bytes.\n",dev->name,size); 472 printk("\n");
473 printk(KERN_ERR "%s: Invalid memory size %d. Allowed is 0x2000 or 0x4000 bytes.\n", dev->name, size);
452 retval = -ENODEV; 474 retval = -ENODEV;
453 goto out; 475 goto out;
454 } 476 }
455 if(!check586(dev,(char *) dev->mem_start,size)) { 477 if (!check586(dev, (char *)dev->mem_start, size)) {
456 printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size); 478 printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size);
457 retval = -ENODEV; 479 retval = -ENODEV;
458 goto out; 480 goto out;
459 } 481 }
460#else 482#else
461 if(dev->mem_start != 0) /* no auto-mem-probe */ 483 if (dev->mem_start != 0) {
462 { 484 /* no auto-mem-probe */
463 size = 0x4000; /* check for 16K mem */ 485 size = 0x4000; /* check for 16K mem */
464 if(!check586(dev,(char *) dev->mem_start,size)) { 486 if (!check586(dev, (char *) dev->mem_start, size)) {
465 size = 0x2000; /* check for 8K mem */ 487 size = 0x2000; /* check for 8K mem */
466 if(!check586(dev,(char *) dev->mem_start,size)) { 488 if (!check586(dev, (char *)dev->mem_start, size)) {
467 printk("?memprobe, Can't find memory at 0x%lx!\n",dev->mem_start); 489 printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start);
468 retval = -ENODEV; 490 retval = -ENODEV;
469 goto out; 491 goto out;
470 } 492 }
471 } 493 }
472 } 494 } else {
473 else 495 static const unsigned long memaddrs[] = {
474 { 496 0xc8000, 0xca000, 0xcc000, 0xce000, 0xd0000, 0xd2000,
475 static long memaddrs[] = { 0xc8000,0xca000,0xcc000,0xce000,0xd0000,0xd2000, 497 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0
476 0xd4000,0xd6000,0xd8000,0xda000,0xdc000, 0 }; 498 };
477 for(i=0;;i++) 499 for (i = 0;; i++) {
478 { 500 if (!memaddrs[i]) {
479 if(!memaddrs[i]) { 501 printk(KERN_ERR "?memprobe, Can't find io-memory!\n");
480 printk("?memprobe, Can't find io-memory!\n");
481 retval = -ENODEV; 502 retval = -ENODEV;
482 goto out; 503 goto out;
483 } 504 }
484 dev->mem_start = memaddrs[i]; 505 dev->mem_start = memaddrs[i];
485 size = 0x2000; /* check for 8K mem */ 506 size = 0x2000; /* check for 8K mem */
486 if(check586(dev,(char *)dev->mem_start,size)) /* 8K-check */ 507 if (check586(dev, (char *)dev->mem_start, size))
508 /* 8K-check */
487 break; 509 break;
488 size = 0x4000; /* check for 16K mem */ 510 size = 0x4000; /* check for 16K mem */
489 if(check586(dev,(char *)dev->mem_start,size)) /* 16K-check */ 511 if (check586(dev, (char *)dev->mem_start, size))
512 /* 16K-check */
490 break; 513 break;
491 } 514 }
492 } 515 }
493 dev->mem_end = dev->mem_start + size; /* set mem_end showed by 'ifconfig' */ 516 /* set mem_end showed by 'ifconfig' */
517 dev->mem_end = dev->mem_start + size;
494#endif 518#endif
495 519
496 memset((char *) dev->priv,0,sizeof(struct priv)); 520 memset((char *)dev->priv, 0, sizeof(struct priv));
497 521
498 ((struct priv *) (dev->priv))->memtop = isa_bus_to_virt(dev->mem_start) + size; 522 ((struct priv *)(dev->priv))->memtop =
499 ((struct priv *) (dev->priv))->base = (unsigned long) isa_bus_to_virt(dev->mem_start) + size - 0x01000000; 523 isa_bus_to_virt(dev->mem_start) + size;
524 ((struct priv *)(dev->priv))->base = (unsigned long)
525 isa_bus_to_virt(dev->mem_start) + size - 0x01000000;
500 alloc586(dev); 526 alloc586(dev);
501 527
502 /* set number of receive-buffs according to memsize */ 528 /* set number of receive-buffs according to memsize */
503 if(size == 0x2000) 529 if (size == 0x2000)
504 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_8; 530 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_8;
505 else 531 else
506 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_16; 532 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_16;
507 533
508 printk("Memaddr: 0x%lx, Memsize: %d, ",dev->mem_start,size); 534 printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ",
535 dev->mem_start, size);
509 536
510 if(dev->irq < 2) 537 if (dev->irq < 2) {
511 {
512 unsigned long irq_mask; 538 unsigned long irq_mask;
513 539
514 irq_mask = probe_irq_on(); 540 irq_mask = probe_irq_on();
@@ -517,18 +543,16 @@ static int __init ni52_probe1(struct net_device *dev,int ioaddr)
517 543
518 mdelay(20); 544 mdelay(20);
519 dev->irq = probe_irq_off(irq_mask); 545 dev->irq = probe_irq_off(irq_mask);
520 if(!dev->irq) 546 if (!dev->irq) {
521 {
522 printk("?autoirq, Failed to detect IRQ line!\n"); 547 printk("?autoirq, Failed to detect IRQ line!\n");
523 retval = -EAGAIN; 548 retval = -EAGAIN;
524 goto out; 549 goto out;
525 } 550 }
526 printk("IRQ %d (autodetected).\n",dev->irq); 551 printk("IRQ %d (autodetected).\n", dev->irq);
527 } 552 } else {
528 else { 553 if (dev->irq == 2)
529 if(dev->irq == 2)
530 dev->irq = 9; 554 dev->irq = 9;
531 printk("IRQ %d (assigned and not checked!).\n",dev->irq); 555 printk("IRQ %d (assigned and not checked!).\n", dev->irq);
532 } 556 }
533 557
534 dev->open = ni52_open; 558 dev->open = ni52_open;
@@ -555,56 +579,58 @@ out:
555static int init586(struct net_device *dev) 579static int init586(struct net_device *dev)
556{ 580{
557 void *ptr; 581 void *ptr;
558 int i,result=0; 582 int i, result = 0;
559 struct priv *p = (struct priv *) dev->priv; 583 struct priv *p = (struct priv *)dev->priv;
560 volatile struct configure_cmd_struct *cfg_cmd; 584 struct configure_cmd_struct *cfg_cmd;
561 volatile struct iasetup_cmd_struct *ias_cmd; 585 struct iasetup_cmd_struct *ias_cmd;
562 volatile struct tdr_cmd_struct *tdr_cmd; 586 struct tdr_cmd_struct *tdr_cmd;
563 volatile struct mcsetup_cmd_struct *mc_cmd; 587 struct mcsetup_cmd_struct *mc_cmd;
564 struct dev_mc_list *dmi=dev->mc_list; 588 struct dev_mc_list *dmi = dev->mc_list;
565 int num_addrs=dev->mc_count; 589 int num_addrs = dev->mc_count;
566 590
567 ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct)); 591 ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
568 592
569 cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */ 593 cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */
570 cfg_cmd->cmd_status = 0; 594 writew(0, &cfg_cmd->cmd_status);
571 cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST; 595 writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd);
572 cfg_cmd->cmd_link = 0xffff; 596 writew(0xFFFF, &cfg_cmd->cmd_link);
573 597
574 cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */ 598 /* number of cfg bytes */
575 cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */ 599 writeb(0x0a, &cfg_cmd->byte_cnt);
576 cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */ 600 /* fifo-limit (8=tx:32/rx:64) */
577 cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */ 601 writeb(fifo, &cfg_cmd->fifo);
578 cfg_cmd->priority = 0x00; 602 /* hold or discard bad recv frames (bit 7) */
579 cfg_cmd->ifs = 0x60; 603 writeb(0x40, &cfg_cmd->sav_bf);
580 cfg_cmd->time_low = 0x00; 604 /* addr_len |!src_insert |pre-len |loopback */
581 cfg_cmd->time_high = 0xf2; 605 writeb(0x2e, &cfg_cmd->adr_len);
582 cfg_cmd->promisc = 0; 606 writeb(0x00, &cfg_cmd->priority);
583 if(dev->flags & IFF_ALLMULTI) { 607 writeb(0x60, &cfg_cmd->ifs);;
608 writeb(0x00, &cfg_cmd->time_low);
609 writeb(0xf2, &cfg_cmd->time_high);
610 writeb(0x00, &cfg_cmd->promisc);;
611 if (dev->flags & IFF_ALLMULTI) {
584 int len = ((char *) p->iscp - (char *) ptr - 8) / 6; 612 int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
585 if(num_addrs > len) { 613 if (num_addrs > len) {
586 printk("%s: switching to promisc. mode\n",dev->name); 614 printk(KERN_ERR "%s: switching to promisc. mode\n",
587 dev->flags|=IFF_PROMISC; 615 dev->name);
616 dev->flags |= IFF_PROMISC;
588 } 617 }
589 } 618 }
590 if(dev->flags&IFF_PROMISC) 619 if (dev->flags & IFF_PROMISC)
591 { 620 writeb(0x01, &cfg_cmd->promisc);
592 cfg_cmd->promisc=1; 621 writeb(0x00, &cfg_cmd->carr_coll);
593 dev->flags|=IFF_PROMISC; 622 writew(make16(cfg_cmd), &p->scb->cbl_offset);
594 } 623 writew(0, &p->scb->cmd_ruc);
595 cfg_cmd->carr_coll = 0x00;
596 624
597 p->scb->cbl_offset = make16(cfg_cmd); 625 writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
598 p->scb->cmd_ruc = 0;
599
600 p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
601 ni_attn586(); 626 ni_attn586();
602 627
603 WAIT_4_STAT_COMPL(cfg_cmd); 628 wait_for_stat_compl(cfg_cmd);
604 629
605 if((cfg_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK)) 630 if ((readw(&cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
606 { 631 (STAT_COMPL|STAT_OK)) {
607 printk("%s: configure command failed: %x\n",dev->name,cfg_cmd->cmd_status); 632 printk(KERN_ERR "%s: configure command failed: %x\n",
633 dev->name, readw(&cfg_cmd->cmd_status));
608 return 1; 634 return 1;
609 } 635 }
610 636
@@ -614,21 +640,22 @@ static int init586(struct net_device *dev)
614 640
615 ias_cmd = (struct iasetup_cmd_struct *)ptr; 641 ias_cmd = (struct iasetup_cmd_struct *)ptr;
616 642
617 ias_cmd->cmd_status = 0; 643 writew(0, &ias_cmd->cmd_status);
618 ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST; 644 writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd);
619 ias_cmd->cmd_link = 0xffff; 645 writew(0xffff, &ias_cmd->cmd_link);
620 646
621 memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN); 647 memcpy_toio((char *)&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN);
622 648
623 p->scb->cbl_offset = make16(ias_cmd); 649 writew(make16(ias_cmd), &p->scb->cbl_offset);
624 650
625 p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ 651 writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
626 ni_attn586(); 652 ni_attn586();
627 653
628 WAIT_4_STAT_COMPL(ias_cmd); 654 wait_for_stat_compl(ias_cmd);
629 655
630 if((ias_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) { 656 if ((readw(&ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
631 printk("%s (ni52): individual address setup command failed: %04x\n",dev->name,ias_cmd->cmd_status); 657 (STAT_OK|STAT_COMPL)) {
658 printk(KERN_ERR "%s (ni52): individual address setup command failed: %04x\n", dev->name, readw(&ias_cmd->cmd_status));
632 return 1; 659 return 1;
633 } 660 }
634 661
@@ -638,117 +665,119 @@ static int init586(struct net_device *dev)
638 665
639 tdr_cmd = (struct tdr_cmd_struct *)ptr; 666 tdr_cmd = (struct tdr_cmd_struct *)ptr;
640 667
641 tdr_cmd->cmd_status = 0; 668 writew(0, &tdr_cmd->cmd_status);
642 tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST; 669 writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd);
643 tdr_cmd->cmd_link = 0xffff; 670 writew(0xffff, &tdr_cmd->cmd_link);
644 tdr_cmd->status = 0; 671 writew(0, &tdr_cmd->status);
645 672
646 p->scb->cbl_offset = make16(tdr_cmd); 673 writew(make16(tdr_cmd), &p->scb->cbl_offset);
647 p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ 674 writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
648 ni_attn586(); 675 ni_attn586();
649 676
650 WAIT_4_STAT_COMPL(tdr_cmd); 677 wait_for_stat_compl(tdr_cmd);
651
652 if(!(tdr_cmd->cmd_status & STAT_COMPL))
653 {
654 printk("%s: Problems while running the TDR.\n",dev->name);
655 }
656 else
657 {
658 DELAY_16(); /* wait for result */
659 result = tdr_cmd->status;
660 678
661 p->scb->cmd_cuc = p->scb->cus & STAT_MASK; 679 if (!(readw(&tdr_cmd->cmd_status) & STAT_COMPL))
680 printk(KERN_ERR "%s: Problems while running the TDR.\n",
681 dev->name);
682 else {
683 udelay(16);
684 result = readw(&tdr_cmd->status);
685 writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
662 ni_attn586(); /* ack the interrupts */ 686 ni_attn586(); /* ack the interrupts */
663 687
664 if(result & TDR_LNK_OK) 688 if (result & TDR_LNK_OK)
665 ; 689 ;
666 else if(result & TDR_XCVR_PRB) 690 else if (result & TDR_XCVR_PRB)
667 printk("%s: TDR: Transceiver problem. Check the cable(s)!\n",dev->name); 691 printk(KERN_ERR "%s: TDR: Transceiver problem. Check the cable(s)!\n",
668 else if(result & TDR_ET_OPN) 692 dev->name);
669 printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK); 693 else if (result & TDR_ET_OPN)
670 else if(result & TDR_ET_SRT) 694 printk(KERN_ERR "%s: TDR: No correct termination %d clocks away.\n",
671 { 695 dev->name, result & TDR_TIMEMASK);
672 if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */ 696 else if (result & TDR_ET_SRT) {
673 printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK); 697 /* time == 0 -> strange :-) */
674 } 698 if (result & TDR_TIMEMASK)
675 else 699 printk(KERN_ERR "%s: TDR: Detected a short circuit %d clocks away.\n",
676 printk("%s: TDR: Unknown status %04x\n",dev->name,result); 700 dev->name, result & TDR_TIMEMASK);
701 } else
702 printk(KERN_ERR "%s: TDR: Unknown status %04x\n",
703 dev->name, result);
677 } 704 }
678 705
679 /* 706 /*
680 * Multicast setup 707 * Multicast setup
681 */ 708 */
682 if(num_addrs && !(dev->flags & IFF_PROMISC) ) 709 if (num_addrs && !(dev->flags & IFF_PROMISC)) {
683 {
684 mc_cmd = (struct mcsetup_cmd_struct *) ptr; 710 mc_cmd = (struct mcsetup_cmd_struct *) ptr;
685 mc_cmd->cmd_status = 0; 711 writew(0, &mc_cmd->cmd_status);
686 mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST; 712 writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd);
687 mc_cmd->cmd_link = 0xffff; 713 writew(0xffff, &mc_cmd->cmd_link);
688 mc_cmd->mc_cnt = num_addrs * 6; 714 writew(num_addrs * 6, &mc_cmd->mc_cnt);
689 715
690 for(i=0;i<num_addrs;i++,dmi=dmi->next) 716 for (i = 0; i < num_addrs; i++, dmi = dmi->next)
691 memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr,6); 717 memcpy_toio((char *) mc_cmd->mc_list[i],
718 dmi->dmi_addr, 6);
692 719
693 p->scb->cbl_offset = make16(mc_cmd); 720 writew(make16(mc_cmd), &p->scb->cbl_offset);
694 p->scb->cmd_cuc = CUC_START; 721 writeb(CUC_START, &p->scb->cmd_cuc);
695 ni_attn586(); 722 ni_attn586();
696 723
697 WAIT_4_STAT_COMPL(mc_cmd); 724 wait_for_stat_compl(mc_cmd);
698 725
699 if( (mc_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) ) 726 if ((readw(&mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK))
700 printk("%s: Can't apply multicast-address-list.\n",dev->name); 727 != (STAT_COMPL|STAT_OK))
728 printk(KERN_ERR "%s: Can't apply multicast-address-list.\n", dev->name);
701 } 729 }
702 730
703 /* 731 /*
704 * alloc nop/xmit-cmds 732 * alloc nop/xmit-cmds
705 */ 733 */
706#if (NUM_XMIT_BUFFS == 1) 734#if (NUM_XMIT_BUFFS == 1)
707 for(i=0;i<2;i++) 735 for (i = 0; i < 2; i++) {
708 { 736 p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
709 p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; 737 writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
710 p->nop_cmds[i]->cmd_cmd = CMD_NOP; 738 writew(0, &p->nop_cmds[i]->cmd_status);
711 p->nop_cmds[i]->cmd_status = 0; 739 writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
712 p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
713 ptr = (char *) ptr + sizeof(struct nop_cmd_struct); 740 ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
714 } 741 }
715#else 742#else
716 for(i=0;i<NUM_XMIT_BUFFS;i++) 743 for (i = 0; i < NUM_XMIT_BUFFS; i++) {
717 { 744 p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
718 p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; 745 writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
719 p->nop_cmds[i]->cmd_cmd = CMD_NOP; 746 writew(0, &p->nop_cmds[i]->cmd_status);
720 p->nop_cmds[i]->cmd_status = 0; 747 writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
721 p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
722 ptr = (char *) ptr + sizeof(struct nop_cmd_struct); 748 ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
723 } 749 }
724#endif 750#endif
725 751
726 ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */ 752 ptr = alloc_rfa(dev, (void *)ptr); /* init receive-frame-area */
727 753
728 /* 754 /*
729 * alloc xmit-buffs / init xmit_cmds 755 * alloc xmit-buffs / init xmit_cmds
730 */ 756 */
731 for(i=0;i<NUM_XMIT_BUFFS;i++) 757 for (i = 0; i < NUM_XMIT_BUFFS; i++) {
732 { 758 /* Transmit cmd/buff 0 */
733 p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/ 759 p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr;
734 ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); 760 ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
735 p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */ 761 p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */
736 ptr = (char *) ptr + XMIT_BUFF_SIZE; 762 ptr = (char *) ptr + XMIT_BUFF_SIZE;
737 p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */ 763 p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
738 ptr = (char *) ptr + sizeof(struct tbd_struct); 764 ptr = (char *) ptr + sizeof(struct tbd_struct);
739 if((void *)ptr > (void *)p->iscp) 765 if ((void *)ptr > (void *)p->iscp) {
740 { 766 printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n",
741 printk("%s: not enough shared-mem for your configuration!\n",dev->name); 767 dev->name);
742 return 1; 768 return 1;
743 } 769 }
744 memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct)); 770 memset_io((char *)(p->xmit_cmds[i]), 0,
745 memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct)); 771 sizeof(struct transmit_cmd_struct));
746 p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]); 772 memset_io((char *)(p->xmit_buffs[i]), 0,
747 p->xmit_cmds[i]->cmd_status = STAT_COMPL; 773 sizeof(struct tbd_struct));
748 p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT; 774 writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]),
749 p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i])); 775 &p->xmit_cmds[i]->cmd_link);
750 p->xmit_buffs[i]->next = 0xffff; 776 writew(STAT_COMPL, &p->xmit_cmds[i]->cmd_status);
751 p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i])); 777 writew(CMD_XMIT|CMD_INT, &p->xmit_cmds[i]->cmd_cmd);
778 writew(make16(p->xmit_buffs[i]), &p->xmit_cmds[i]->tbd_offset);
779 writew(0xffff, &p->xmit_buffs[i]->next);
780 writel(make24(p->xmit_cbuffs[i]), &p->xmit_buffs[i]->buffer);
752 } 781 }
753 782
754 p->xmit_count = 0; 783 p->xmit_count = 0;
@@ -761,21 +790,21 @@ static int init586(struct net_device *dev)
761 * 'start transmitter' 790 * 'start transmitter'
762 */ 791 */
763#ifndef NO_NOPCOMMANDS 792#ifndef NO_NOPCOMMANDS
764 p->scb->cbl_offset = make16(p->nop_cmds[0]); 793 writew(make16(p->nop_cmds[0]), &p->scb->cbl_offset);
765 p->scb->cmd_cuc = CUC_START; 794 writeb(CUC_START, &p->scb->cmd_cuc);
766 ni_attn586(); 795 ni_attn586();
767 WAIT_4_SCB_CMD(); 796 wait_for_scb_cmd(dev);
768#else 797#else
769 p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]); 798 writew(make16(p->xmit_cmds[0]), &p->xmit_cmds[0]->cmd_link);
770 p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_SUSPEND | CMD_INT; 799 writew(CMD_XMIT | CMD_SUSPEND | CMD_INT, &p->xmit_cmds[0]->cmd_cmd);
771#endif 800#endif
772 801
773 /* 802 /*
774 * ack. interrupts 803 * ack. interrupts
775 */ 804 */
776 p->scb->cmd_cuc = p->scb->cus & STAT_MASK; 805 writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
777 ni_attn586(); 806 ni_attn586();
778 DELAY_16(); 807 udelay(16);
779 808
780 ni_enaint(); 809 ni_enaint();
781 810
@@ -787,43 +816,45 @@ static int init586(struct net_device *dev)
787 * It sets up the Receive Frame Area (RFA). 816 * It sets up the Receive Frame Area (RFA).
788 */ 817 */
789 818
790static void *alloc_rfa(struct net_device *dev,void *ptr) 819static void *alloc_rfa(struct net_device *dev, void *ptr)
791{ 820{
792 volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr; 821 struct rfd_struct *rfd = (struct rfd_struct *)ptr;
793 volatile struct rbd_struct *rbd; 822 struct rbd_struct *rbd;
794 int i; 823 int i;
795 struct priv *p = (struct priv *) dev->priv; 824 struct priv *p = (struct priv *) dev->priv;
796 825
797 memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd)); 826 memset_io((char *) rfd, 0,
827 sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd));
798 p->rfd_first = rfd; 828 p->rfd_first = rfd;
799 829
800 for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) { 830 for (i = 0; i < (p->num_recv_buffs + rfdadd); i++) {
801 rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) ); 831 writew(make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd)),
802 rfd[i].rbd_offset = 0xffff; 832 &rfd[i].next);
833 writew(0xffff, &rfd[i].rbd_offset);
803 } 834 }
804 rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */ 835 /* RU suspend */
836 writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last);
805 837
806 ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) ); 838 ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd));
807 839
808 rbd = (struct rbd_struct *) ptr; 840 rbd = (struct rbd_struct *) ptr;
809 ptr = (void *) (rbd + p->num_recv_buffs); 841 ptr = (void *) (rbd + p->num_recv_buffs);
810 842
811 /* clr descriptors */ 843 /* clr descriptors */
812 memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs)); 844 memset_io((char *)rbd, 0,
845 sizeof(struct rbd_struct) * (p->num_recv_buffs));
813 846
814 for(i=0;i<p->num_recv_buffs;i++) 847 for (i = 0; i < p->num_recv_buffs; i++) {
815 { 848 writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next);
816 rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs)); 849 writew(RECV_BUFF_SIZE, &rbd[i].size);
817 rbd[i].size = RECV_BUFF_SIZE; 850 writel(make24(ptr), &rbd[i].buffer);
818 rbd[i].buffer = make24(ptr);
819 ptr = (char *) ptr + RECV_BUFF_SIZE; 851 ptr = (char *) ptr + RECV_BUFF_SIZE;
820 } 852 }
821
822 p->rfd_top = p->rfd_first; 853 p->rfd_top = p->rfd_first;
823 p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd); 854 p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
824 855
825 p->scb->rfa_offset = make16(p->rfd_first); 856 writew(make16(p->rfd_first), &p->scb->rfa_offset);
826 p->rfd_first->rbd_offset = make16(rbd); 857 writew(make16(rbd), &p->rfd_first->rbd_offset);
827 858
828 return ptr; 859 return ptr;
829} 860}
@@ -833,73 +864,71 @@ static void *alloc_rfa(struct net_device *dev,void *ptr)
833 * Interrupt Handler ... 864 * Interrupt Handler ...
834 */ 865 */
835 866
836static irqreturn_t ni52_interrupt(int irq,void *dev_id) 867static irqreturn_t ni52_interrupt(int irq, void *dev_id)
837{ 868{
838 struct net_device *dev = dev_id; 869 struct net_device *dev = dev_id;
839 unsigned short stat; 870 unsigned int stat;
840 int cnt=0; 871 int cnt = 0;
841 struct priv *p; 872 struct priv *p;
842 873
843 if (!dev) {
844 printk ("ni5210-interrupt: irq %d for unknown device.\n",irq);
845 return IRQ_NONE;
846 }
847 p = (struct priv *) dev->priv; 874 p = (struct priv *) dev->priv;
848 875
849 if(debuglevel > 1) 876 if (debuglevel > 1)
850 printk("I"); 877 printk("I");
851 878
852 WAIT_4_SCB_CMD(); /* wait for last command */ 879 spin_lock(&p->spinlock);
853 880
854 while((stat=p->scb->cus & STAT_MASK)) 881 wait_for_scb_cmd(dev); /* wait for last command */
855 { 882
856 p->scb->cmd_cuc = stat; 883 while ((stat = readb(&p->scb->cus) & STAT_MASK)) {
884 writeb(stat, &p->scb->cmd_cuc);
857 ni_attn586(); 885 ni_attn586();
858 886
859 if(stat & STAT_FR) /* received a frame */ 887 if (stat & STAT_FR) /* received a frame */
860 ni52_rcv_int(dev); 888 ni52_rcv_int(dev);
861 889
862 if(stat & STAT_RNR) /* RU went 'not ready' */ 890 if (stat & STAT_RNR) { /* RU went 'not ready' */
863 {
864 printk("(R)"); 891 printk("(R)");
865 if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */ 892 if (readb(&p->scb->rus) & RU_SUSPEND) {
866 { 893 /* special case: RU_SUSPEND */
867 WAIT_4_SCB_CMD(); 894 wait_for_scb_cmd(dev);
868 p->scb->cmd_ruc = RUC_RESUME; 895 p->scb->cmd_ruc = RUC_RESUME;
869 ni_attn586(); 896 ni_attn586();
870 WAIT_4_SCB_CMD_RUC(); 897 wait_for_scb_cmd_ruc(dev);
871 } 898 } else {
872 else 899 printk(KERN_ERR "%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",
873 { 900 dev->name, stat, readb(&p->scb->rus));
874 printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus);
875 ni52_rnr_int(dev); 901 ni52_rnr_int(dev);
876 } 902 }
877 } 903 }
878 904
879 if(stat & STAT_CX) /* command with I-bit set complete */ 905 /* Command with I-bit set complete */
906 if (stat & STAT_CX)
880 ni52_xmt_int(dev); 907 ni52_xmt_int(dev);
881 908
882#ifndef NO_NOPCOMMANDS 909#ifndef NO_NOPCOMMANDS
883 if(stat & STAT_CNA) /* CU went 'not ready' */ 910 if (stat & STAT_CNA) { /* CU went 'not ready' */
884 { 911 if (netif_running(dev))
885 if(netif_running(dev)) 912 printk(KERN_ERR "%s: oops! CU has left active state. stat: %04x/%02x.\n",
886 printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus); 913 dev->name, stat, readb(&p->scb->cus));
887 } 914 }
888#endif 915#endif
889 916
890 if(debuglevel > 1) 917 if (debuglevel > 1)
891 printk("%d",cnt++); 918 printk("%d", cnt++);
892 919
893 WAIT_4_SCB_CMD(); /* wait for ack. (ni52_xmt_int can be faster than ack!!) */ 920 /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */
894 if(p->scb->cmd_cuc) /* timed out? */ 921 wait_for_scb_cmd(dev);
895 { 922 if (p->scb->cmd_cuc) { /* timed out? */
896 printk("%s: Acknowledge timed out.\n",dev->name); 923 printk(KERN_ERR "%s: Acknowledge timed out.\n",
924 dev->name);
897 ni_disint(); 925 ni_disint();
898 break; 926 break;
899 } 927 }
900 } 928 }
929 spin_unlock(&p->spinlock);
901 930
902 if(debuglevel > 1) 931 if (debuglevel > 1)
903 printk("i"); 932 printk("i");
904 return IRQ_HANDLED; 933 return IRQ_HANDLED;
905} 934}
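The interrupt-handler hunk above also swaps the old WAIT_4_SCB_CMD()/WAIT_4_STAT_COMPL() macros for wait_for_scb_cmd()/wait_for_stat_compl() helpers, whose bodies are not part of this excerpt. As a rough sketch only, such a helper is usually a bounded poll on a status word; the names, constants and loop count below are illustrative assumptions, not the driver's code:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical polling helper in the spirit of wait_for_stat_compl();
 * the real driver would read the status field of a descriptor in card
 * memory through readw() instead of a plain load. */
static int poll_for_completion(volatile uint16_t *status,
			       uint16_t done_bit, unsigned int max_loops)
{
	unsigned int i;

	for (i = 0; i < max_loops; i++) {
		if (*status & done_bit)
			return 0;	/* command completed */
		/* a kernel helper would udelay()/cpu_relax() here */
	}
	return -1;			/* timed out */
}

int main(void)
{
	uint16_t fake_status = 0x8000;	/* pretend the completion bit is set */

	if (poll_for_completion(&fake_status, 0x8000, 1000) == 0)
		printf("completed\n");
	else
		printf("timeout\n");
	return 0;
}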
@@ -910,121 +939,91 @@ static irqreturn_t ni52_interrupt(int irq,void *dev_id)
910 939
911static void ni52_rcv_int(struct net_device *dev) 940static void ni52_rcv_int(struct net_device *dev)
912{ 941{
913 int status,cnt=0; 942 int status, cnt = 0;
914 unsigned short totlen; 943 unsigned short totlen;
915 struct sk_buff *skb; 944 struct sk_buff *skb;
916 struct rbd_struct *rbd; 945 struct rbd_struct *rbd;
917 struct priv *p = (struct priv *) dev->priv; 946 struct priv *p = (struct priv *)dev->priv;
918 947
919 if(debuglevel > 0) 948 if (debuglevel > 0)
920 printk("R"); 949 printk("R");
921 950
922 for(;(status = p->rfd_top->stat_high) & RFD_COMPL;) 951 for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) {
923 { 952 rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
924 rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); 953 if (status & RFD_OK) { /* frame received without error? */
925 954 totlen = readw(&rbd->status);
926 if(status & RFD_OK) /* frame received without error? */ 955 if (totlen & RBD_LAST) {
927 { 956 /* the first and the last buffer? */
928 if( (totlen = rbd->status) & RBD_LAST) /* the first and the last buffer? */ 957 totlen &= RBD_MASK; /* length of this frame */
929 { 958 writew(0x00, &rbd->status);
930 totlen &= RBD_MASK; /* length of this frame */ 959 skb = (struct sk_buff *)dev_alloc_skb(totlen+2);
931 rbd->status = 0; 960 if (skb != NULL) {
932 skb = (struct sk_buff *) dev_alloc_skb(totlen+2); 961 skb_reserve(skb, 2);
933 if(skb != NULL) 962 skb_put(skb, totlen);
934 { 963 skb_copy_to_linear_data(skb, (char *)p->base + (unsigned long) rbd->buffer, totlen);
935 skb_reserve(skb,2); 964 skb->protocol = eth_type_trans(skb, dev);
936 skb_put(skb,totlen); 965 netif_rx(skb);
937 skb_copy_to_linear_data(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen); 966 dev->last_rx = jiffies;
938 skb->protocol=eth_type_trans(skb,dev); 967 p->stats.rx_packets++;
939 netif_rx(skb); 968 p->stats.rx_bytes += totlen;
940 dev->last_rx = jiffies; 969 } else
941 p->stats.rx_packets++; 970 p->stats.rx_dropped++;
942 p->stats.rx_bytes += totlen; 971 } else {
972 int rstat;
973 /* free all RBD's until RBD_LAST is set */
974 totlen = 0;
975 while (!((rstat = readw(&rbd->status)) & RBD_LAST)) {
976 totlen += rstat & RBD_MASK;
977 if (!rstat) {
978 printk(KERN_ERR "%s: Whoops .. no end mark in RBD list\n", dev->name);
979 break;
943 } 980 }
944 else 981 writew(0, &rbd->status);
945 p->stats.rx_dropped++; 982 rbd = (struct rbd_struct *) make32(readl(&rbd->next));
946 } 983 }
947 else 984 totlen += rstat & RBD_MASK;
948 { 985 writew(0, &rbd->status);
949 int rstat; 986 printk(KERN_ERR "%s: received oversized frame! length: %d\n",
950 /* free all RBD's until RBD_LAST is set */ 987 dev->name, totlen);
951 totlen = 0; 988 p->stats.rx_dropped++;
952 while(!((rstat=rbd->status) & RBD_LAST))
953 {
954 totlen += rstat & RBD_MASK;
955 if(!rstat)
956 {
957 printk("%s: Whoops .. no end mark in RBD list\n",dev->name);
958 break;
959 }
960 rbd->status = 0;
961 rbd = (struct rbd_struct *) make32(rbd->next);
962 }
963 totlen += rstat & RBD_MASK;
964 rbd->status = 0;
965 printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
966 p->stats.rx_dropped++;
967 } 989 }
968 } 990 } else {/* frame !(ok), only with 'save-bad-frames' */
969 else /* frame !(ok), only with 'save-bad-frames' */ 991 printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n",
970 { 992 dev->name, status);
971 printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
972 p->stats.rx_errors++; 993 p->stats.rx_errors++;
973 } 994 }
974 p->rfd_top->stat_high = 0; 995 writeb(0, &p->rfd_top->stat_high);
975 p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */ 996 writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */
976 p->rfd_top->rbd_offset = 0xffff; 997 writew(0xffff, &p->rfd_top->rbd_offset);
977 p->rfd_last->last = 0; /* delete RFD_SUSP */ 998 writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */
978 p->rfd_last = p->rfd_top; 999 p->rfd_last = p->rfd_top;
979 p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */ 1000 p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
980 p->scb->rfa_offset = make16(p->rfd_top); 1001 writew(make16(p->rfd_top), &p->scb->rfa_offset);
981 1002
982 if(debuglevel > 0) 1003 if (debuglevel > 0)
983 printk("%d",cnt++); 1004 printk("%d", cnt++);
984 } 1005 }
985 1006
986 if(automatic_resume) 1007 if (automatic_resume) {
987 { 1008 wait_for_scb_cmd(dev);
988 WAIT_4_SCB_CMD(); 1009 writeb(RUC_RESUME, &p->scb->cmd_ruc);
989 p->scb->cmd_ruc = RUC_RESUME;
990 ni_attn586(); 1010 ni_attn586();
991 WAIT_4_SCB_CMD_RUC(); 1011 wait_for_scb_cmd_ruc(dev);
992 } 1012 }
993 1013
994#ifdef WAIT_4_BUSY 1014#ifdef WAIT_4_BUSY
995 { 1015 {
996 int i; 1016 int i;
997 for(i=0;i<1024;i++) 1017 for (i = 0; i < 1024; i++) {
998 { 1018 if (p->rfd_top->status)
999 if(p->rfd_top->status)
1000 break; 1019 break;
1001 DELAY_16(); 1020 udelay(16);
1002 if(i == 1023) 1021 if (i == 1023)
1003 printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name); 1022 printk(KERN_ERR "%s: RU hasn't fetched next RFD (not busy/complete)\n", dev->name);
1004 } 1023 }
1005 } 1024 }
1006#endif 1025#endif
1007 1026 if (debuglevel > 0)
1008#if 0
1009 if(!at_least_one)
1010 {
1011 int i;
1012 volatile struct rfd_struct *rfds=p->rfd_top;
1013 volatile struct rbd_struct *rbds;
1014 printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least);
1015 for(i=0;i< (p->num_recv_buffs+4);i++)
1016 {
1017 rbds = (struct rbd_struct *) make32(rfds->rbd_offset);
1018 printk("%04x:%04x ",rfds->status,rbds->status);
1019 rfds = (struct rfd_struct *) make32(rfds->next);
1020 }
1021 printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status);
1022 printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus);
1023 }
1024 old_at_least = at_least_one;
1025#endif
1026
1027 if(debuglevel > 0)
1028 printk("r"); 1027 printk("r");
1029} 1028}
1030 1029
@@ -1038,16 +1037,16 @@ static void ni52_rnr_int(struct net_device *dev)
1038 1037
1039 p->stats.rx_errors++; 1038 p->stats.rx_errors++;
1040 1039
1041 WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ 1040 wait_for_scb_cmd(dev); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
1042 p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */ 1041 writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */
1043 ni_attn586(); 1042 ni_attn586();
1044 WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */ 1043 wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */
1045 1044
1046 alloc_rfa(dev,(char *)p->rfd_first); 1045 alloc_rfa(dev, (char *)p->rfd_first);
1047/* maybe add a check here, before restarting the RU */ 1046 /* maybe add a check here, before restarting the RU */
1048 startrecv586(dev); /* restart RU */ 1047 startrecv586(dev); /* restart RU */
1049 1048
1050 printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->rus); 1049 printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->rus);
1051 1050
1052} 1051}
1053 1052
@@ -1060,43 +1059,41 @@ static void ni52_xmt_int(struct net_device *dev)
1060 int status; 1059 int status;
1061 struct priv *p = (struct priv *) dev->priv; 1060 struct priv *p = (struct priv *) dev->priv;
1062 1061
1063 if(debuglevel > 0) 1062 if (debuglevel > 0)
1064 printk("X"); 1063 printk("X");
1065 1064
1066 status = p->xmit_cmds[p->xmit_last]->cmd_status; 1065 status = readw(&p->xmit_cmds[p->xmit_last]->cmd_status);
1067 if(!(status & STAT_COMPL)) 1066 if (!(status & STAT_COMPL))
1068 printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name); 1067 printk(KERN_ERR "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
1069 1068
1070 if(status & STAT_OK) 1069 if (status & STAT_OK) {
1071 {
1072 p->stats.tx_packets++; 1070 p->stats.tx_packets++;
1073 p->stats.collisions += (status & TCMD_MAXCOLLMASK); 1071 p->stats.collisions += (status & TCMD_MAXCOLLMASK);
1074 } 1072 } else {
1075 else
1076 {
1077 p->stats.tx_errors++; 1073 p->stats.tx_errors++;
1078 if(status & TCMD_LATECOLL) { 1074 if (status & TCMD_LATECOLL) {
1079 printk("%s: late collision detected.\n",dev->name); 1075 printk(KERN_ERR "%s: late collision detected.\n",
1076 dev->name);
1080 p->stats.collisions++; 1077 p->stats.collisions++;
1081 } 1078 } else if (status & TCMD_NOCARRIER) {
1082 else if(status & TCMD_NOCARRIER) {
1083 p->stats.tx_carrier_errors++; 1079 p->stats.tx_carrier_errors++;
1084 printk("%s: no carrier detected.\n",dev->name); 1080 printk(KERN_ERR "%s: no carrier detected.\n",
1085 } 1081 dev->name);
1086 else if(status & TCMD_LOSTCTS) 1082 } else if (status & TCMD_LOSTCTS)
1087 printk("%s: loss of CTS detected.\n",dev->name); 1083 printk(KERN_ERR "%s: loss of CTS detected.\n",
1088 else if(status & TCMD_UNDERRUN) { 1084 dev->name);
1085 else if (status & TCMD_UNDERRUN) {
1089 p->stats.tx_fifo_errors++; 1086 p->stats.tx_fifo_errors++;
1090 printk("%s: DMA underrun detected.\n",dev->name); 1087 printk(KERN_ERR "%s: DMA underrun detected.\n",
1091 } 1088 dev->name);
1092 else if(status & TCMD_MAXCOLL) { 1089 } else if (status & TCMD_MAXCOLL) {
1093 printk("%s: Max. collisions exceeded.\n",dev->name); 1090 printk(KERN_ERR "%s: Max. collisions exceeded.\n",
1091 dev->name);
1094 p->stats.collisions += 16; 1092 p->stats.collisions += 16;
1095 } 1093 }
1096 } 1094 }
1097
1098#if (NUM_XMIT_BUFFS > 1) 1095#if (NUM_XMIT_BUFFS > 1)
1099 if( (++p->xmit_last) == NUM_XMIT_BUFFS) 1096 if ((++p->xmit_last) == NUM_XMIT_BUFFS)
1100 p->xmit_last = 0; 1097 p->xmit_last = 0;
1101#endif 1098#endif
1102 netif_wake_queue(dev); 1099 netif_wake_queue(dev);
@@ -1110,41 +1107,51 @@ static void startrecv586(struct net_device *dev)
1110{ 1107{
1111 struct priv *p = (struct priv *) dev->priv; 1108 struct priv *p = (struct priv *) dev->priv;
1112 1109
1113 WAIT_4_SCB_CMD(); 1110 wait_for_scb_cmd(dev);
1114 WAIT_4_SCB_CMD_RUC(); 1111 wait_for_scb_cmd_ruc(dev);
1115 p->scb->rfa_offset = make16(p->rfd_first); 1112 writew(make16(p->rfd_first), &p->scb->rfa_offset);
1116 p->scb->cmd_ruc = RUC_START; 1113 writeb(RUC_START, &p->scb->cmd_ruc);
1117 ni_attn586(); /* start cmd. */ 1114 ni_attn586(); /* start cmd. */
1118 WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */ 1115 wait_for_scb_cmd_ruc(dev);
1116 /* wait for accept cmd. (no timeout!!) */
1119} 1117}
1120 1118
1121static void ni52_timeout(struct net_device *dev) 1119static void ni52_timeout(struct net_device *dev)
1122{ 1120{
1123 struct priv *p = (struct priv *) dev->priv; 1121 struct priv *p = (struct priv *) dev->priv;
1124#ifndef NO_NOPCOMMANDS 1122#ifndef NO_NOPCOMMANDS
1125 if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */ 1123 if (readb(&p->scb->cus) & CU_ACTIVE) { /* COMMAND-UNIT active? */
1126 {
1127 netif_wake_queue(dev); 1124 netif_wake_queue(dev);
1128#ifdef DEBUG 1125#ifdef DEBUG
1129 printk("%s: strange ... timeout with CU active?!?\n",dev->name); 1126 printk(KERN_ERR "%s: strange ... timeout with CU active?!?\n",
1130 printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)p->xmit_cmds[0]->cmd_status,(int)p->nop_cmds[0]->cmd_status,(int)p->nop_cmds[1]->cmd_status,(int)p->nop_point); 1127 dev->name);
1128 printk(KERN_ERR "%s: X0: %04x N0: %04x N1: %04x %d\n",
1129 dev->name, (int)p->xmit_cmds[0]->cmd_status,
1130 readw(&p->nop_cmds[0]->cmd_status),
1131 readw(&p->nop_cmds[1]->cmd_status),
1132 p->nop_point);
1131#endif 1133#endif
1132 p->scb->cmd_cuc = CUC_ABORT; 1134 writeb(CUC_ABORT, &p->scb->cmd_cuc);
1133 ni_attn586(); 1135 ni_attn586();
1134 WAIT_4_SCB_CMD(); 1136 wait_for_scb_cmd(dev);
1135 p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]); 1137 writew(make16(p->nop_cmds[p->nop_point]), &p->scb->cbl_offset);
1136 p->scb->cmd_cuc = CUC_START; 1138 writeb(CUC_START, &p->scb->cmd_cuc);
1137 ni_attn586(); 1139 ni_attn586();
1138 WAIT_4_SCB_CMD(); 1140 wait_for_scb_cmd(dev);
1139 dev->trans_start = jiffies; 1141 dev->trans_start = jiffies;
1140 return 0; 1142 return 0;
1141 } 1143 }
1142#endif 1144#endif
1143 { 1145 {
1144#ifdef DEBUG 1146#ifdef DEBUG
1145 printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus); 1147 printk(KERN_ERR "%s: xmitter timed out, try to restart! stat: %02x\n",
1146 printk("%s: command-stats: %04x %04x\n",dev->name,p->xmit_cmds[0]->cmd_status,p->xmit_cmds[1]->cmd_status); 1148 dev->name, readb(&p->scb->cus));
1147 printk("%s: check, whether you set the right interrupt number!\n",dev->name); 1149 printk(KERN_ERR "%s: command-stats: %04x %04x\n",
1150 dev->name,
1151 readw(&p->xmit_cmds[0]->cmd_status),
1152 readw(&p->xmit_cmds[1]->cmd_status));
1153 printk(KERN_ERR "%s: check, whether you set the right interrupt number!\n",
1154 dev->name);
1148#endif 1155#endif
1149 ni52_close(dev); 1156 ni52_close(dev);
1150 ni52_open(dev); 1157 ni52_open(dev);
@@ -1158,110 +1165,99 @@ static void ni52_timeout(struct net_device *dev)
1158 1165
1159static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev) 1166static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
1160{ 1167{
1161 int len,i; 1168 int len, i;
1162#ifndef NO_NOPCOMMANDS 1169#ifndef NO_NOPCOMMANDS
1163 int next_nop; 1170 int next_nop;
1164#endif 1171#endif
1165 struct priv *p = (struct priv *) dev->priv; 1172 struct priv *p = (struct priv *) dev->priv;
1166 1173
1167 if(skb->len > XMIT_BUFF_SIZE) 1174 if (skb->len > XMIT_BUFF_SIZE) {
1168 { 1175 printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len);
1169 printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
1170 return 0; 1176 return 0;
1171 } 1177 }
1172 1178
1173 netif_stop_queue(dev); 1179 netif_stop_queue(dev);
1174 1180
1175#if(NUM_XMIT_BUFFS > 1) 1181 skb_copy_from_linear_data(skb, (char *)p->xmit_cbuffs[p->xmit_count],
1176 if(test_and_set_bit(0,(void *) &p->lock)) { 1182 skb->len);
1177 printk("%s: Queue was locked\n",dev->name); 1183 len = skb->len;
1178 return 1; 1184 if (len < ETH_ZLEN) {
1185 len = ETH_ZLEN;
1186 memset((char *)p->xmit_cbuffs[p->xmit_count]+skb->len, 0,
1187 len - skb->len);
1179 } 1188 }
1180 else
1181#endif
1182 {
1183 skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len);
1184 len = skb->len;
1185 if (len < ETH_ZLEN) {
1186 len = ETH_ZLEN;
1187 memset((char *)p->xmit_cbuffs[p->xmit_count]+skb->len, 0, len - skb->len);
1188 }
1189 1189
1190#if (NUM_XMIT_BUFFS == 1) 1190#if (NUM_XMIT_BUFFS == 1)
1191# ifdef NO_NOPCOMMANDS 1191# ifdef NO_NOPCOMMANDS
1192 1192
1193#ifdef DEBUG 1193#ifdef DEBUG
1194 if(p->scb->cus & CU_ACTIVE) 1194 if (p->scb->cus & CU_ACTIVE) {
1195 { 1195 printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name);
1196 printk("%s: Hmmm .. CU is still running and we wanna send a new packet.\n",dev->name); 1196 printk(KERN_ERR "%s: stat: %04x %04x\n",
1197 printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,p->xmit_cmds[0]->cmd_status); 1197 dev->name, readb(&p->scb->cus),
1198 } 1198 readw(&p->xmit_cmds[0]->cmd_status));
1199 }
1199#endif 1200#endif
1200 1201 writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
1201 p->xmit_buffs[0]->size = TBD_LAST | len; 1202 for (i = 0; i < 16; i++) {
1202 for(i=0;i<16;i++) 1203 writew(0, &p->xmit_cmds[0]->cmd_status);
1203 { 1204 wait_for_scb_cmd(dev);
1204 p->xmit_cmds[0]->cmd_status = 0; 1205 if ((readb(&p->scb->cus) & CU_STATUS) == CU_SUSPEND)
1205 WAIT_4_SCB_CMD(); 1206 writeb(CUC_RESUME, &p->scb->cmd_cuc);
1206 if( (p->scb->cus & CU_STATUS) == CU_SUSPEND) 1207 else {
1207 p->scb->cmd_cuc = CUC_RESUME; 1208 writew(make16(p->xmit_cmds[0]), &p->scb->cbl_offset);
1208 else 1209 writeb(CUC_START, &p->scb->cmd_cuc);
1209 {
1210 p->scb->cbl_offset = make16(p->xmit_cmds[0]);
1211 p->scb->cmd_cuc = CUC_START;
1212 }
1213
1214 ni_attn586();
1215 dev->trans_start = jiffies;
1216 if(!i)
1217 dev_kfree_skb(skb);
1218 WAIT_4_SCB_CMD();
1219 if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */
1220 break;
1221 if(p->xmit_cmds[0]->cmd_status)
1222 break;
1223 if(i==15)
1224 printk("%s: Can't start transmit-command.\n",dev->name);
1225 } 1210 }
1226# else 1211 ni_attn586();
1227 next_nop = (p->nop_point + 1) & 0x1;
1228 p->xmit_buffs[0]->size = TBD_LAST | len;
1229
1230 p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
1231 = make16((p->nop_cmds[next_nop]));
1232 p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
1233
1234 p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
1235 dev->trans_start = jiffies; 1212 dev->trans_start = jiffies;
1236 p->nop_point = next_nop; 1213 if (!i)
1237 dev_kfree_skb(skb); 1214 dev_kfree_skb(skb);
1215 wait_for_scb_cmd(dev);
1216 /* test it, because CU sometimes doesn't start immediately */
1217 if (readb(&p->scb->cus) & CU_ACTIVE)
1218 break;
1219 if (readw(&p->xmit_cmds[0]->cmd_status))
1220 break;
1221 if (i == 15)
1222 printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
1223 }
1224# else
1225 next_nop = (p->nop_point + 1) & 0x1;
1226 writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
1227 writew(make16(p->nop_cmds[next_nop]), &p->xmit_cmds[0]->cmd_link);
1228 writew(make16(p->nop_cmds[next_nop]),
1229 &p->nop_cmds[next_nop]->cmd_link);
1230 writew(0, &p->xmit_cmds[0]->cmd_status);
1231 writew(0, &p->nop_cmds[next_nop]->cmd_status);
1232
1233 writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
1234 dev->trans_start = jiffies;
1235 p->nop_point = next_nop;
1236 dev_kfree_skb(skb);
1238# endif 1237# endif
1239#else 1238#else
1240 p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len; 1239 writew(TBD_LAST | len, &p->xmit_buffs[p->xmit_count]->size);
1241 if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS ) 1240 next_nop = p->xmit_count + 1;
1242 next_nop = 0; 1241 if (next_nop == NUM_XMIT_BUFFS)
1243 1242 next_nop = 0;
1244 p->xmit_cmds[p->xmit_count]->cmd_status = 0; 1243 writew(0, &p->xmit_cmds[p->xmit_count]->cmd_status);
1245 /* linkpointer of xmit-command already points to next nop cmd */ 1244 /* linkpointer of xmit-command already points to next nop cmd */
1246 p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop])); 1245 writew(make16(p->nop_cmds[next_nop]),
1247 p->nop_cmds[next_nop]->cmd_status = 0; 1246 &p->nop_cmds[next_nop]->cmd_link);
1248 1247 writew(0, &p->nop_cmds[next_nop]->cmd_status);
1249 p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count])); 1248 writew(make16(p->xmit_cmds[p->xmit_count]),
1250 dev->trans_start = jiffies; 1249 &p->nop_cmds[p->xmit_count]->cmd_link);
1251 p->xmit_count = next_nop; 1250 dev->trans_start = jiffies;
1252 1251 p->xmit_count = next_nop;
1253 { 1252 {
1254 unsigned long flags; 1253 unsigned long flags;
1255 save_flags(flags); 1254 spin_lock_irqsave(&p->spinlock, flags);
1256 cli(); 1255 if (p->xmit_count != p->xmit_last)
1257 if(p->xmit_count != p->xmit_last) 1256 netif_wake_queue(dev);
1258 netif_wake_queue(dev); 1257 spin_unlock_irqrestore(&p->spinlock, flags);
1259 p->lock = 0;
1260 restore_flags(flags);
1261 }
1262 dev_kfree_skb(skb);
1263#endif
1264 } 1258 }
1259 dev_kfree_skb(skb);
1260#endif
1265 return 0; 1261 return 0;
1266} 1262}
1267 1263
@@ -1272,16 +1268,17 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
1272static struct net_device_stats *ni52_get_stats(struct net_device *dev) 1268static struct net_device_stats *ni52_get_stats(struct net_device *dev)
1273{ 1269{
1274 struct priv *p = (struct priv *) dev->priv; 1270 struct priv *p = (struct priv *) dev->priv;
1275 unsigned short crc,aln,rsc,ovrn; 1271 unsigned short crc, aln, rsc, ovrn;
1276 1272
1277 crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */ 1273 /* Get error-statistics from the ni82586 */
1278 p->scb->crc_errs = 0; 1274 crc = readw(&p->scb->crc_errs);
1279 aln = p->scb->aln_errs; 1275 writew(0, &p->scb->crc_errs);
1280 p->scb->aln_errs = 0; 1276 aln = readw(&p->scb->aln_errs);
1281 rsc = p->scb->rsc_errs; 1277 writew(0, &p->scb->aln_errs);
1282 p->scb->rsc_errs = 0; 1278 rsc = readw(&p->scb->rsc_errs);
1283 ovrn = p->scb->ovrn_errs; 1279 writew(0, &p->scb->rsc_errs);
1284 p->scb->ovrn_errs = 0; 1280 ovrn = readw(&p->scb->ovrn_errs);
1281 writew(0, &p->scb->ovrn_errs);
1285 1282
1286 p->stats.rx_crc_errors += crc; 1283 p->stats.rx_crc_errors += crc;
1287 p->stats.rx_fifo_errors += ovrn; 1284 p->stats.rx_fifo_errors += ovrn;
@@ -1320,8 +1317,9 @@ MODULE_PARM_DESC(memend, "NI5210 memory end address,required");
1320 1317
1321int __init init_module(void) 1318int __init init_module(void)
1322{ 1319{
1323 if(io <= 0x0 || !memend || !memstart || irq < 2) { 1320 if (io <= 0x0 || !memend || !memstart || irq < 2) {
1324 printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); 1321 printk(KERN_ERR "ni52: Autoprobing not allowed for modules.\n");
1322 printk(KERN_ERR "ni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
1325 return -ENODEV; 1323 return -ENODEV;
1326 } 1324 }
1327 dev_ni52 = ni52_probe(-1); 1325 dev_ni52 = ni52_probe(-1);
@@ -1338,42 +1336,6 @@ void __exit cleanup_module(void)
1338} 1336}
1339#endif /* MODULE */ 1337#endif /* MODULE */
1340 1338
1341#if 0
1342/*
1343 * DUMP .. we expect a not running CMD unit and enough space
1344 */
1345void ni52_dump(struct net_device *dev,void *ptr)
1346{
1347 struct priv *p = (struct priv *) dev->priv;
1348 struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr;
1349 int i;
1350
1351 p->scb->cmd_cuc = CUC_ABORT;
1352 ni_attn586();
1353 WAIT_4_SCB_CMD();
1354 WAIT_4_SCB_CMD_RUC();
1355
1356 dump_cmd->cmd_status = 0;
1357 dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST;
1358 dump_cmd->dump_offset = make16((dump_cmd + 1));
1359 dump_cmd->cmd_link = 0xffff;
1360
1361 p->scb->cbl_offset = make16(dump_cmd);
1362 p->scb->cmd_cuc = CUC_START;
1363 ni_attn586();
1364 WAIT_4_STAT_COMPL(dump_cmd);
1365
1366 if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
1367 printk("%s: Can't get dump information.\n",dev->name);
1368
1369 for(i=0;i<170;i++) {
1370 printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]);
1371 if(i % 24 == 23)
1372 printk("\n");
1373 }
1374 printk("\n");
1375}
1376#endif
1377MODULE_LICENSE("GPL"); 1339MODULE_LICENSE("GPL");
1378 1340
1379/* 1341/*
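The ni52.c changes above follow one mechanical pattern: every direct load or store to a descriptor living in the card's shared memory window is replaced by a sized accessor (readb/readw, writeb/writew/writel, memcpy_toio/memset_io). Below is a minimal userspace sketch of that pattern; the stub accessors are assumptions standing in for the kernel's MMIO helpers, which additionally guarantee access width and ordering on real bus memory.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's writew()/readw(). */
static inline void stub_writew(uint16_t val, volatile uint16_t *addr)
{
	*addr = val;
}

static inline uint16_t stub_readw(const volatile uint16_t *addr)
{
	return *addr;
}

/* A cut-down command block, laid out like nop_cmd_struct in ni52.h. */
struct nop_cmd {
	uint16_t cmd_status;
	uint16_t cmd_cmd;
	uint16_t cmd_link;
};

int main(void)
{
	static uint16_t shared_mem[32];		/* pretend board RAM window */
	struct nop_cmd *cmd = (struct nop_cmd *)shared_mem;

	/* Old style: cmd->cmd_status = 0; cmd->cmd_link = 0xffff;
	 * New style: every access goes through a sized accessor. */
	stub_writew(0, &cmd->cmd_status);
	stub_writew(0, &cmd->cmd_cmd);		/* command code (CMD_NOP) */
	stub_writew(0xffff, &cmd->cmd_link);	/* end of list */

	printf("status=%04x link=%04x\n",
	       stub_readw(&cmd->cmd_status), stub_readw(&cmd->cmd_link));
	return 0;
}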
diff --git a/drivers/net/ni52.h b/drivers/net/ni52.h
index a33ea0884aaf..1f28a4d1a319 100644
--- a/drivers/net/ni52.h
+++ b/drivers/net/ni52.h
@@ -36,12 +36,12 @@
36 36
37struct scp_struct 37struct scp_struct
38{ 38{
39 unsigned short zero_dum0; /* has to be zero */ 39 u16 zero_dum0; /* has to be zero */
40 unsigned char sysbus; /* 0=16Bit,1=8Bit */ 40 u8 sysbus; /* 0=16Bit,1=8Bit */
41 unsigned char zero_dum1; /* has to be zero for 586 */ 41 u8 zero_dum1; /* has to be zero for 586 */
42 unsigned short zero_dum2; 42 u8 zero_dum2;
43 unsigned short zero_dum3; 43 u8 zero_dum3;
44 char *iscp; /* pointer to the iscp-block */ 44 u32 iscp; /* pointer to the iscp-block */
45}; 45};
46 46
47 47
@@ -50,10 +50,10 @@ struct scp_struct
50 */ 50 */
51struct iscp_struct 51struct iscp_struct
52{ 52{
53 unsigned char busy; /* 586 clears after successful init */ 53 u8 busy; /* 586 clears after successful init */
54 unsigned char zero_dummy; /* has to be zero */ 54 u8 zero_dummy; /* has to be zero */
55 unsigned short scb_offset; /* pointeroffset to the scb_base */ 55 u16 scb_offset; /* pointeroffset to the scb_base */
56 char *scb_base; /* base-address of all 16-bit offsets */ 56 u32 scb_base; /* base-address of all 16-bit offsets */
57}; 57};
58 58
59/* 59/*
@@ -61,16 +61,16 @@ struct iscp_struct
61 */ 61 */
62struct scb_struct 62struct scb_struct
63{ 63{
64 unsigned char rus; 64 u8 rus;
65 unsigned char cus; 65 u8 cus;
66 unsigned char cmd_ruc; /* command word: RU part */ 66 u8 cmd_ruc; /* command word: RU part */
67 unsigned char cmd_cuc; /* command word: CU part & ACK */ 67 u8 cmd_cuc; /* command word: CU part & ACK */
68 unsigned short cbl_offset; /* pointeroffset, command block list */ 68 u16 cbl_offset; /* pointeroffset, command block list */
69 unsigned short rfa_offset; /* pointeroffset, receive frame area */ 69 u16 rfa_offset; /* pointeroffset, receive frame area */
70 unsigned short crc_errs; /* CRC-Error counter */ 70 u16 crc_errs; /* CRC-Error counter */
71 unsigned short aln_errs; /* alignmenterror counter */ 71 u16 aln_errs; /* alignmenterror counter */
72 unsigned short rsc_errs; /* Resourceerror counter */ 72 u16 rsc_errs; /* Resourceerror counter */
73 unsigned short ovrn_errs; /* OVerrunerror counter */ 73 u16 ovrn_errs; /* OVerrunerror counter */
74}; 74};
75 75
76/* 76/*
@@ -119,16 +119,16 @@ struct scb_struct
119 */ 119 */
120struct rfd_struct 120struct rfd_struct
121{ 121{
122 unsigned char stat_low; /* status word */ 122 u8 stat_low; /* status word */
123 unsigned char stat_high; /* status word */ 123 u8 stat_high; /* status word */
124 unsigned char rfd_sf; /* 82596 mode only */ 124 u8 rfd_sf; /* 82596 mode only */
125 unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */ 125 u8 last; /* Bit15,Last Frame on List / Bit14,suspend */
126 unsigned short next; /* linkoffset to next RFD */ 126 u16 next; /* linkoffset to next RFD */
127 unsigned short rbd_offset; /* pointeroffset to RBD-buffer */ 127 u16 rbd_offset; /* pointeroffset to RBD-buffer */
128 unsigned char dest[6]; /* ethernet-address, destination */ 128 u8 dest[6]; /* ethernet-address, destination */
129 unsigned char source[6]; /* ethernet-address, source */ 129 u8 source[6]; /* ethernet-address, source */
130 unsigned short length; /* 802.3 frame-length */ 130 u16 length; /* 802.3 frame-length */
131 unsigned short zero_dummy; /* dummy */ 131 u16 zero_dummy; /* dummy */
132}; 132};
133 133
134#define RFD_LAST 0x80 /* last: last rfd in the list */ 134#define RFD_LAST 0x80 /* last: last rfd in the list */
@@ -153,11 +153,11 @@ struct rfd_struct
153 */ 153 */
154struct rbd_struct 154struct rbd_struct
155{ 155{
156 unsigned short status; /* status word,number of used bytes in buff */ 156 u16 status; /* status word,number of used bytes in buff */
157 unsigned short next; /* pointeroffset to next RBD */ 157 u16 next; /* pointeroffset to next RBD */
158 char *buffer; /* receive buffer address pointer */ 158 u32 buffer; /* receive buffer address pointer */
159 unsigned short size; /* size of this buffer */ 159 u16 size; /* size of this buffer */
160 unsigned short zero_dummy; /* dummy */ 160 u16 zero_dummy; /* dummy */
161}; 161};
162 162
163#define RBD_LAST 0x8000 /* last buffer */ 163#define RBD_LAST 0x8000 /* last buffer */
@@ -195,9 +195,9 @@ struct rbd_struct
195 */ 195 */
196struct nop_cmd_struct 196struct nop_cmd_struct
197{ 197{
198 unsigned short cmd_status; /* status of this command */ 198 u16 cmd_status; /* status of this command */
199 unsigned short cmd_cmd; /* the command itself (+bits) */ 199 u16 cmd_cmd; /* the command itself (+bits) */
200 unsigned short cmd_link; /* offsetpointer to next command */ 200 u16 cmd_link; /* offsetpointer to next command */
201}; 201};
202 202
203/* 203/*
@@ -205,10 +205,10 @@ struct nop_cmd_struct
205 */ 205 */
206struct iasetup_cmd_struct 206struct iasetup_cmd_struct
207{ 207{
208 unsigned short cmd_status; 208 u16 cmd_status;
209 unsigned short cmd_cmd; 209 u16 cmd_cmd;
210 unsigned short cmd_link; 210 u16 cmd_link;
211 unsigned char iaddr[6]; 211 u8 iaddr[6];
212}; 212};
213 213
214/* 214/*
@@ -216,21 +216,21 @@ struct iasetup_cmd_struct
216 */ 216 */
217struct configure_cmd_struct 217struct configure_cmd_struct
218{ 218{
219 unsigned short cmd_status; 219 u16 cmd_status;
220 unsigned short cmd_cmd; 220 u16 cmd_cmd;
221 unsigned short cmd_link; 221 u16 cmd_link;
222 unsigned char byte_cnt; /* size of the config-cmd */ 222 u8 byte_cnt; /* size of the config-cmd */
223 unsigned char fifo; /* fifo/recv monitor */ 223 u8 fifo; /* fifo/recv monitor */
224 unsigned char sav_bf; /* save bad frames (bit7=1)*/ 224 u8 sav_bf; /* save bad frames (bit7=1)*/
225 unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/ 225 u8 adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
226 unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */ 226 u8 priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
227 unsigned char ifs; /* inter frame spacing */ 227 u8 ifs; /* inter frame spacing */
228 unsigned char time_low; /* slot time low */ 228 u8 time_low; /* slot time low */
229 unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */ 229 u8 time_high; /* slot time high(0-2) and max. retries(4-7) */
230 unsigned char promisc; /* promisc-mode(0) , et al (1-7) */ 230 u8 promisc; /* promisc-mode(0) , et al (1-7) */
231 unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */ 231 u8 carr_coll; /* carrier(0-3)/collision(4-7) stuff */
232 unsigned char fram_len; /* minimal frame len */ 232 u8 fram_len; /* minimal frame len */
233 unsigned char dummy; /* dummy */ 233 u8 dummy; /* dummy */
234}; 234};
235 235
236/* 236/*
@@ -238,11 +238,11 @@ struct configure_cmd_struct
238 */ 238 */
239struct mcsetup_cmd_struct 239struct mcsetup_cmd_struct
240{ 240{
241 unsigned short cmd_status; 241 u16 cmd_status;
242 unsigned short cmd_cmd; 242 u16 cmd_cmd;
243 unsigned short cmd_link; 243 u16 cmd_link;
244 unsigned short mc_cnt; /* number of bytes in the MC-List */ 244 u16 mc_cnt; /* number of bytes in the MC-List */
245 unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */ 245 u8 mc_list[0][6]; /* pointer to 6 bytes entries */
246}; 246};
247 247
248/* 248/*
@@ -250,10 +250,10 @@ struct mcsetup_cmd_struct
250 */ 250 */
251struct dump_cmd_struct 251struct dump_cmd_struct
252{ 252{
253 unsigned short cmd_status; 253 u16 cmd_status;
254 unsigned short cmd_cmd; 254 u16 cmd_cmd;
255 unsigned short cmd_link; 255 u16 cmd_link;
256 unsigned short dump_offset; /* pointeroffset to DUMP space */ 256 u16 dump_offset; /* pointeroffset to DUMP space */
257}; 257};
258 258
259/* 259/*
@@ -261,12 +261,12 @@ struct dump_cmd_struct
261 */ 261 */
262struct transmit_cmd_struct 262struct transmit_cmd_struct
263{ 263{
264 unsigned short cmd_status; 264 u16 cmd_status;
265 unsigned short cmd_cmd; 265 u16 cmd_cmd;
266 unsigned short cmd_link; 266 u16 cmd_link;
267 unsigned short tbd_offset; /* pointeroffset to TBD */ 267 u16 tbd_offset; /* pointeroffset to TBD */
268 unsigned char dest[6]; /* destination address of the frame */ 268 u8 dest[6]; /* destination address of the frame */
269 unsigned short length; /* user defined: 802.3 length / Ether type */ 269 u16 length; /* user defined: 802.3 length / Ether type */
270}; 270};
271 271
272#define TCMD_ERRMASK 0x0fa0 272#define TCMD_ERRMASK 0x0fa0
@@ -281,10 +281,10 @@ struct transmit_cmd_struct
281 281
282struct tdr_cmd_struct 282struct tdr_cmd_struct
283{ 283{
284 unsigned short cmd_status; 284 u16 cmd_status;
285 unsigned short cmd_cmd; 285 u16 cmd_cmd;
286 unsigned short cmd_link; 286 u16 cmd_link;
287 unsigned short status; 287 u16 status;
288}; 288};
289 289
290#define TDR_LNK_OK 0x8000 /* No link problem identified */ 290#define TDR_LNK_OK 0x8000 /* No link problem identified */
@@ -298,9 +298,9 @@ struct tdr_cmd_struct
298 */ 298 */
299struct tbd_struct 299struct tbd_struct
300{ 300{
301 unsigned short size; /* size + EOF-Flag(15) */ 301 u16 size; /* size + EOF-Flag(15) */
302 unsigned short next; /* pointeroffset to next TBD */ 302 u16 next; /* pointeroffset to next TBD */
303 char *buffer; /* pointer to buffer */ 303 u32 buffer; /* pointer to buffer */
304}; 304};
305 305
306#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */ 306#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
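The ni52.h changes replace unsigned short / char * with u16 / u32 because these structures describe fixed on-card layouts: a host pointer is 8 bytes on a 64-bit build, so a char *buffer field silently changes the descriptor's size and every following offset. A small sketch of the effect, mirroring rbd_struct above and using the userspace stdint equivalents of the kernel's u16/u32:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Old-style receive buffer descriptor: field sizes depend on the host ABI. */
struct rbd_old {
	unsigned short status;
	unsigned short next;
	char *buffer;		/* 4 bytes on 32-bit hosts, 8 on 64-bit */
	unsigned short size;
	unsigned short zero_dummy;
};

/* Fixed-width version, matching what the descriptor layout requires. */
struct rbd_new {
	uint16_t status;
	uint16_t next;
	uint32_t buffer;	/* card address kept in 32 bits */
	uint16_t size;
	uint16_t zero_dummy;
};

int main(void)
{
	printf("old: size=%zu, offsetof(size)=%zu\n",
	       sizeof(struct rbd_old), offsetof(struct rbd_old, size));
	printf("new: size=%zu, offsetof(size)=%zu\n",
	       sizeof(struct rbd_new), offsetof(struct rbd_new, size));
	return 0;
}

On a typical LP64 build the old layout grows to 24 bytes and shifts size to offset 16, while the fixed-width version stays at 12 bytes with size at offset 8, which is the point of the conversion.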
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index c4b74e9fed20..4eb322e5273d 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -174,7 +174,11 @@ static int homepna[MAX_UNITS];
174#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS)) 174#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
175#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS)) 175#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
176 176
177#define PKT_BUF_SZ 1544 177#define PKT_BUF_SKB 1544
178/* actual buffer length after being aligned */
179#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN)
180/* chip wants twos complement of the (aligned) buffer length */
181#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB)
178 182
179/* Offsets from base I/O address. */ 183/* Offsets from base I/O address. */
180#define PCNET32_WIO_RDP 0x10 184#define PCNET32_WIO_RDP 0x10
@@ -604,7 +608,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
604 /* now allocate any new buffers needed */ 608 /* now allocate any new buffers needed */
605 for (; new < size; new++ ) { 609 for (; new < size; new++ ) {
606 struct sk_buff *rx_skbuff; 610 struct sk_buff *rx_skbuff;
607 new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ); 611 new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
608 if (!(rx_skbuff = new_skb_list[new])) { 612 if (!(rx_skbuff = new_skb_list[new])) {
609 /* keep the original lists and buffers */ 613 /* keep the original lists and buffers */
610 if (netif_msg_drv(lp)) 614 if (netif_msg_drv(lp))
@@ -613,20 +617,20 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
613 dev->name); 617 dev->name);
614 goto free_all_new; 618 goto free_all_new;
615 } 619 }
616 skb_reserve(rx_skbuff, 2); 620 skb_reserve(rx_skbuff, NET_IP_ALIGN);
617 621
618 new_dma_addr_list[new] = 622 new_dma_addr_list[new] =
619 pci_map_single(lp->pci_dev, rx_skbuff->data, 623 pci_map_single(lp->pci_dev, rx_skbuff->data,
620 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 624 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
621 new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); 625 new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
622 new_rx_ring[new].buf_length = cpu_to_le16(2 - PKT_BUF_SZ); 626 new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
623 new_rx_ring[new].status = cpu_to_le16(0x8000); 627 new_rx_ring[new].status = cpu_to_le16(0x8000);
624 } 628 }
625 /* and free any unneeded buffers */ 629 /* and free any unneeded buffers */
626 for (; new < lp->rx_ring_size; new++) { 630 for (; new < lp->rx_ring_size; new++) {
627 if (lp->rx_skbuff[new]) { 631 if (lp->rx_skbuff[new]) {
628 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new], 632 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
629 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 633 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
630 dev_kfree_skb(lp->rx_skbuff[new]); 634 dev_kfree_skb(lp->rx_skbuff[new]);
631 } 635 }
632 } 636 }
@@ -651,7 +655,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
651 for (; --new >= lp->rx_ring_size; ) { 655 for (; --new >= lp->rx_ring_size; ) {
652 if (new_skb_list[new]) { 656 if (new_skb_list[new]) {
653 pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], 657 pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
654 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 658 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
655 dev_kfree_skb(new_skb_list[new]); 659 dev_kfree_skb(new_skb_list[new]);
656 } 660 }
657 } 661 }
@@ -678,7 +682,7 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
678 wmb(); /* Make sure adapter sees owner change */ 682 wmb(); /* Make sure adapter sees owner change */
679 if (lp->rx_skbuff[i]) { 683 if (lp->rx_skbuff[i]) {
680 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], 684 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
681 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 685 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
682 dev_kfree_skb_any(lp->rx_skbuff[i]); 686 dev_kfree_skb_any(lp->rx_skbuff[i]);
683 } 687 }
684 lp->rx_skbuff[i] = NULL; 688 lp->rx_skbuff[i] = NULL;
@@ -1201,7 +1205,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
1201 pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4; 1205 pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
1202 1206
1203 /* Discard oversize frames. */ 1207 /* Discard oversize frames. */
1204 if (unlikely(pkt_len > PKT_BUF_SZ - 2)) { 1208 if (unlikely(pkt_len > PKT_BUF_SIZE)) {
1205 if (netif_msg_drv(lp)) 1209 if (netif_msg_drv(lp))
1206 printk(KERN_ERR "%s: Impossible packet size %d!\n", 1210 printk(KERN_ERR "%s: Impossible packet size %d!\n",
1207 dev->name, pkt_len); 1211 dev->name, pkt_len);
@@ -1218,26 +1222,26 @@ static void pcnet32_rx_entry(struct net_device *dev,
1218 if (pkt_len > rx_copybreak) { 1222 if (pkt_len > rx_copybreak) {
1219 struct sk_buff *newskb; 1223 struct sk_buff *newskb;
1220 1224
1221 if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) { 1225 if ((newskb = dev_alloc_skb(PKT_BUF_SKB))) {
1222 skb_reserve(newskb, 2); 1226 skb_reserve(newskb, NET_IP_ALIGN);
1223 skb = lp->rx_skbuff[entry]; 1227 skb = lp->rx_skbuff[entry];
1224 pci_unmap_single(lp->pci_dev, 1228 pci_unmap_single(lp->pci_dev,
1225 lp->rx_dma_addr[entry], 1229 lp->rx_dma_addr[entry],
1226 PKT_BUF_SZ - 2, 1230 PKT_BUF_SIZE,
1227 PCI_DMA_FROMDEVICE); 1231 PCI_DMA_FROMDEVICE);
1228 skb_put(skb, pkt_len); 1232 skb_put(skb, pkt_len);
1229 lp->rx_skbuff[entry] = newskb; 1233 lp->rx_skbuff[entry] = newskb;
1230 lp->rx_dma_addr[entry] = 1234 lp->rx_dma_addr[entry] =
1231 pci_map_single(lp->pci_dev, 1235 pci_map_single(lp->pci_dev,
1232 newskb->data, 1236 newskb->data,
1233 PKT_BUF_SZ - 2, 1237 PKT_BUF_SIZE,
1234 PCI_DMA_FROMDEVICE); 1238 PCI_DMA_FROMDEVICE);
1235 rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]); 1239 rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
1236 rx_in_place = 1; 1240 rx_in_place = 1;
1237 } else 1241 } else
1238 skb = NULL; 1242 skb = NULL;
1239 } else { 1243 } else {
1240 skb = dev_alloc_skb(pkt_len + 2); 1244 skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
1241 } 1245 }
1242 1246
1243 if (skb == NULL) { 1247 if (skb == NULL) {
@@ -1250,7 +1254,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
1250 } 1254 }
1251 skb->dev = dev; 1255 skb->dev = dev;
1252 if (!rx_in_place) { 1256 if (!rx_in_place) {
1253 skb_reserve(skb, 2); /* 16 byte align */ 1257 skb_reserve(skb, NET_IP_ALIGN);
1254 skb_put(skb, pkt_len); /* Make room */ 1258 skb_put(skb, pkt_len); /* Make room */
1255 pci_dma_sync_single_for_cpu(lp->pci_dev, 1259 pci_dma_sync_single_for_cpu(lp->pci_dev,
1256 lp->rx_dma_addr[entry], 1260 lp->rx_dma_addr[entry],
@@ -1291,7 +1295,7 @@ static int pcnet32_rx(struct net_device *dev, int budget)
1291 * The docs say that the buffer length isn't touched, but Andrew 1295 * The docs say that the buffer length isn't touched, but Andrew
1292 * Boyd of QNX reports that some revs of the 79C965 clear it. 1296 * Boyd of QNX reports that some revs of the 79C965 clear it.
1293 */ 1297 */
1294 rxp->buf_length = cpu_to_le16(2 - PKT_BUF_SZ); 1298 rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
1295 wmb(); /* Make sure owner changes after others are visible */ 1299 wmb(); /* Make sure owner changes after others are visible */
1296 rxp->status = cpu_to_le16(0x8000); 1300 rxp->status = cpu_to_le16(0x8000);
1297 entry = (++lp->cur_rx) & lp->rx_mod_mask; 1301 entry = (++lp->cur_rx) & lp->rx_mod_mask;
@@ -1774,8 +1778,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1774 memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); 1778 memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
1775 1779
1776 if (pcnet32_debug & NETIF_MSG_PROBE) { 1780 if (pcnet32_debug & NETIF_MSG_PROBE) {
1777 for (i = 0; i < 6; i++) 1781 DECLARE_MAC_BUF(mac);
1778 printk(" %2.2x", dev->dev_addr[i]); 1782 printk(" %s", print_mac(mac, dev->dev_addr));
1779 1783
1780 /* Version 0x2623 and 0x2624 */ 1784 /* Version 0x2623 and 0x2624 */
1781 if (((chip_version + 1) & 0xfffe) == 0x2624) { 1785 if (((chip_version + 1) & 0xfffe) == 0x2624) {
@@ -2396,7 +2400,7 @@ static int pcnet32_init_ring(struct net_device *dev)
2396 if (rx_skbuff == NULL) { 2400 if (rx_skbuff == NULL) {
2397 if (! 2401 if (!
2398 (rx_skbuff = lp->rx_skbuff[i] = 2402 (rx_skbuff = lp->rx_skbuff[i] =
2399 dev_alloc_skb(PKT_BUF_SZ))) { 2403 dev_alloc_skb(PKT_BUF_SKB))) {
2400 /* there is not much, we can do at this point */ 2404 /* there is not much, we can do at this point */
2401 if (netif_msg_drv(lp)) 2405 if (netif_msg_drv(lp))
2402 printk(KERN_ERR 2406 printk(KERN_ERR
@@ -2404,16 +2408,16 @@ static int pcnet32_init_ring(struct net_device *dev)
2404 dev->name); 2408 dev->name);
2405 return -1; 2409 return -1;
2406 } 2410 }
2407 skb_reserve(rx_skbuff, 2); 2411 skb_reserve(rx_skbuff, NET_IP_ALIGN);
2408 } 2412 }
2409 2413
2410 rmb(); 2414 rmb();
2411 if (lp->rx_dma_addr[i] == 0) 2415 if (lp->rx_dma_addr[i] == 0)
2412 lp->rx_dma_addr[i] = 2416 lp->rx_dma_addr[i] =
2413 pci_map_single(lp->pci_dev, rx_skbuff->data, 2417 pci_map_single(lp->pci_dev, rx_skbuff->data,
2414 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 2418 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
2415 lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); 2419 lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
2416 lp->rx_ring[i].buf_length = cpu_to_le16(2 - PKT_BUF_SZ); 2420 lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
2417 wmb(); /* Make sure owner changes after all others are visible */ 2421 wmb(); /* Make sure owner changes after all others are visible */
2418 lp->rx_ring[i].status = cpu_to_le16(0x8000); 2422 lp->rx_ring[i].status = cpu_to_le16(0x8000);
2419 } 2423 }
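The pcnet32 hunks replace the scattered "PKT_BUF_SZ - 2" / "2 - PKT_BUF_SZ" arithmetic with named constants: PKT_BUF_SKB is what dev_alloc_skb() gets, PKT_BUF_SIZE is the usable length after the NET_IP_ALIGN reservation, and NEG_BUF_SIZE is its negative, because the chip's buf_length field takes the two's complement of the buffer length (stored little-endian via cpu_to_le16 in the driver). A small sketch of the arithmetic, assuming NET_IP_ALIGN is 2, its usual value:

#include <stdint.h>
#include <stdio.h>

#define NET_IP_ALIGN	2	/* usual value; arch-overridable in the kernel */
#define PKT_BUF_SKB	1544	/* skb allocation size */
#define PKT_BUF_SIZE	(PKT_BUF_SKB - NET_IP_ALIGN)	/* usable DMA length */
#define NEG_BUF_SIZE	(NET_IP_ALIGN - PKT_BUF_SKB)	/* what the chip wants */

int main(void)
{
	/* buf_length is a 16-bit descriptor field; storing the negative
	 * value yields the two's complement of PKT_BUF_SIZE. */
	uint16_t buf_length = (uint16_t)NEG_BUF_SIZE;

	printf("PKT_BUF_SIZE   = %d\n", PKT_BUF_SIZE);
	printf("NEG_BUF_SIZE   = %d\n", NEG_BUF_SIZE);
	printf("buf_length     = 0x%04x\n", buf_length);
	printf("recovered size = %u\n", (uint16_t)(~buf_length + 1));
	return 0;
}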
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index 73b6d39ef6b0..ca9b040f9ad9 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -236,12 +236,12 @@ module_init(fixed_mdio_bus_init);
236static void __exit fixed_mdio_bus_exit(void) 236static void __exit fixed_mdio_bus_exit(void)
237{ 237{
238 struct fixed_mdio_bus *fmb = &platform_fmb; 238 struct fixed_mdio_bus *fmb = &platform_fmb;
239 struct fixed_phy *fp; 239 struct fixed_phy *fp, *tmp;
240 240
241 mdiobus_unregister(&fmb->mii_bus); 241 mdiobus_unregister(&fmb->mii_bus);
242 platform_device_unregister(pdev); 242 platform_device_unregister(pdev);
243 243
244 list_for_each_entry(fp, &fmb->phys, node) { 244 list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
245 list_del(&fp->node); 245 list_del(&fp->node);
246 kfree(fp); 246 kfree(fp);
247 } 247 }
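The fixed.c change is the classic safe-iteration fix: the loop body frees the node it is standing on, so plain list_for_each_entry() would read the next pointer from freed memory on the following step, while the _safe variant caches the successor first. A stand-alone sketch of the same idea with an ordinary singly linked list (the kernel macros do the equivalent over struct list_head with container_of()); struct fixed_phy here is just a stand-in name:

#include <stdio.h>
#include <stdlib.h>

struct fixed_phy {			/* stand-in for the driver's struct */
	int phy_id;
	struct fixed_phy *next;
};

int main(void)
{
	struct fixed_phy *head = NULL, *fp, *tmp;

	/* Build a short list. */
	for (int i = 0; i < 3; i++) {
		fp = malloc(sizeof(*fp));
		fp->phy_id = i;
		fp->next = head;
		head = fp;
	}

	/* Unsafe: for (fp = head; fp; fp = fp->next) { free(fp); }
	 * would dereference fp->next after free(fp).
	 * Safe: remember the successor before releasing the node. */
	for (fp = head; fp; fp = tmp) {
		tmp = fp->next;
		printf("freeing phy %d\n", fp->phy_id);
		free(fp);
	}
	return 0;
}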
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 055af081e027..7eb6e7e848f4 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -46,29 +46,25 @@
46#include <asm/lv1call.h> 46#include <asm/lv1call.h>
47 47
48#include "ps3_gelic_net.h" 48#include "ps3_gelic_net.h"
49#include "ps3_gelic_wireless.h"
49 50
50#define DRV_NAME "Gelic Network Driver" 51#define DRV_NAME "Gelic Network Driver"
51#define DRV_VERSION "1.0" 52#define DRV_VERSION "2.0"
52 53
53MODULE_AUTHOR("SCE Inc."); 54MODULE_AUTHOR("SCE Inc.");
54MODULE_DESCRIPTION("Gelic Network driver"); 55MODULE_DESCRIPTION("Gelic Network driver");
55MODULE_LICENSE("GPL"); 56MODULE_LICENSE("GPL");
56 57
57static inline struct device *ctodev(struct gelic_net_card *card) 58
58{ 59static inline void gelic_card_enable_rxdmac(struct gelic_card *card);
59 return &card->dev->core; 60static inline void gelic_card_disable_rxdmac(struct gelic_card *card);
60} 61static inline void gelic_card_disable_txdmac(struct gelic_card *card);
61static inline u64 bus_id(struct gelic_net_card *card) 62static inline void gelic_card_reset_chain(struct gelic_card *card,
62{ 63 struct gelic_descr_chain *chain,
63 return card->dev->bus_id; 64 struct gelic_descr *start_descr);
64}
65static inline u64 dev_id(struct gelic_net_card *card)
66{
67 return card->dev->dev_id;
68}
69 65
70/* set irq_mask */ 66/* set irq_mask */
71static int gelic_net_set_irq_mask(struct gelic_net_card *card, u64 mask) 67int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask)
72{ 68{
73 int status; 69 int status;
74 70
@@ -76,54 +72,110 @@ static int gelic_net_set_irq_mask(struct gelic_net_card *card, u64 mask)
76 mask, 0); 72 mask, 0);
77 if (status) 73 if (status)
78 dev_info(ctodev(card), 74 dev_info(ctodev(card),
79 "lv1_net_set_interrupt_mask failed %d\n", status); 75 "%s failed %d\n", __func__, status);
80 return status; 76 return status;
81} 77}
82static inline void gelic_net_rx_irq_on(struct gelic_net_card *card) 78
79static inline void gelic_card_rx_irq_on(struct gelic_card *card)
83{ 80{
84 gelic_net_set_irq_mask(card, card->ghiintmask | GELIC_NET_RXINT); 81 card->irq_mask |= GELIC_CARD_RXINT;
82 gelic_card_set_irq_mask(card, card->irq_mask);
85} 83}
86static inline void gelic_net_rx_irq_off(struct gelic_net_card *card) 84static inline void gelic_card_rx_irq_off(struct gelic_card *card)
87{ 85{
88 gelic_net_set_irq_mask(card, card->ghiintmask & ~GELIC_NET_RXINT); 86 card->irq_mask &= ~GELIC_CARD_RXINT;
87 gelic_card_set_irq_mask(card, card->irq_mask);
88}
89
90static void gelic_card_get_ether_port_status(struct gelic_card *card,
91 int inform)
92{
93 u64 v2;
94 struct net_device *ether_netdev;
95
96 lv1_net_control(bus_id(card), dev_id(card),
97 GELIC_LV1_GET_ETH_PORT_STATUS,
98 GELIC_LV1_VLAN_TX_ETHERNET, 0, 0,
99 &card->ether_port_status, &v2);
100
101 if (inform) {
102 ether_netdev = card->netdev[GELIC_PORT_ETHERNET];
103 if (card->ether_port_status & GELIC_LV1_ETHER_LINK_UP)
104 netif_carrier_on(ether_netdev);
105 else
106 netif_carrier_off(ether_netdev);
107 }
108}
109
110void gelic_card_up(struct gelic_card *card)
111{
112 pr_debug("%s: called\n", __func__);
113 down(&card->updown_lock);
114 if (atomic_inc_return(&card->users) == 1) {
115 pr_debug("%s: real do\n", __func__);
116 /* enable irq */
117 gelic_card_set_irq_mask(card, card->irq_mask);
118 /* start rx */
119 gelic_card_enable_rxdmac(card);
120
121 napi_enable(&card->napi);
122 }
123 up(&card->updown_lock);
124 pr_debug("%s: done\n", __func__);
89} 125}
126
127void gelic_card_down(struct gelic_card *card)
128{
129 u64 mask;
130 pr_debug("%s: called\n", __func__);
131 down(&card->updown_lock);
132 if (atomic_dec_if_positive(&card->users) == 0) {
133 pr_debug("%s: real do\n", __func__);
134 napi_disable(&card->napi);
135 /*
136 * Disable irq. Wireless interrupts will
137 * be disabled later if any
138 */
139 mask = card->irq_mask & (GELIC_CARD_WLAN_EVENT_RECEIVED |
140 GELIC_CARD_WLAN_COMMAND_COMPLETED);
141 gelic_card_set_irq_mask(card, mask);
142 /* stop rx */
143 gelic_card_disable_rxdmac(card);
144 gelic_card_reset_chain(card, &card->rx_chain,
145 card->descr + GELIC_NET_TX_DESCRIPTORS);
146 /* stop tx */
147 gelic_card_disable_txdmac(card);
148 }
149 up(&card->updown_lock);
150 pr_debug("%s: done\n", __func__);
151}
152
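
The new gelic_card_up()/gelic_card_down() pair above lets the ethernet and wireless ports share one device: only the first opener enables the interrupt mask, RX DMA and NAPI, and only the last closer tears them down. A minimal userspace sketch of that first-up/last-down pattern (a pthread mutex and a plain counter stand in for the driver's updown_lock semaphore and atomic users count; all names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t updown_lock = PTHREAD_MUTEX_INITIALIZER;
static int users;			/* stands in for atomic_t card->users */

static void card_up(void)
{
	pthread_mutex_lock(&updown_lock);
	if (++users == 1)		/* first opener does the real work */
		printf("enable irq mask, start rx dma, enable napi\n");
	pthread_mutex_unlock(&updown_lock);
}

static void card_down(void)
{
	pthread_mutex_lock(&updown_lock);
	if (users && --users == 0)	/* last closer tears down */
		printf("disable napi, mask irq, stop rx/tx dma\n");
	pthread_mutex_unlock(&updown_lock);
}

int main(void)
{
	card_up();	/* eth port up: hardware brought up */
	card_up();	/* wlan port up: refcount only */
	card_down();	/* wlan port down: refcount only */
	card_down();	/* eth port down: hardware torn down */
	return 0;
}
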
90/** 153/**
91 * gelic_net_get_descr_status -- returns the status of a descriptor 154 * gelic_descr_get_status -- returns the status of a descriptor
92 * @descr: descriptor to look at 155 * @descr: descriptor to look at
93 * 156 *
94 * returns the status as in the dmac_cmd_status field of the descriptor 157 * returns the status as in the dmac_cmd_status field of the descriptor
95 */ 158 */
96static enum gelic_net_descr_status 159static enum gelic_descr_dma_status
97gelic_net_get_descr_status(struct gelic_net_descr *descr) 160gelic_descr_get_status(struct gelic_descr *descr)
98{ 161{
99 u32 cmd_status; 162 return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
100
101 cmd_status = descr->dmac_cmd_status;
102 cmd_status >>= GELIC_NET_DESCR_IND_PROC_SHIFT;
103 return cmd_status;
104} 163}
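
gelic_descr_get_status() above and gelic_descr_set_status() below now treat dmac_cmd_status as a big-endian word and touch only the status bits, leaving the rest of the command word intact. A standalone sketch of that read-modify-write, assuming the status occupies the upper four bits as the old shift/mask code implied (mask and status values are illustrative, not the real register layout):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define STAT_MASK	0xf0000000u	/* assumed stand-in for GELIC_DESCR_DMA_STAT_MASK */
#define STAT_CARDOWNED	0xa0000000u	/* assumed status value, for illustration only */

static uint32_t get_status(uint32_t dmac_cmd_status_be)
{
	return be32toh(dmac_cmd_status_be) & STAT_MASK;
}

static uint32_t set_status(uint32_t dmac_cmd_status_be, uint32_t status)
{
	/* keep the low command bits, replace only the status bits */
	return htobe32(status | (be32toh(dmac_cmd_status_be) & ~STAT_MASK));
}

int main(void)
{
	uint32_t reg = htobe32(0x00000042);	/* pretend some command bits are set */

	reg = set_status(reg, STAT_CARDOWNED);
	printf("status=%08x cmd bits=%08x\n",
	       (unsigned)get_status(reg),
	       (unsigned)(be32toh(reg) & ~STAT_MASK));
	return 0;
}
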
105 164
106/** 165/**
107 * gelic_net_set_descr_status -- sets the status of a descriptor 166 * gelic_descr_set_status -- sets the status of a descriptor
108 * @descr: descriptor to change 167 * @descr: descriptor to change
109 * @status: status to set in the descriptor 168 * @status: status to set in the descriptor
110 * 169 *
111 * changes the status to the specified value. Doesn't change other bits 170 * changes the status to the specified value. Doesn't change other bits
112 * in the status 171 * in the status
113 */ 172 */
114static void gelic_net_set_descr_status(struct gelic_net_descr *descr, 173static void gelic_descr_set_status(struct gelic_descr *descr,
115 enum gelic_net_descr_status status) 174 enum gelic_descr_dma_status status)
116{ 175{
117 u32 cmd_status; 176 descr->dmac_cmd_status = cpu_to_be32(status |
118 177 (be32_to_cpu(descr->dmac_cmd_status) &
119 /* read the status */ 178 ~GELIC_DESCR_DMA_STAT_MASK));
120 cmd_status = descr->dmac_cmd_status;
121 /* clean the upper 4 bits */
122 cmd_status &= GELIC_NET_DESCR_IND_PROC_MASKO;
123 /* add the status to it */
124 cmd_status |= ((u32)status) << GELIC_NET_DESCR_IND_PROC_SHIFT;
125 /* and write it back */
126 descr->dmac_cmd_status = cmd_status;
127 /* 179 /*
128 * dma_cmd_status field is used to indicate whether the descriptor 180 * dma_cmd_status field is used to indicate whether the descriptor
129 * is valid or not. 181 * is valid or not.
@@ -134,24 +186,24 @@ static void gelic_net_set_descr_status(struct gelic_net_descr *descr,
134} 186}
135 187
136/** 188/**
137 * gelic_net_free_chain - free descriptor chain 189 * gelic_card_free_chain - free descriptor chain
138 * @card: card structure 190 * @card: card structure
139 * @descr_in: address of desc 191 * @descr_in: address of desc
140 */ 192 */
141static void gelic_net_free_chain(struct gelic_net_card *card, 193static void gelic_card_free_chain(struct gelic_card *card,
142 struct gelic_net_descr *descr_in) 194 struct gelic_descr *descr_in)
143{ 195{
144 struct gelic_net_descr *descr; 196 struct gelic_descr *descr;
145 197
146 for (descr = descr_in; descr && descr->bus_addr; descr = descr->next) { 198 for (descr = descr_in; descr && descr->bus_addr; descr = descr->next) {
147 dma_unmap_single(ctodev(card), descr->bus_addr, 199 dma_unmap_single(ctodev(card), descr->bus_addr,
148 GELIC_NET_DESCR_SIZE, DMA_BIDIRECTIONAL); 200 GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
149 descr->bus_addr = 0; 201 descr->bus_addr = 0;
150 } 202 }
151} 203}
152 204
153/** 205/**
154 * gelic_net_init_chain - links descriptor chain 206 * gelic_card_init_chain - links descriptor chain
155 * @card: card structure 207 * @card: card structure
156 * @chain: address of chain 208 * @chain: address of chain
157 * @start_descr: address of descriptor array 209 * @start_descr: address of descriptor array
@@ -162,22 +214,22 @@ static void gelic_net_free_chain(struct gelic_net_card *card,
162 * 214 *
163 * returns 0 on success, <0 on failure 215 * returns 0 on success, <0 on failure
164 */ 216 */
165static int gelic_net_init_chain(struct gelic_net_card *card, 217static int gelic_card_init_chain(struct gelic_card *card,
166 struct gelic_net_descr_chain *chain, 218 struct gelic_descr_chain *chain,
167 struct gelic_net_descr *start_descr, int no) 219 struct gelic_descr *start_descr, int no)
168{ 220{
169 int i; 221 int i;
170 struct gelic_net_descr *descr; 222 struct gelic_descr *descr;
171 223
172 descr = start_descr; 224 descr = start_descr;
173 memset(descr, 0, sizeof(*descr) * no); 225 memset(descr, 0, sizeof(*descr) * no);
174 226
175 /* set up the hardware pointers in each descriptor */ 227 /* set up the hardware pointers in each descriptor */
176 for (i = 0; i < no; i++, descr++) { 228 for (i = 0; i < no; i++, descr++) {
177 gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); 229 gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
178 descr->bus_addr = 230 descr->bus_addr =
179 dma_map_single(ctodev(card), descr, 231 dma_map_single(ctodev(card), descr,
180 GELIC_NET_DESCR_SIZE, 232 GELIC_DESCR_SIZE,
181 DMA_BIDIRECTIONAL); 233 DMA_BIDIRECTIONAL);
182 234
183 if (!descr->bus_addr) 235 if (!descr->bus_addr)
@@ -193,7 +245,7 @@ static int gelic_net_init_chain(struct gelic_net_card *card,
193 /* chain bus addr of hw descriptor */ 245 /* chain bus addr of hw descriptor */
194 descr = start_descr; 246 descr = start_descr;
195 for (i = 0; i < no; i++, descr++) { 247 for (i = 0; i < no; i++, descr++) {
196 descr->next_descr_addr = descr->next->bus_addr; 248 descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
197 } 249 }
198 250
199 chain->head = start_descr; 251 chain->head = start_descr;
@@ -208,13 +260,38 @@ iommu_error:
208 for (i--, descr--; 0 <= i; i--, descr--) 260 for (i--, descr--; 0 <= i; i--, descr--)
209 if (descr->bus_addr) 261 if (descr->bus_addr)
210 dma_unmap_single(ctodev(card), descr->bus_addr, 262 dma_unmap_single(ctodev(card), descr->bus_addr,
211 GELIC_NET_DESCR_SIZE, 263 GELIC_DESCR_SIZE,
212 DMA_BIDIRECTIONAL); 264 DMA_BIDIRECTIONAL);
213 return -ENOMEM; 265 return -ENOMEM;
214} 266}
215 267
216/** 268/**
217 * gelic_net_prepare_rx_descr - reinitializes a rx descriptor 269 * gelic_card_reset_chain - reset status of a descriptor chain
270 * @card: card structure
271 * @chain: address of chain
272 * @start_descr: address of descriptor array
273 *
274 * Reset the status of dma descriptors to ready state
275 * and re-initialize the hardware chain for later use
276 */
277static void gelic_card_reset_chain(struct gelic_card *card,
278 struct gelic_descr_chain *chain,
279 struct gelic_descr *start_descr)
280{
281 struct gelic_descr *descr;
282
283 for (descr = start_descr; start_descr != descr->next; descr++) {
284 gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
285 descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
286 }
287
288 chain->head = start_descr;
289 chain->tail = (descr - 1);
290
291 (descr - 1)->next_descr_addr = 0;
292}
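
gelic_card_init_chain() and the new gelic_card_reset_chain() keep two views of the same ring: software next/prev pointers for the driver and next_descr_addr bus addresses for the DMAC, with the tail's hardware link zeroed so the engine stops at the end of the chain. A self-contained sketch of that wiring (fake bus addresses and a hypothetical ring size):

#include <stdint.h>
#include <stdio.h>

#define NDESCR 4				/* hypothetical ring size */

struct descr {
	uint32_t next_descr_addr;		/* link the hardware follows */
	uint32_t bus_addr;			/* fake DMA address of this descriptor */
	struct descr *next, *prev;		/* software chain */
};

int main(void)
{
	static struct descr ring[NDESCR];
	int i;

	for (i = 0; i < NDESCR; i++) {
		ring[i].bus_addr = 0x1000 + 0x20 * i;	/* pretend dma_map_single() result */
		ring[i].next = &ring[(i + 1) % NDESCR];
		ring[i].prev = &ring[(i - 1 + NDESCR) % NDESCR];
	}
	/* hardware links mirror the software ring ... */
	for (i = 0; i < NDESCR; i++)
		ring[i].next_descr_addr = ring[i].next->bus_addr;
	/* ... except at the tail, which terminates the DMA chain */
	ring[NDESCR - 1].next_descr_addr = 0;

	for (i = 0; i < NDESCR; i++)
		printf("descr %d: bus=%#x hw_next=%#x\n", i,
		       (unsigned)ring[i].bus_addr,
		       (unsigned)ring[i].next_descr_addr);
	return 0;
}
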
293/**
294 * gelic_descr_prepare_rx - reinitializes a rx descriptor
218 * @card: card structure 295 * @card: card structure
219 * @descr: descriptor to re-init 296 * @descr: descriptor to re-init
220 * 297 *
@@ -223,29 +300,27 @@ iommu_error:
223 * allocates a new rx skb, iommu-maps it and attaches it to the descriptor. 300 * allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
224 * Activate the descriptor state-wise 301 * Activate the descriptor state-wise
225 */ 302 */
226static int gelic_net_prepare_rx_descr(struct gelic_net_card *card, 303static int gelic_descr_prepare_rx(struct gelic_card *card,
227 struct gelic_net_descr *descr) 304 struct gelic_descr *descr)
228{ 305{
229 int offset; 306 int offset;
230 unsigned int bufsize; 307 unsigned int bufsize;
231 308
232 if (gelic_net_get_descr_status(descr) != GELIC_NET_DESCR_NOT_IN_USE) { 309 if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
233 dev_info(ctodev(card), "%s: ERROR status \n", __func__); 310 dev_info(ctodev(card), "%s: ERROR status \n", __func__);
234 }
235 /* we need to round up the buffer size to a multiple of 128 */ 311 /* we need to round up the buffer size to a multiple of 128 */
236 bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN); 312 bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
237 313
238 /* and we need to have it 128 byte aligned, therefore we allocate a 314 /* and we need to have it 128 byte aligned, therefore we allocate a
239 * bit more */ 315 * bit more */
240 descr->skb = netdev_alloc_skb(card->netdev, 316 descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
241 bufsize + GELIC_NET_RXBUF_ALIGN - 1);
242 if (!descr->skb) { 317 if (!descr->skb) {
243 descr->buf_addr = 0; /* tell DMAC don't touch memory */ 318 descr->buf_addr = 0; /* tell DMAC don't touch memory */
244 dev_info(ctodev(card), 319 dev_info(ctodev(card),
245 "%s:allocate skb failed !!\n", __func__); 320 "%s:allocate skb failed !!\n", __func__);
246 return -ENOMEM; 321 return -ENOMEM;
247 } 322 }
248 descr->buf_size = bufsize; 323 descr->buf_size = cpu_to_be32(bufsize);
249 descr->dmac_cmd_status = 0; 324 descr->dmac_cmd_status = 0;
250 descr->result_size = 0; 325 descr->result_size = 0;
251 descr->valid_size = 0; 326 descr->valid_size = 0;
@@ -256,63 +331,64 @@ static int gelic_net_prepare_rx_descr(struct gelic_net_card *card,
256 if (offset) 331 if (offset)
257 skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset); 332 skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
258 /* io-mmu-map the skb */ 333 /* io-mmu-map the skb */
259 descr->buf_addr = dma_map_single(ctodev(card), descr->skb->data, 334 descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
260 GELIC_NET_MAX_MTU, 335 descr->skb->data,
261 DMA_FROM_DEVICE); 336 GELIC_NET_MAX_MTU,
337 DMA_FROM_DEVICE));
262 if (!descr->buf_addr) { 338 if (!descr->buf_addr) {
263 dev_kfree_skb_any(descr->skb); 339 dev_kfree_skb_any(descr->skb);
264 descr->skb = NULL; 340 descr->skb = NULL;
265 dev_info(ctodev(card), 341 dev_info(ctodev(card),
266 "%s:Could not iommu-map rx buffer\n", __func__); 342 "%s:Could not iommu-map rx buffer\n", __func__);
267 gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); 343 gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
268 return -ENOMEM; 344 return -ENOMEM;
269 } else { 345 } else {
270 gelic_net_set_descr_status(descr, GELIC_NET_DESCR_CARDOWNED); 346 gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
271 return 0; 347 return 0;
272 } 348 }
273} 349}
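
gelic_descr_prepare_rx() above over-allocates the receive buffer by GELIC_NET_RXBUF_ALIGN - 1 bytes and then reserves just enough headroom to land on a 128-byte boundary. A small sketch of that alignment arithmetic, with plain malloc standing in for the skb allocation (the buffer size is a placeholder):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RXBUF_ALIGN 128

int main(void)
{
	size_t bufsize = 2304;			/* hypothetical MTU-sized buffer */
	unsigned char *raw = malloc(bufsize + RXBUF_ALIGN - 1);
	unsigned char *data = raw;
	uintptr_t offset;

	if (!raw)
		return 1;

	offset = (uintptr_t)data & (RXBUF_ALIGN - 1);
	if (offset)
		data += RXBUF_ALIGN - offset;	/* same effect as the skb_reserve() call */

	printf("raw=%p aligned=%p\n", (void *)raw, (void *)data);
	free(raw);
	return 0;
}
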
274 350
275/** 351/**
276 * gelic_net_release_rx_chain - free all skb of rx descr 352 * gelic_card_release_rx_chain - free all skb of rx descr
277 * @card: card structure 353 * @card: card structure
278 * 354 *
279 */ 355 */
280static void gelic_net_release_rx_chain(struct gelic_net_card *card) 356static void gelic_card_release_rx_chain(struct gelic_card *card)
281{ 357{
282 struct gelic_net_descr *descr = card->rx_chain.head; 358 struct gelic_descr *descr = card->rx_chain.head;
283 359
284 do { 360 do {
285 if (descr->skb) { 361 if (descr->skb) {
286 dma_unmap_single(ctodev(card), 362 dma_unmap_single(ctodev(card),
287 descr->buf_addr, 363 be32_to_cpu(descr->buf_addr),
288 descr->skb->len, 364 descr->skb->len,
289 DMA_FROM_DEVICE); 365 DMA_FROM_DEVICE);
290 descr->buf_addr = 0; 366 descr->buf_addr = 0;
291 dev_kfree_skb_any(descr->skb); 367 dev_kfree_skb_any(descr->skb);
292 descr->skb = NULL; 368 descr->skb = NULL;
293 gelic_net_set_descr_status(descr, 369 gelic_descr_set_status(descr,
294 GELIC_NET_DESCR_NOT_IN_USE); 370 GELIC_DESCR_DMA_NOT_IN_USE);
295 } 371 }
296 descr = descr->next; 372 descr = descr->next;
297 } while (descr != card->rx_chain.head); 373 } while (descr != card->rx_chain.head);
298} 374}
299 375
300/** 376/**
301 * gelic_net_fill_rx_chain - fills descriptors/skbs in the rx chains 377 * gelic_card_fill_rx_chain - fills descriptors/skbs in the rx chains
302 * @card: card structure 378 * @card: card structure
303 * 379 *
304 * fills all descriptors in the rx chain: allocates skbs 380 * fills all descriptors in the rx chain: allocates skbs
305 * and iommu-maps them. 381 * and iommu-maps them.
306 * returns 0 on success, <0 on failure 382 * returns 0 on success, < 0 on failure
307 */ 383 */
308static int gelic_net_fill_rx_chain(struct gelic_net_card *card) 384static int gelic_card_fill_rx_chain(struct gelic_card *card)
309{ 385{
310 struct gelic_net_descr *descr = card->rx_chain.head; 386 struct gelic_descr *descr = card->rx_chain.head;
311 int ret; 387 int ret;
312 388
313 do { 389 do {
314 if (!descr->skb) { 390 if (!descr->skb) {
315 ret = gelic_net_prepare_rx_descr(card, descr); 391 ret = gelic_descr_prepare_rx(card, descr);
316 if (ret) 392 if (ret)
317 goto rewind; 393 goto rewind;
318 } 394 }
@@ -321,41 +397,41 @@ static int gelic_net_fill_rx_chain(struct gelic_net_card *card)
321 397
322 return 0; 398 return 0;
323rewind: 399rewind:
324 gelic_net_release_rx_chain(card); 400 gelic_card_release_rx_chain(card);
325 return ret; 401 return ret;
326} 402}
327 403
328/** 404/**
329 * gelic_net_alloc_rx_skbs - allocates rx skbs in rx descriptor chains 405 * gelic_card_alloc_rx_skbs - allocates rx skbs in rx descriptor chains
330 * @card: card structure 406 * @card: card structure
331 * 407 *
332 * returns 0 on success, <0 on failure 408 * returns 0 on success, < 0 on failure
333 */ 409 */
334static int gelic_net_alloc_rx_skbs(struct gelic_net_card *card) 410static int gelic_card_alloc_rx_skbs(struct gelic_card *card)
335{ 411{
336 struct gelic_net_descr_chain *chain; 412 struct gelic_descr_chain *chain;
337 int ret; 413 int ret;
338 chain = &card->rx_chain; 414 chain = &card->rx_chain;
339 ret = gelic_net_fill_rx_chain(card); 415 ret = gelic_card_fill_rx_chain(card);
340 chain->head = card->rx_top->prev; /* point to the last */ 416 chain->tail = card->rx_top->prev; /* point to the last */
341 return ret; 417 return ret;
342} 418}
343 419
344/** 420/**
345 * gelic_net_release_tx_descr - processes a used tx descriptor 421 * gelic_descr_release_tx - processes a used tx descriptor
346 * @card: card structure 422 * @card: card structure
347 * @descr: descriptor to release 423 * @descr: descriptor to release
348 * 424 *
349 * releases a used tx descriptor (unmapping, freeing of skb) 425 * releases a used tx descriptor (unmapping, freeing of skb)
350 */ 426 */
351static void gelic_net_release_tx_descr(struct gelic_net_card *card, 427static void gelic_descr_release_tx(struct gelic_card *card,
352 struct gelic_net_descr *descr) 428 struct gelic_descr *descr)
353{ 429{
354 struct sk_buff *skb = descr->skb; 430 struct sk_buff *skb = descr->skb;
355 431
356 BUG_ON(!(descr->data_status & (1 << GELIC_NET_TXDESC_TAIL))); 432 BUG_ON(!(be32_to_cpu(descr->data_status) & GELIC_DESCR_TX_TAIL));
357 433
358 dma_unmap_single(ctodev(card), descr->buf_addr, skb->len, 434 dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), skb->len,
359 DMA_TO_DEVICE); 435 DMA_TO_DEVICE);
360 dev_kfree_skb_any(skb); 436 dev_kfree_skb_any(skb);
361 437
@@ -369,59 +445,75 @@ static void gelic_net_release_tx_descr(struct gelic_net_card *card,
369 descr->skb = NULL; 445 descr->skb = NULL;
370 446
371 /* set descr status */ 447 /* set descr status */
372 gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); 448 gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
449}
450
451static void gelic_card_stop_queues(struct gelic_card *card)
452{
453 netif_stop_queue(card->netdev[GELIC_PORT_ETHERNET]);
454
455 if (card->netdev[GELIC_PORT_WIRELESS])
456 netif_stop_queue(card->netdev[GELIC_PORT_WIRELESS]);
373} 457}
458static void gelic_card_wake_queues(struct gelic_card *card)
459{
460 netif_wake_queue(card->netdev[GELIC_PORT_ETHERNET]);
374 461
462 if (card->netdev[GELIC_PORT_WIRELESS])
463 netif_wake_queue(card->netdev[GELIC_PORT_WIRELESS]);
464}
375/** 465/**
376 * gelic_net_release_tx_chain - processes sent tx descriptors 466 * gelic_card_release_tx_chain - processes sent tx descriptors
377 * @card: adapter structure 467 * @card: adapter structure
378 * @stop: net_stop sequence 468 * @stop: net_stop sequence
379 * 469 *
380 * releases the tx descriptors that gelic has finished with 470 * releases the tx descriptors that gelic has finished with
381 */ 471 */
382static void gelic_net_release_tx_chain(struct gelic_net_card *card, int stop) 472static void gelic_card_release_tx_chain(struct gelic_card *card, int stop)
383{ 473{
384 struct gelic_net_descr_chain *tx_chain; 474 struct gelic_descr_chain *tx_chain;
385 enum gelic_net_descr_status status; 475 enum gelic_descr_dma_status status;
476 struct net_device *netdev;
386 int release = 0; 477 int release = 0;
387 478
388 for (tx_chain = &card->tx_chain; 479 for (tx_chain = &card->tx_chain;
389 tx_chain->head != tx_chain->tail && tx_chain->tail; 480 tx_chain->head != tx_chain->tail && tx_chain->tail;
390 tx_chain->tail = tx_chain->tail->next) { 481 tx_chain->tail = tx_chain->tail->next) {
391 status = gelic_net_get_descr_status(tx_chain->tail); 482 status = gelic_descr_get_status(tx_chain->tail);
483 netdev = tx_chain->tail->skb->dev;
392 switch (status) { 484 switch (status) {
393 case GELIC_NET_DESCR_RESPONSE_ERROR: 485 case GELIC_DESCR_DMA_RESPONSE_ERROR:
394 case GELIC_NET_DESCR_PROTECTION_ERROR: 486 case GELIC_DESCR_DMA_PROTECTION_ERROR:
395 case GELIC_NET_DESCR_FORCE_END: 487 case GELIC_DESCR_DMA_FORCE_END:
396 if (printk_ratelimit()) 488 if (printk_ratelimit())
397 dev_info(ctodev(card), 489 dev_info(ctodev(card),
398 "%s: forcing end of tx descriptor " \ 490 "%s: forcing end of tx descriptor " \
399 "with status %x\n", 491 "with status %x\n",
400 __func__, status); 492 __func__, status);
401 card->netdev->stats.tx_dropped++; 493 netdev->stats.tx_dropped++;
402 break; 494 break;
403 495
404 case GELIC_NET_DESCR_COMPLETE: 496 case GELIC_DESCR_DMA_COMPLETE:
405 if (tx_chain->tail->skb) { 497 if (tx_chain->tail->skb) {
406 card->netdev->stats.tx_packets++; 498 netdev->stats.tx_packets++;
407 card->netdev->stats.tx_bytes += 499 netdev->stats.tx_bytes +=
408 tx_chain->tail->skb->len; 500 tx_chain->tail->skb->len;
409 } 501 }
410 break; 502 break;
411 503
412 case GELIC_NET_DESCR_CARDOWNED: 504 case GELIC_DESCR_DMA_CARDOWNED:
413 /* pending tx request */ 505 /* pending tx request */
414 default: 506 default:
415 /* any other value (== GELIC_NET_DESCR_NOT_IN_USE) */ 507 /* any other value (== GELIC_DESCR_DMA_NOT_IN_USE) */
416 if (!stop) 508 if (!stop)
417 goto out; 509 goto out;
418 } 510 }
419 gelic_net_release_tx_descr(card, tx_chain->tail); 511 gelic_descr_release_tx(card, tx_chain->tail);
420 release ++; 512 release ++;
421 } 513 }
422out: 514out:
423 if (!stop && release) 515 if (!stop && release)
424 netif_wake_queue(card->netdev); 516 gelic_card_wake_queues(card);
425} 517}
426 518
427/** 519/**
@@ -432,9 +524,9 @@ out:
432 * netdev interface. It also sets up multicast, allmulti and promisc 524 * netdev interface. It also sets up multicast, allmulti and promisc
433 * flags appropriately 525 * flags appropriately
434 */ 526 */
435static void gelic_net_set_multi(struct net_device *netdev) 527void gelic_net_set_multi(struct net_device *netdev)
436{ 528{
437 struct gelic_net_card *card = netdev_priv(netdev); 529 struct gelic_card *card = netdev_card(netdev);
438 struct dev_mc_list *mc; 530 struct dev_mc_list *mc;
439 unsigned int i; 531 unsigned int i;
440 uint8_t *p; 532 uint8_t *p;
@@ -456,8 +548,8 @@ static void gelic_net_set_multi(struct net_device *netdev)
456 "lv1_net_add_multicast_address failed, %d\n", 548 "lv1_net_add_multicast_address failed, %d\n",
457 status); 549 status);
458 550
459 if (netdev->flags & IFF_ALLMULTI 551 if ((netdev->flags & IFF_ALLMULTI) ||
460 || netdev->mc_count > GELIC_NET_MC_COUNT_MAX) { /* list max */ 552 (netdev->mc_count > GELIC_NET_MC_COUNT_MAX)) {
461 status = lv1_net_add_multicast_address(bus_id(card), 553 status = lv1_net_add_multicast_address(bus_id(card),
462 dev_id(card), 554 dev_id(card),
463 0, 1); 555 0, 1);
@@ -468,7 +560,7 @@ static void gelic_net_set_multi(struct net_device *netdev)
468 return; 560 return;
469 } 561 }
470 562
471 /* set multicast address */ 563 /* set multicast addresses */
472 for (mc = netdev->mc_list; mc; mc = mc->next) { 564 for (mc = netdev->mc_list; mc; mc = mc->next) {
473 addr = 0; 565 addr = 0;
474 p = mc->dmi_addr; 566 p = mc->dmi_addr;
@@ -487,31 +579,42 @@ static void gelic_net_set_multi(struct net_device *netdev)
487} 579}
488 580
489/** 581/**
490 * gelic_net_enable_rxdmac - enables the receive DMA controller 582 * gelic_card_enable_rxdmac - enables the receive DMA controller
491 * @card: card structure 583 * @card: card structure
492 * 584 *
493 * gelic_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN 585 * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
494 * in the GDADMACCNTR register 586 * in the GDADMACCNTR register
495 */ 587 */
496static inline void gelic_net_enable_rxdmac(struct gelic_net_card *card) 588static inline void gelic_card_enable_rxdmac(struct gelic_card *card)
497{ 589{
498 int status; 590 int status;
499 591
592#ifdef DEBUG
593 if (gelic_descr_get_status(card->rx_chain.head) !=
594 GELIC_DESCR_DMA_CARDOWNED) {
595 printk(KERN_ERR "%s: status=%x\n", __func__,
596 be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
597 printk(KERN_ERR "%s: nextphy=%x\n", __func__,
598 be32_to_cpu(card->rx_chain.head->next_descr_addr));
599 printk(KERN_ERR "%s: head=%p\n", __func__,
600 card->rx_chain.head);
601 }
602#endif
500 status = lv1_net_start_rx_dma(bus_id(card), dev_id(card), 603 status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
501 card->rx_chain.tail->bus_addr, 0); 604 card->rx_chain.head->bus_addr, 0);
502 if (status) 605 if (status)
503 dev_info(ctodev(card), 606 dev_info(ctodev(card),
504 "lv1_net_start_rx_dma failed, status=%d\n", status); 607 "lv1_net_start_rx_dma failed, status=%d\n", status);
505} 608}
506 609
507/** 610/**
508 * gelic_net_disable_rxdmac - disables the receive DMA controller 611 * gelic_card_disable_rxdmac - disables the receive DMA controller
509 * @card: card structure 612 * @card: card structure
510 * 613 *
511 * gelic_net_disable_rxdmac terminates processing on the DMA controller by 614 * gelic_card_disable_rxdmac terminates processing on the DMA controller by
512 * turning off DMA and issuing a force end 615 * turning off DMA and issuing a force end
513 */ 616 */
514static inline void gelic_net_disable_rxdmac(struct gelic_net_card *card) 617static inline void gelic_card_disable_rxdmac(struct gelic_card *card)
515{ 618{
516 int status; 619 int status;
517 620
@@ -523,13 +626,13 @@ static inline void gelic_net_disable_rxdmac(struct gelic_net_card *card)
523} 626}
524 627
525/** 628/**
526 * gelic_net_disable_txdmac - disables the transmit DMA controller 629 * gelic_card_disable_txdmac - disables the transmit DMA controller
527 * @card: card structure 630 * @card: card structure
528 * 631 *
529 * gelic_net_disable_txdmac terminates processing on the DMA controller by 632 * gelic_card_disable_txdmac terminates processing on the DMA controller by
530 * turning off DMA and issuing a force end 633 * turning off DMA and issuing a force end
531 */ 634 */
532static inline void gelic_net_disable_txdmac(struct gelic_net_card *card) 635static inline void gelic_card_disable_txdmac(struct gelic_card *card)
533{ 636{
534 int status; 637 int status;
535 638
@@ -546,51 +649,37 @@ static inline void gelic_net_disable_txdmac(struct gelic_net_card *card)
546 * 649 *
547 * always returns 0 650 * always returns 0
548 */ 651 */
549static int gelic_net_stop(struct net_device *netdev) 652int gelic_net_stop(struct net_device *netdev)
550{ 653{
551 struct gelic_net_card *card = netdev_priv(netdev); 654 struct gelic_card *card;
552
553 napi_disable(&card->napi);
554 netif_stop_queue(netdev);
555 655
556 /* turn off DMA, force end */ 656 pr_debug("%s: start\n", __func__);
557 gelic_net_disable_rxdmac(card);
558 gelic_net_disable_txdmac(card);
559
560 gelic_net_set_irq_mask(card, 0);
561
562 /* disconnect event port */
563 free_irq(card->netdev->irq, card->netdev);
564 ps3_sb_event_receive_port_destroy(card->dev, card->netdev->irq);
565 card->netdev->irq = NO_IRQ;
566 657
658 netif_stop_queue(netdev);
567 netif_carrier_off(netdev); 659 netif_carrier_off(netdev);
568 660
569 /* release chains */ 661 card = netdev_card(netdev);
570 gelic_net_release_tx_chain(card, 1); 662 gelic_card_down(card);
571 gelic_net_release_rx_chain(card);
572
573 gelic_net_free_chain(card, card->tx_top);
574 gelic_net_free_chain(card, card->rx_top);
575 663
664 pr_debug("%s: done\n", __func__);
576 return 0; 665 return 0;
577} 666}
578 667
579/** 668/**
580 * gelic_net_get_next_tx_descr - returns the next available tx descriptor 669 * gelic_card_get_next_tx_descr - returns the next available tx descriptor
581 * @card: device structure to get descriptor from 670 * @card: device structure to get descriptor from
582 * 671 *
583 * returns the address of the next descriptor, or NULL if not available. 672 * returns the address of the next descriptor, or NULL if not available.
584 */ 673 */
585static struct gelic_net_descr * 674static struct gelic_descr *
586gelic_net_get_next_tx_descr(struct gelic_net_card *card) 675gelic_card_get_next_tx_descr(struct gelic_card *card)
587{ 676{
588 if (!card->tx_chain.head) 677 if (!card->tx_chain.head)
589 return NULL; 678 return NULL;
590 /* see if the next descriptor is free */ 679 /* see if the next descriptor is free */
591 if (card->tx_chain.tail != card->tx_chain.head->next && 680 if (card->tx_chain.tail != card->tx_chain.head->next &&
592 gelic_net_get_descr_status(card->tx_chain.head) == 681 gelic_descr_get_status(card->tx_chain.head) ==
593 GELIC_NET_DESCR_NOT_IN_USE) 682 GELIC_DESCR_DMA_NOT_IN_USE)
594 return card->tx_chain.head; 683 return card->tx_chain.head;
595 else 684 else
596 return NULL; 685 return NULL;
@@ -606,32 +695,33 @@ gelic_net_get_next_tx_descr(struct gelic_net_card *card)
606 * depending on hardware checksum settings. This function assumes a wmb() 695 * depending on hardware checksum settings. This function assumes a wmb()
607 * has executed before. 696 * has executed before.
608 */ 697 */
609static void gelic_net_set_txdescr_cmdstat(struct gelic_net_descr *descr, 698static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr,
610 struct sk_buff *skb) 699 struct sk_buff *skb)
611{ 700{
612 if (skb->ip_summed != CHECKSUM_PARTIAL) 701 if (skb->ip_summed != CHECKSUM_PARTIAL)
613 descr->dmac_cmd_status = GELIC_NET_DMAC_CMDSTAT_NOCS | 702 descr->dmac_cmd_status =
614 GELIC_NET_DMAC_CMDSTAT_END_FRAME; 703 cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM |
704 GELIC_DESCR_TX_DMA_FRAME_TAIL);
615 else { 705 else {
616 /* is packet ip? 706 /* is packet ip?
617 * if yes: tcp? udp? */ 707 * if yes: tcp? udp? */
618 if (skb->protocol == htons(ETH_P_IP)) { 708 if (skb->protocol == htons(ETH_P_IP)) {
619 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 709 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
620 descr->dmac_cmd_status = 710 descr->dmac_cmd_status =
621 GELIC_NET_DMAC_CMDSTAT_TCPCS | 711 cpu_to_be32(GELIC_DESCR_DMA_CMD_TCP_CHKSUM |
622 GELIC_NET_DMAC_CMDSTAT_END_FRAME; 712 GELIC_DESCR_TX_DMA_FRAME_TAIL);
623 713
624 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) 714 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
625 descr->dmac_cmd_status = 715 descr->dmac_cmd_status =
626 GELIC_NET_DMAC_CMDSTAT_UDPCS | 716 cpu_to_be32(GELIC_DESCR_DMA_CMD_UDP_CHKSUM |
627 GELIC_NET_DMAC_CMDSTAT_END_FRAME; 717 GELIC_DESCR_TX_DMA_FRAME_TAIL);
628 else /* 718 else /*
629 * the stack should checksum non-tcp and non-udp 719 * the stack should checksum non-tcp and non-udp
630 * packets on his own: NETIF_F_IP_CSUM 720 * packets on his own: NETIF_F_IP_CSUM
631 */ 721 */
632 descr->dmac_cmd_status = 722 descr->dmac_cmd_status =
633 GELIC_NET_DMAC_CMDSTAT_NOCS | 723 cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM |
634 GELIC_NET_DMAC_CMDSTAT_END_FRAME; 724 GELIC_DESCR_TX_DMA_FRAME_TAIL);
635 } 725 }
636 } 726 }
637} 727}
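
gelic_descr_set_tx_cmdstat() above asks the hardware to compute the checksum only for IPv4 TCP or UDP frames and always sets the frame-tail bit. A simplified sketch of that decision (flag values are made up; for simplicity every other frame gets the no-checksum command here):

#include <stdio.h>

/* flag values below are invented for the example */
enum {
	CMD_NO_CHKSUM	= 0x1,
	CMD_TCP_CHKSUM	= 0x2,
	CMD_UDP_CHKSUM	= 0x4,
	TX_FRAME_TAIL	= 0x8,
};

/* checksum_partial: the stack asked for offload; is_ipv4/l4_proto describe the frame */
static unsigned int tx_cmdstat(int checksum_partial, int is_ipv4, int l4_proto)
{
	unsigned int cmd = CMD_NO_CHKSUM;

	if (checksum_partial && is_ipv4) {
		if (l4_proto == 6)		/* IPPROTO_TCP */
			cmd = CMD_TCP_CHKSUM;
		else if (l4_proto == 17)	/* IPPROTO_UDP */
			cmd = CMD_UDP_CHKSUM;
	}
	return cmd | TX_FRAME_TAIL;		/* tail bit is always set */
}

int main(void)
{
	printf("tcp/ipv4: %#x\n", tx_cmdstat(1, 1, 6));
	printf("udp/ipv4: %#x\n", tx_cmdstat(1, 1, 17));
	printf("other:    %#x\n", tx_cmdstat(0, 0, 0));
	return 0;
}
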
@@ -662,7 +752,7 @@ static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
662} 752}
663 753
664/** 754/**
665 * gelic_net_prepare_tx_descr_v - get dma address of skb_data 755 * gelic_descr_prepare_tx - setup a descriptor for sending packets
666 * @card: card structure 756 * @card: card structure
667 * @descr: descriptor structure 757 * @descr: descriptor structure
668 * @skb: packet to use 758 * @skb: packet to use
@@ -670,16 +760,19 @@ static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
670 * returns 0 on success, <0 on failure. 760 * returns 0 on success, <0 on failure.
671 * 761 *
672 */ 762 */
673static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card, 763static int gelic_descr_prepare_tx(struct gelic_card *card,
674 struct gelic_net_descr *descr, 764 struct gelic_descr *descr,
675 struct sk_buff *skb) 765 struct sk_buff *skb)
676{ 766{
677 dma_addr_t buf; 767 dma_addr_t buf;
678 768
679 if (card->vlan_index != -1) { 769 if (card->vlan_required) {
680 struct sk_buff *skb_tmp; 770 struct sk_buff *skb_tmp;
771 enum gelic_port_type type;
772
773 type = netdev_port(skb->dev)->type;
681 skb_tmp = gelic_put_vlan_tag(skb, 774 skb_tmp = gelic_put_vlan_tag(skb,
682 card->vlan_id[card->vlan_index]); 775 card->vlan[type].tx);
683 if (!skb_tmp) 776 if (!skb_tmp)
684 return -ENOMEM; 777 return -ENOMEM;
685 skb = skb_tmp; 778 skb = skb_tmp;
@@ -694,12 +787,12 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card,
694 return -ENOMEM; 787 return -ENOMEM;
695 } 788 }
696 789
697 descr->buf_addr = buf; 790 descr->buf_addr = cpu_to_be32(buf);
698 descr->buf_size = skb->len; 791 descr->buf_size = cpu_to_be32(skb->len);
699 descr->skb = skb; 792 descr->skb = skb;
700 descr->data_status = 0; 793 descr->data_status = 0;
701 descr->next_descr_addr = 0; /* terminate hw descr */ 794 descr->next_descr_addr = 0; /* terminate hw descr */
702 gelic_net_set_txdescr_cmdstat(descr, skb); 795 gelic_descr_set_tx_cmdstat(descr, skb);
703 796
704 /* bump free descriptor pointer */ 797 /* bump free descriptor pointer */
705 card->tx_chain.head = descr->next; 798 card->tx_chain.head = descr->next;
@@ -707,20 +800,20 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card,
707} 800}
708 801
709/** 802/**
710 * gelic_net_kick_txdma - enables TX DMA processing 803 * gelic_card_kick_txdma - enables TX DMA processing
711 * @card: card structure 804 * @card: card structure
712 * @descr: descriptor address to enable TX processing at 805 * @descr: descriptor address to enable TX processing at
713 * 806 *
714 */ 807 */
715static int gelic_net_kick_txdma(struct gelic_net_card *card, 808static int gelic_card_kick_txdma(struct gelic_card *card,
716 struct gelic_net_descr *descr) 809 struct gelic_descr *descr)
717{ 810{
718 int status = 0; 811 int status = 0;
719 812
720 if (card->tx_dma_progress) 813 if (card->tx_dma_progress)
721 return 0; 814 return 0;
722 815
723 if (gelic_net_get_descr_status(descr) == GELIC_NET_DESCR_CARDOWNED) { 816 if (gelic_descr_get_status(descr) == GELIC_DESCR_DMA_CARDOWNED) {
724 card->tx_dma_progress = 1; 817 card->tx_dma_progress = 1;
725 status = lv1_net_start_tx_dma(bus_id(card), dev_id(card), 818 status = lv1_net_start_tx_dma(bus_id(card), dev_id(card),
726 descr->bus_addr, 0); 819 descr->bus_addr, 0);
@@ -738,56 +831,56 @@ static int gelic_net_kick_txdma(struct gelic_net_card *card,
738 * 831 *
739 * returns 0 on success, <0 on failure 832 * returns 0 on success, <0 on failure
740 */ 833 */
741static int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 834int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
742{ 835{
743 struct gelic_net_card *card = netdev_priv(netdev); 836 struct gelic_card *card = netdev_card(netdev);
744 struct gelic_net_descr *descr; 837 struct gelic_descr *descr;
745 int result; 838 int result;
746 unsigned long flags; 839 unsigned long flags;
747 840
748 spin_lock_irqsave(&card->tx_dma_lock, flags); 841 spin_lock_irqsave(&card->tx_lock, flags);
749 842
750 gelic_net_release_tx_chain(card, 0); 843 gelic_card_release_tx_chain(card, 0);
751 844
752 descr = gelic_net_get_next_tx_descr(card); 845 descr = gelic_card_get_next_tx_descr(card);
753 if (!descr) { 846 if (!descr) {
754 /* 847 /*
755 * no more descriptors free 848 * no more descriptors free
756 */ 849 */
757 netif_stop_queue(netdev); 850 gelic_card_stop_queues(card);
758 spin_unlock_irqrestore(&card->tx_dma_lock, flags); 851 spin_unlock_irqrestore(&card->tx_lock, flags);
759 return NETDEV_TX_BUSY; 852 return NETDEV_TX_BUSY;
760 } 853 }
761 854
762 result = gelic_net_prepare_tx_descr_v(card, descr, skb); 855 result = gelic_descr_prepare_tx(card, descr, skb);
763 if (result) { 856 if (result) {
764 /* 857 /*
765 * DMA map failed. As chances are that failure 858 * DMA map failed. As chances are that failure
766 * would continue, just release skb and return 859 * would continue, just release skb and return
767 */ 860 */
768 card->netdev->stats.tx_dropped++; 861 netdev->stats.tx_dropped++;
769 dev_kfree_skb_any(skb); 862 dev_kfree_skb_any(skb);
770 spin_unlock_irqrestore(&card->tx_dma_lock, flags); 863 spin_unlock_irqrestore(&card->tx_lock, flags);
771 return NETDEV_TX_OK; 864 return NETDEV_TX_OK;
772 } 865 }
773 /* 866 /*
774 * link this prepared descriptor to previous one 867 * link this prepared descriptor to previous one
775 * to achieve high performance 868 * to achieve high performance
776 */ 869 */
777 descr->prev->next_descr_addr = descr->bus_addr; 870 descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr);
778 /* 871 /*
779 * as hardware descriptor is modified in the above lines, 872 * as hardware descriptor is modified in the above lines,
780 * ensure that the hardware sees it 873 * ensure that the hardware sees it
781 */ 874 */
782 wmb(); 875 wmb();
783 if (gelic_net_kick_txdma(card, descr)) { 876 if (gelic_card_kick_txdma(card, descr)) {
784 /* 877 /*
785 * kick failed. 878 * kick failed.
786 * release descriptors which were just prepared 879 * release descriptors which were just prepared
787 */ 880 */
788 card->netdev->stats.tx_dropped++; 881 netdev->stats.tx_dropped++;
789 gelic_net_release_tx_descr(card, descr); 882 gelic_descr_release_tx(card, descr);
790 gelic_net_release_tx_descr(card, descr->next); 883 gelic_descr_release_tx(card, descr->next);
791 card->tx_chain.tail = descr->next->next; 884 card->tx_chain.tail = descr->next->next;
792 dev_info(ctodev(card), "%s: kick failure\n", __func__); 885 dev_info(ctodev(card), "%s: kick failure\n", __func__);
793 } else { 886 } else {
@@ -795,7 +888,7 @@ static int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
795 netdev->trans_start = jiffies; 888 netdev->trans_start = jiffies;
796 } 889 }
797 890
798 spin_unlock_irqrestore(&card->tx_dma_lock, flags); 891 spin_unlock_irqrestore(&card->tx_lock, flags);
799 return NETDEV_TX_OK; 892 return NETDEV_TX_OK;
800} 893}
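
gelic_net_xmit() above relies on a strict submit order: fill the new descriptor, link it from the previous descriptor's hardware next pointer, issue wmb(), and only then kick the DMA engine, so the device can never chase a half-built descriptor. A compiler-level sketch of that ordering (__sync_synchronize() stands in for wmb(), the kick is a stub, and the addresses are fake):

#include <stdint.h>
#include <stdio.h>

struct descr {
	uint32_t buf_addr;
	uint32_t next_descr_addr;
};

static void kick_dma(uint32_t bus_addr)
{
	printf("start tx dma at %#x\n", (unsigned)bus_addr);
}

int main(void)
{
	struct descr prev = { .buf_addr = 0x2000 };
	struct descr new_descr = { 0 };
	uint32_t new_bus_addr = 0x3000;		/* fake DMA address of new_descr */

	new_descr.buf_addr = 0x4000;		/* 1. fill the new descriptor */
	prev.next_descr_addr = new_bus_addr;	/* 2. link it after the old tail */
	__sync_synchronize();			/* 3. wmb(): make the writes visible first */
	kick_dma(new_bus_addr);			/* 4. only then start the engine */

	printf("prev now points at %#x, new buf at %#x\n",
	       (unsigned)prev.next_descr_addr, (unsigned)new_descr.buf_addr);
	return 0;
}
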
801 894
@@ -803,30 +896,34 @@ static int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
803 * gelic_net_pass_skb_up - takes an skb from a descriptor and passes it on 896 * gelic_net_pass_skb_up - takes an skb from a descriptor and passes it on
804 * @descr: descriptor to process 897 * @descr: descriptor to process
805 * @card: card structure 898 * @card: card structure
899 * @netdev: net_device structure to be passed packet
806 * 900 *
807 * iommu-unmaps the skb, fills out skb structure and passes the data to the 901 * iommu-unmaps the skb, fills out skb structure and passes the data to the
808 * stack. The descriptor state is not changed. 902 * stack. The descriptor state is not changed.
809 */ 903 */
810static void gelic_net_pass_skb_up(struct gelic_net_descr *descr, 904static void gelic_net_pass_skb_up(struct gelic_descr *descr,
811 struct gelic_net_card *card) 905 struct gelic_card *card,
906 struct net_device *netdev)
907
812{ 908{
813 struct sk_buff *skb; 909 struct sk_buff *skb = descr->skb;
814 struct net_device *netdev;
815 u32 data_status, data_error; 910 u32 data_status, data_error;
816 911
817 data_status = descr->data_status; 912 data_status = be32_to_cpu(descr->data_status);
818 data_error = descr->data_error; 913 data_error = be32_to_cpu(descr->data_error);
819 netdev = card->netdev;
820 /* unmap skb buffer */ 914 /* unmap skb buffer */
821 skb = descr->skb; 915 dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
822 dma_unmap_single(ctodev(card), descr->buf_addr, GELIC_NET_MAX_MTU, 916 GELIC_NET_MAX_MTU,
823 DMA_FROM_DEVICE); 917 DMA_FROM_DEVICE);
824 918
825 skb_put(skb, descr->valid_size? descr->valid_size : descr->result_size); 919 skb_put(skb, be32_to_cpu(descr->valid_size)?
920 be32_to_cpu(descr->valid_size) :
921 be32_to_cpu(descr->result_size));
826 if (!descr->valid_size) 922 if (!descr->valid_size)
827 dev_info(ctodev(card), "buffer full %x %x %x\n", 923 dev_info(ctodev(card), "buffer full %x %x %x\n",
828 descr->result_size, descr->buf_size, 924 be32_to_cpu(descr->result_size),
829 descr->dmac_cmd_status); 925 be32_to_cpu(descr->buf_size),
926 be32_to_cpu(descr->dmac_cmd_status));
830 927
831 descr->skb = NULL; 928 descr->skb = NULL;
832 /* 929 /*
@@ -838,8 +935,8 @@ static void gelic_net_pass_skb_up(struct gelic_net_descr *descr,
838 935
839 /* checksum offload */ 936 /* checksum offload */
840 if (card->rx_csum) { 937 if (card->rx_csum) {
841 if ((data_status & GELIC_NET_DATA_STATUS_CHK_MASK) && 938 if ((data_status & GELIC_DESCR_DATA_STATUS_CHK_MASK) &&
842 (!(data_error & GELIC_NET_DATA_ERROR_CHK_MASK))) 939 (!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK)))
843 skb->ip_summed = CHECKSUM_UNNECESSARY; 940 skb->ip_summed = CHECKSUM_UNNECESSARY;
844 else 941 else
845 skb->ip_summed = CHECKSUM_NONE; 942 skb->ip_summed = CHECKSUM_NONE;
@@ -847,15 +944,15 @@ static void gelic_net_pass_skb_up(struct gelic_net_descr *descr,
847 skb->ip_summed = CHECKSUM_NONE; 944 skb->ip_summed = CHECKSUM_NONE;
848 945
849 /* update netdevice statistics */ 946 /* update netdevice statistics */
850 card->netdev->stats.rx_packets++; 947 netdev->stats.rx_packets++;
851 card->netdev->stats.rx_bytes += skb->len; 948 netdev->stats.rx_bytes += skb->len;
852 949
853 /* pass skb up to stack */ 950 /* pass skb up to stack */
854 netif_receive_skb(skb); 951 netif_receive_skb(skb);
855} 952}
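
gelic_net_pass_skb_up() above marks a frame CHECKSUM_UNNECESSARY only when receive checksumming is enabled, the hardware reports the frame was checked, and no checksum error bit is set. A tiny sketch of that decision (the mask values are placeholders, not the real status layout):

#include <stdio.h>

#define DATA_STATUS_CHK_MASK	0x3	/* assumed "checksum checked" bits */
#define DATA_ERROR_CHK_MASK	0x3	/* assumed "checksum failed" bits */

static const char *rx_csum_result(int rx_csum_enabled,
				  unsigned int data_status,
				  unsigned int data_error)
{
	if (rx_csum_enabled &&
	    (data_status & DATA_STATUS_CHK_MASK) &&
	    !(data_error & DATA_ERROR_CHK_MASK))
		return "CHECKSUM_UNNECESSARY";
	return "CHECKSUM_NONE";
}

int main(void)
{
	printf("%s\n", rx_csum_result(1, 0x1, 0x0));	/* verified by hardware */
	printf("%s\n", rx_csum_result(1, 0x1, 0x1));	/* hardware saw an error */
	printf("%s\n", rx_csum_result(0, 0x1, 0x0));	/* offload disabled */
	return 0;
}
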
856 953
857/** 954/**
858 * gelic_net_decode_one_descr - processes an rx descriptor 955 * gelic_card_decode_one_descr - processes an rx descriptor
859 * @card: card structure 956 * @card: card structure
860 * 957 *
861 * returns 1 if a packet has been sent to the stack, otherwise 0 958 * returns 1 if a packet has been sent to the stack, otherwise 0
@@ -863,36 +960,56 @@ static void gelic_net_pass_skb_up(struct gelic_net_descr *descr,
863 * processes an rx descriptor by iommu-unmapping the data buffer and passing 960 * processes an rx descriptor by iommu-unmapping the data buffer and passing
864 * the packet up to the stack 961 * the packet up to the stack
865 */ 962 */
866static int gelic_net_decode_one_descr(struct gelic_net_card *card) 963static int gelic_card_decode_one_descr(struct gelic_card *card)
867{ 964{
868 enum gelic_net_descr_status status; 965 enum gelic_descr_dma_status status;
869 struct gelic_net_descr_chain *chain = &card->rx_chain; 966 struct gelic_descr_chain *chain = &card->rx_chain;
870 struct gelic_net_descr *descr = chain->tail; 967 struct gelic_descr *descr = chain->head;
968 struct net_device *netdev = NULL;
871 int dmac_chain_ended; 969 int dmac_chain_ended;
872 970
873 status = gelic_net_get_descr_status(descr); 971 status = gelic_descr_get_status(descr);
874 /* is this descriptor terminated with next_descr == NULL? */ 972 /* is this descriptor terminated with next_descr == NULL? */
875 dmac_chain_ended = 973 dmac_chain_ended =
876 descr->dmac_cmd_status & GELIC_NET_DMAC_CMDSTAT_RXDCEIS; 974 be32_to_cpu(descr->dmac_cmd_status) &
975 GELIC_DESCR_RX_DMA_CHAIN_END;
877 976
878 if (status == GELIC_NET_DESCR_CARDOWNED) 977 if (status == GELIC_DESCR_DMA_CARDOWNED)
879 return 0; 978 return 0;
880 979
881 if (status == GELIC_NET_DESCR_NOT_IN_USE) { 980 if (status == GELIC_DESCR_DMA_NOT_IN_USE) {
882 dev_dbg(ctodev(card), "dormant descr? %p\n", descr); 981 dev_dbg(ctodev(card), "dormant descr? %p\n", descr);
883 return 0; 982 return 0;
884 } 983 }
885 984
886 if ((status == GELIC_NET_DESCR_RESPONSE_ERROR) || 985 /* netdevice select */
887 (status == GELIC_NET_DESCR_PROTECTION_ERROR) || 986 if (card->vlan_required) {
888 (status == GELIC_NET_DESCR_FORCE_END)) { 987 unsigned int i;
988 u16 vid;
989 vid = *(u16 *)(descr->skb->data) & VLAN_VID_MASK;
990 for (i = 0; i < GELIC_PORT_MAX; i++) {
991 if (card->vlan[i].rx == vid) {
992 netdev = card->netdev[i];
993 break;
994 }
995 };
996 if (GELIC_PORT_MAX <= i) {
997 pr_info("%s: unknown packet vid=%x\n", __func__, vid);
998 goto refill;
999 }
1000 } else
1001 netdev = card->netdev[GELIC_PORT_ETHERNET];
1002
1003 if ((status == GELIC_DESCR_DMA_RESPONSE_ERROR) ||
1004 (status == GELIC_DESCR_DMA_PROTECTION_ERROR) ||
1005 (status == GELIC_DESCR_DMA_FORCE_END)) {
889 dev_info(ctodev(card), "dropping RX descriptor with state %x\n", 1006 dev_info(ctodev(card), "dropping RX descriptor with state %x\n",
890 status); 1007 status);
891 card->netdev->stats.rx_dropped++; 1008 netdev->stats.rx_dropped++;
892 goto refill; 1009 goto refill;
893 } 1010 }
894 1011
895 if (status == GELIC_NET_DESCR_BUFFER_FULL) { 1012 if (status == GELIC_DESCR_DMA_BUFFER_FULL) {
896 /* 1013 /*
897 * Buffer full would occur if and only if 1014 * Buffer full would occur if and only if
898 * the frame length was longer than the size of this 1015 * the frame length was longer than the size of this
@@ -909,14 +1026,14 @@ static int gelic_net_decode_one_descr(struct gelic_net_card *card)
909 * descriptors any other than FRAME_END here should 1026 * descriptors any other than FRAME_END here should
910 * be treated as error. 1027 * be treated as error.
911 */ 1028 */
912 if (status != GELIC_NET_DESCR_FRAME_END) { 1029 if (status != GELIC_DESCR_DMA_FRAME_END) {
913 dev_dbg(ctodev(card), "RX descriptor with state %x\n", 1030 dev_dbg(ctodev(card), "RX descriptor with state %x\n",
914 status); 1031 status);
915 goto refill; 1032 goto refill;
916 } 1033 }
917 1034
918 /* ok, we've got a packet in descr */ 1035 /* ok, we've got a packet in descr */
919 gelic_net_pass_skb_up(descr, card); 1036 gelic_net_pass_skb_up(descr, card, netdev);
920refill: 1037refill:
921 /* 1038 /*
922 * So that always DMAC can see the end 1039 * So that always DMAC can see the end
@@ -926,21 +1043,21 @@ refill:
926 descr->next_descr_addr = 0; 1043 descr->next_descr_addr = 0;
927 1044
928 /* change the descriptor state: */ 1045 /* change the descriptor state: */
929 gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); 1046 gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
930 1047
931 /* 1048 /*
932 * this call can fail, but for now, just leave this 1049 * this call can fail, but for now, just leave this
933 * descriptor without skb 1050 * descriptor without skb
934 */ 1051 */
935 gelic_net_prepare_rx_descr(card, descr); 1052 gelic_descr_prepare_rx(card, descr);
936 1053
937 chain->head = descr; 1054 chain->tail = descr;
938 chain->tail = descr->next; 1055 chain->head = descr->next;
939 1056
940 /* 1057 /*
941 * Set this descriptor the end of the chain. 1058 * Set this descriptor the end of the chain.
942 */ 1059 */
943 descr->prev->next_descr_addr = descr->bus_addr; 1060 descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr);
944 1061
945 /* 1062 /*
946 * If dmac chain was met, DMAC stopped. 1063 * If dmac chain was met, DMAC stopped.
@@ -956,29 +1073,27 @@ refill:
956 1073
957/** 1074/**
958 * gelic_net_poll - NAPI poll function called by the stack to return packets 1075 * gelic_net_poll - NAPI poll function called by the stack to return packets
959 * @netdev: interface device structure 1076 * @napi: napi structure
960 * @budget: number of packets we can pass to the stack at most 1077 * @budget: number of packets we can pass to the stack at most
961 * 1078 *
962 * returns 0 if no more packets available to the driver/stack. Returns 1, 1079 * returns the number of the processed packets
963 * if the quota is exceeded, but the driver has still packets.
964 * 1080 *
965 */ 1081 */
966static int gelic_net_poll(struct napi_struct *napi, int budget) 1082static int gelic_net_poll(struct napi_struct *napi, int budget)
967{ 1083{
968 struct gelic_net_card *card = container_of(napi, struct gelic_net_card, napi); 1084 struct gelic_card *card = container_of(napi, struct gelic_card, napi);
969 struct net_device *netdev = card->netdev;
970 int packets_done = 0; 1085 int packets_done = 0;
971 1086
972 while (packets_done < budget) { 1087 while (packets_done < budget) {
973 if (!gelic_net_decode_one_descr(card)) 1088 if (!gelic_card_decode_one_descr(card))
974 break; 1089 break;
975 1090
976 packets_done++; 1091 packets_done++;
977 } 1092 }
978 1093
979 if (packets_done < budget) { 1094 if (packets_done < budget) {
980 netif_rx_complete(netdev, napi); 1095 napi_complete(napi);
981 gelic_net_rx_irq_on(card); 1096 gelic_card_rx_irq_on(card);
982 } 1097 }
983 return packets_done; 1098 return packets_done;
984} 1099}
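
gelic_net_poll() above follows the usual NAPI contract: consume at most budget packets, and if the ring runs dry first, complete NAPI and re-enable the RX interrupt; otherwise stay in polling mode. A self-contained sketch of that loop (the helpers are stubs, not kernel API):

#include <stdio.h>

static int pending = 5;			/* pretend 5 packets are queued */

static int decode_one_descr(void)	/* stands in for gelic_card_decode_one_descr() */
{
	if (!pending)
		return 0;
	pending--;
	return 1;
}

static int poll(int budget)
{
	int done = 0;

	while (done < budget && decode_one_descr())
		done++;

	if (done < budget)
		printf("ring empty: napi_complete + rx irq on\n");
	return done;
}

int main(void)
{
	printf("first poll handled %d\n", poll(4));	/* budget exhausted, keep polling */
	printf("second poll handled %d\n", poll(4));	/* completes early, irq re-enabled */
	return 0;
}
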
@@ -989,7 +1104,7 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
989 * 1104 *
990 * returns 0 on success, <0 on failure 1105 * returns 0 on success, <0 on failure
991 */ 1106 */
992static int gelic_net_change_mtu(struct net_device *netdev, int new_mtu) 1107int gelic_net_change_mtu(struct net_device *netdev, int new_mtu)
993{ 1108{
994 /* no need to re-alloc skbs or so -- the max mtu is about 2.3k 1109 /* no need to re-alloc skbs or so -- the max mtu is about 2.3k
995 * and mtu is outbound only anyway */ 1110 * and mtu is outbound only anyway */
@@ -1002,13 +1117,12 @@ static int gelic_net_change_mtu(struct net_device *netdev, int new_mtu)
1002} 1117}
1003 1118
1004/** 1119/**
1005 * gelic_net_interrupt - event handler for gelic_net 1120 * gelic_card_interrupt - event handler for gelic_net
1006 */ 1121 */
1007static irqreturn_t gelic_net_interrupt(int irq, void *ptr) 1122static irqreturn_t gelic_card_interrupt(int irq, void *ptr)
1008{ 1123{
1009 unsigned long flags; 1124 unsigned long flags;
1010 struct net_device *netdev = ptr; 1125 struct gelic_card *card = ptr;
1011 struct gelic_net_card *card = netdev_priv(netdev);
1012 u64 status; 1126 u64 status;
1013 1127
1014 status = card->irq_status; 1128 status = card->irq_status;
@@ -1016,24 +1130,37 @@ static irqreturn_t gelic_net_interrupt(int irq, void *ptr)
1016 if (!status) 1130 if (!status)
1017 return IRQ_NONE; 1131 return IRQ_NONE;
1018 1132
1133 status &= card->irq_mask;
1134
1019 if (card->rx_dma_restart_required) { 1135 if (card->rx_dma_restart_required) {
1020 card->rx_dma_restart_required = 0; 1136 card->rx_dma_restart_required = 0;
1021 gelic_net_enable_rxdmac(card); 1137 gelic_card_enable_rxdmac(card);
1022 } 1138 }
1023 1139
1024 if (status & GELIC_NET_RXINT) { 1140 if (status & GELIC_CARD_RXINT) {
1025 gelic_net_rx_irq_off(card); 1141 gelic_card_rx_irq_off(card);
1026 netif_rx_schedule(netdev, &card->napi); 1142 napi_schedule(&card->napi);
1027 } 1143 }
1028 1144
1029 if (status & GELIC_NET_TXINT) { 1145 if (status & GELIC_CARD_TXINT) {
1030 spin_lock_irqsave(&card->tx_dma_lock, flags); 1146 spin_lock_irqsave(&card->tx_lock, flags);
1031 card->tx_dma_progress = 0; 1147 card->tx_dma_progress = 0;
1032 gelic_net_release_tx_chain(card, 0); 1148 gelic_card_release_tx_chain(card, 0);
1033 /* kick outstanding tx descriptor if any */ 1149 /* kick outstanding tx descriptor if any */
1034 gelic_net_kick_txdma(card, card->tx_chain.tail); 1150 gelic_card_kick_txdma(card, card->tx_chain.tail);
1035 spin_unlock_irqrestore(&card->tx_dma_lock, flags); 1151 spin_unlock_irqrestore(&card->tx_lock, flags);
1036 } 1152 }
1153
1154 /* ether port status changed */
1155 if (status & GELIC_CARD_PORT_STATUS_CHANGED)
1156 gelic_card_get_ether_port_status(card, 1);
1157
1158#ifdef CONFIG_GELIC_WIRELESS
1159 if (status & (GELIC_CARD_WLAN_EVENT_RECEIVED |
1160 GELIC_CARD_WLAN_COMMAND_COMPLETED))
1161 gelic_wl_interrupt(card->netdev[GELIC_PORT_WIRELESS], status);
1162#endif
1163
1037 return IRQ_HANDLED; 1164 return IRQ_HANDLED;
1038} 1165}
1039 1166
@@ -1044,55 +1171,17 @@ static irqreturn_t gelic_net_interrupt(int irq, void *ptr)
1044 * 1171 *
1045 * see Documentation/networking/netconsole.txt 1172 * see Documentation/networking/netconsole.txt
1046 */ 1173 */
1047static void gelic_net_poll_controller(struct net_device *netdev) 1174void gelic_net_poll_controller(struct net_device *netdev)
1048{ 1175{
1049 struct gelic_net_card *card = netdev_priv(netdev); 1176 struct gelic_card *card = netdev_card(netdev);
1050 1177
1051 gelic_net_set_irq_mask(card, 0); 1178 gelic_card_set_irq_mask(card, 0);
1052 gelic_net_interrupt(netdev->irq, netdev); 1179 gelic_card_interrupt(netdev->irq, netdev);
1053 gelic_net_set_irq_mask(card, card->ghiintmask); 1180 gelic_card_set_irq_mask(card, card->irq_mask);
1054} 1181}
1055#endif /* CONFIG_NET_POLL_CONTROLLER */ 1182#endif /* CONFIG_NET_POLL_CONTROLLER */
1056 1183
1057/** 1184/**
1058 * gelic_net_open_device - open device and map dma region
1059 * @card: card structure
1060 */
1061static int gelic_net_open_device(struct gelic_net_card *card)
1062{
1063 int result;
1064
1065 result = ps3_sb_event_receive_port_setup(card->dev, PS3_BINDING_CPU_ANY,
1066 &card->netdev->irq);
1067
1068 if (result) {
1069 dev_info(ctodev(card),
1070 "%s:%d: gelic_net_open_device failed (%d)\n",
1071 __func__, __LINE__, result);
1072 result = -EPERM;
1073 goto fail_alloc_irq;
1074 }
1075
1076 result = request_irq(card->netdev->irq, gelic_net_interrupt,
1077 IRQF_DISABLED, card->netdev->name, card->netdev);
1078
1079 if (result) {
1080 dev_info(ctodev(card), "%s:%d: request_irq failed (%d)\n",
1081 __func__, __LINE__, result);
1082 goto fail_request_irq;
1083 }
1084
1085 return 0;
1086
1087fail_request_irq:
1088 ps3_sb_event_receive_port_destroy(card->dev, card->netdev->irq);
1089 card->netdev->irq = NO_IRQ;
1090fail_alloc_irq:
1091 return result;
1092}
1093
1094
1095/**
1096 * gelic_net_open - called upon ifonfig up 1185 * gelic_net_open - called upon ifonfig up
1097 * @netdev: interface device structure 1186 * @netdev: interface device structure
1098 * 1187 *
@@ -1101,169 +1190,88 @@ fail_alloc_irq:
1101 * gelic_net_open allocates all the descriptors and memory needed for 1190 * gelic_net_open allocates all the descriptors and memory needed for
1102 * operation, sets up multicast list and enables interrupts 1191 * operation, sets up multicast list and enables interrupts
1103 */ 1192 */
1104static int gelic_net_open(struct net_device *netdev) 1193int gelic_net_open(struct net_device *netdev)
1105{ 1194{
1106 struct gelic_net_card *card = netdev_priv(netdev); 1195 struct gelic_card *card = netdev_card(netdev);
1107
1108 dev_dbg(ctodev(card), " -> %s:%d\n", __func__, __LINE__);
1109
1110 gelic_net_open_device(card);
1111
1112 if (gelic_net_init_chain(card, &card->tx_chain,
1113 card->descr, GELIC_NET_TX_DESCRIPTORS))
1114 goto alloc_tx_failed;
1115 if (gelic_net_init_chain(card, &card->rx_chain,
1116 card->descr + GELIC_NET_TX_DESCRIPTORS,
1117 GELIC_NET_RX_DESCRIPTORS))
1118 goto alloc_rx_failed;
1119
1120 /* head of chain */
1121 card->tx_top = card->tx_chain.head;
1122 card->rx_top = card->rx_chain.head;
1123 dev_dbg(ctodev(card), "descr rx %p, tx %p, size %#lx, num %#x\n",
1124 card->rx_top, card->tx_top, sizeof(struct gelic_net_descr),
1125 GELIC_NET_RX_DESCRIPTORS);
1126 /* allocate rx skbs */
1127 if (gelic_net_alloc_rx_skbs(card))
1128 goto alloc_skbs_failed;
1129 1196
1130 napi_enable(&card->napi); 1197 dev_dbg(ctodev(card), " -> %s %p\n", __func__, netdev);
1131
1132 card->tx_dma_progress = 0;
1133 card->ghiintmask = GELIC_NET_RXINT | GELIC_NET_TXINT;
1134 1198
1135 gelic_net_set_irq_mask(card, card->ghiintmask); 1199 gelic_card_up(card);
1136 gelic_net_enable_rxdmac(card);
1137 1200
1138 netif_start_queue(netdev); 1201 netif_start_queue(netdev);
1139 netif_carrier_on(netdev); 1202 gelic_card_get_ether_port_status(card, 1);
1140 1203
1204 dev_dbg(ctodev(card), " <- %s\n", __func__);
1141 return 0; 1205 return 0;
1142
1143alloc_skbs_failed:
1144 gelic_net_free_chain(card, card->rx_top);
1145alloc_rx_failed:
1146 gelic_net_free_chain(card, card->tx_top);
1147alloc_tx_failed:
1148 return -ENOMEM;
1149} 1206}
1150 1207
1151static void gelic_net_get_drvinfo (struct net_device *netdev, 1208void gelic_net_get_drvinfo(struct net_device *netdev,
1152 struct ethtool_drvinfo *info) 1209 struct ethtool_drvinfo *info)
1153{ 1210{
1154 strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1); 1211 strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1);
1155 strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1); 1212 strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1);
1156} 1213}
1157 1214
1158static int gelic_net_get_settings(struct net_device *netdev, 1215static int gelic_ether_get_settings(struct net_device *netdev,
1159 struct ethtool_cmd *cmd) 1216 struct ethtool_cmd *cmd)
1160{ 1217{
1161 struct gelic_net_card *card = netdev_priv(netdev); 1218 struct gelic_card *card = netdev_card(netdev);
1162 int status;
1163 u64 v1, v2;
1164 int speed, duplex;
1165 1219
1166 speed = duplex = -1; 1220 gelic_card_get_ether_port_status(card, 0);
1167 status = lv1_net_control(bus_id(card), dev_id(card),
1168 GELIC_NET_GET_ETH_PORT_STATUS, GELIC_NET_PORT, 0, 0,
1169 &v1, &v2);
1170 if (status) {
1171 /* link down */
1172 } else {
1173 if (v1 & GELIC_NET_FULL_DUPLEX) {
1174 duplex = DUPLEX_FULL;
1175 } else {
1176 duplex = DUPLEX_HALF;
1177 }
1178 1221
1179 if (v1 & GELIC_NET_SPEED_10 ) { 1222 if (card->ether_port_status & GELIC_LV1_ETHER_FULL_DUPLEX)
1180 speed = SPEED_10; 1223 cmd->duplex = DUPLEX_FULL;
1181 } else if (v1 & GELIC_NET_SPEED_100) { 1224 else
1182 speed = SPEED_100; 1225 cmd->duplex = DUPLEX_HALF;
1183 } else if (v1 & GELIC_NET_SPEED_1000) { 1226
1184 speed = SPEED_1000; 1227 switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) {
1185 } 1228 case GELIC_LV1_ETHER_SPEED_10:
1229 cmd->speed = SPEED_10;
1230 break;
1231 case GELIC_LV1_ETHER_SPEED_100:
1232 cmd->speed = SPEED_100;
1233 break;
1234 case GELIC_LV1_ETHER_SPEED_1000:
1235 cmd->speed = SPEED_1000;
1236 break;
1237 default:
1238 pr_info("%s: speed unknown\n", __func__);
1239 cmd->speed = SPEED_10;
1240 break;
1186 } 1241 }
1242
1187 cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | 1243 cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg |
1188 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 1244 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
1189 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 1245 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1190 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; 1246 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
1191 cmd->advertising = cmd->supported; 1247 cmd->advertising = cmd->supported;
1192 cmd->speed = speed;
1193 cmd->duplex = duplex;
1194 cmd->autoneg = AUTONEG_ENABLE; /* always enabled */ 1248 cmd->autoneg = AUTONEG_ENABLE; /* always enabled */
1195 cmd->port = PORT_TP; 1249 cmd->port = PORT_TP;
1196 1250
1197 return 0; 1251 return 0;
1198} 1252}
1199 1253
1200static u32 gelic_net_get_link(struct net_device *netdev) 1254u32 gelic_net_get_rx_csum(struct net_device *netdev)
1201{ 1255{
1202 struct gelic_net_card *card = netdev_priv(netdev); 1256 struct gelic_card *card = netdev_card(netdev);
1203 int status;
1204 u64 v1, v2;
1205 int link;
1206
1207 status = lv1_net_control(bus_id(card), dev_id(card),
1208 GELIC_NET_GET_ETH_PORT_STATUS, GELIC_NET_PORT, 0, 0,
1209 &v1, &v2);
1210 if (status)
1211 return 0; /* link down */
1212
1213 if (v1 & GELIC_NET_LINK_UP)
1214 link = 1;
1215 else
1216 link = 0;
1217
1218 return link;
1219}
1220
1221static int gelic_net_nway_reset(struct net_device *netdev)
1222{
1223 if (netif_running(netdev)) {
1224 gelic_net_stop(netdev);
1225 gelic_net_open(netdev);
1226 }
1227 return 0;
1228}
1229
1230static u32 gelic_net_get_tx_csum(struct net_device *netdev)
1231{
1232 return (netdev->features & NETIF_F_IP_CSUM) != 0;
1233}
1234
1235static int gelic_net_set_tx_csum(struct net_device *netdev, u32 data)
1236{
1237 if (data)
1238 netdev->features |= NETIF_F_IP_CSUM;
1239 else
1240 netdev->features &= ~NETIF_F_IP_CSUM;
1241
1242 return 0;
1243}
1244
1245static u32 gelic_net_get_rx_csum(struct net_device *netdev)
1246{
1247 struct gelic_net_card *card = netdev_priv(netdev);
1248 1257
1249 return card->rx_csum; 1258 return card->rx_csum;
1250} 1259}
1251 1260
1252static int gelic_net_set_rx_csum(struct net_device *netdev, u32 data) 1261int gelic_net_set_rx_csum(struct net_device *netdev, u32 data)
1253{ 1262{
1254 struct gelic_net_card *card = netdev_priv(netdev); 1263 struct gelic_card *card = netdev_card(netdev);
1255 1264
1256 card->rx_csum = data; 1265 card->rx_csum = data;
1257 return 0; 1266 return 0;
1258} 1267}
1259 1268
1260static struct ethtool_ops gelic_net_ethtool_ops = { 1269static struct ethtool_ops gelic_ether_ethtool_ops = {
1261 .get_drvinfo = gelic_net_get_drvinfo, 1270 .get_drvinfo = gelic_net_get_drvinfo,
1262 .get_settings = gelic_net_get_settings, 1271 .get_settings = gelic_ether_get_settings,
1263 .get_link = gelic_net_get_link, 1272 .get_link = ethtool_op_get_link,
1264 .nway_reset = gelic_net_nway_reset, 1273 .get_tx_csum = ethtool_op_get_tx_csum,
1265 .get_tx_csum = gelic_net_get_tx_csum, 1274 .set_tx_csum = ethtool_op_set_tx_csum,
1266 .set_tx_csum = gelic_net_set_tx_csum,
1267 .get_rx_csum = gelic_net_get_rx_csum, 1275 .get_rx_csum = gelic_net_get_rx_csum,
1268 .set_rx_csum = gelic_net_set_rx_csum, 1276 .set_rx_csum = gelic_net_set_rx_csum,
1269}; 1277};
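
The GELIC_LV1_ETHER_* port status bits (defined in ps3_gelic_net.h later in this patch) map onto the ethtool speed/duplex fields exactly as gelic_ether_get_settings() does above. A minimal user-space sketch of that decode, illustrative only and not part of the driver:

#include <stdio.h>
#include <stdint.h>

/* bit values as defined in ps3_gelic_net.h in this patch */
#define GELIC_LV1_ETHER_LINK_UP		0x0000000000000001ULL
#define GELIC_LV1_ETHER_FULL_DUPLEX	0x0000000000000002ULL
#define GELIC_LV1_ETHER_SPEED_10	0x0000000000000010ULL
#define GELIC_LV1_ETHER_SPEED_100	0x0000000000000020ULL
#define GELIC_LV1_ETHER_SPEED_1000	0x0000000000000040ULL
#define GELIC_LV1_ETHER_SPEED_MASK	0x0000000000000070ULL

static void decode_port_status(uint64_t status)
{
	const char *speed;

	switch (status & GELIC_LV1_ETHER_SPEED_MASK) {
	case GELIC_LV1_ETHER_SPEED_10:		speed = "10";	break;
	case GELIC_LV1_ETHER_SPEED_100:		speed = "100";	break;
	case GELIC_LV1_ETHER_SPEED_1000:	speed = "1000";	break;
	default:				speed = "?";	break;
	}
	printf("link %s, %s Mbit/s, %s duplex\n",
	       (status & GELIC_LV1_ETHER_LINK_UP) ? "up" : "down", speed,
	       (status & GELIC_LV1_ETHER_FULL_DUPLEX) ? "full" : "half");
}

int main(void)
{
	/* sample: link up, 1000 Mbit/s, full duplex */
	decode_port_status(GELIC_LV1_ETHER_LINK_UP |
			   GELIC_LV1_ETHER_FULL_DUPLEX |
			   GELIC_LV1_ETHER_SPEED_1000);
	return 0;
}
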
@@ -1277,9 +1285,9 @@ static struct ethtool_ops gelic_net_ethtool_ops = {
1277 */ 1285 */
1278static void gelic_net_tx_timeout_task(struct work_struct *work) 1286static void gelic_net_tx_timeout_task(struct work_struct *work)
1279{ 1287{
1280 struct gelic_net_card *card = 1288 struct gelic_card *card =
1281 container_of(work, struct gelic_net_card, tx_timeout_task); 1289 container_of(work, struct gelic_card, tx_timeout_task);
1282 struct net_device *netdev = card->netdev; 1290 struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET];
1283 1291
1284 dev_info(ctodev(card), "%s:Timed out. Restarting... \n", __func__); 1292 dev_info(ctodev(card), "%s:Timed out. Restarting... \n", __func__);
1285 1293
@@ -1302,11 +1310,11 @@ out:
1302 * 1310 *
1303 * called, if tx hangs. Schedules a task that resets the interface 1311 * called, if tx hangs. Schedules a task that resets the interface
1304 */ 1312 */
1305static void gelic_net_tx_timeout(struct net_device *netdev) 1313void gelic_net_tx_timeout(struct net_device *netdev)
1306{ 1314{
1307 struct gelic_net_card *card; 1315 struct gelic_card *card;
1308 1316
1309 card = netdev_priv(netdev); 1317 card = netdev_card(netdev);
1310 atomic_inc(&card->tx_timeout_task_counter); 1318 atomic_inc(&card->tx_timeout_task_counter);
1311 if (netdev->flags & IFF_UP) 1319 if (netdev->flags & IFF_UP)
1312 schedule_work(&card->tx_timeout_task); 1320 schedule_work(&card->tx_timeout_task);
@@ -1315,12 +1323,13 @@ static void gelic_net_tx_timeout(struct net_device *netdev)
1315} 1323}
1316 1324
1317/** 1325/**
1318 * gelic_net_setup_netdev_ops - initialization of net_device operations 1326 * gelic_ether_setup_netdev_ops - initialization of net_device operations
1319 * @netdev: net_device structure 1327 * @netdev: net_device structure
1320 * 1328 *
1321 * fills out function pointers in the net_device structure 1329 * fills out function pointers in the net_device structure
1322 */ 1330 */
1323static void gelic_net_setup_netdev_ops(struct net_device *netdev) 1331static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
1332 struct napi_struct *napi)
1324{ 1333{
1325 netdev->open = &gelic_net_open; 1334 netdev->open = &gelic_net_open;
1326 netdev->stop = &gelic_net_stop; 1335 netdev->stop = &gelic_net_stop;
@@ -1330,163 +1339,239 @@ static void gelic_net_setup_netdev_ops(struct net_device *netdev)
1330 /* tx watchdog */ 1339 /* tx watchdog */
1331 netdev->tx_timeout = &gelic_net_tx_timeout; 1340 netdev->tx_timeout = &gelic_net_tx_timeout;
1332 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; 1341 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
1333 netdev->ethtool_ops = &gelic_net_ethtool_ops; 1342 /* NAPI */
1343 netif_napi_add(netdev, napi,
1344 gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
1345 netdev->ethtool_ops = &gelic_ether_ethtool_ops;
1346#ifdef CONFIG_NET_POLL_CONTROLLER
1347 netdev->poll_controller = gelic_net_poll_controller;
1348#endif
1334} 1349}
1335 1350
1336/** 1351/**
1337 * gelic_net_setup_netdev - initialization of net_device 1352 * gelic_ether_setup_netdev - initialization of net_device
1353 * @netdev: net_device structure
1338 * @card: card structure 1354 * @card: card structure
1339 * 1355 *
1340 * Returns 0 on success or <0 on failure 1356 * Returns 0 on success or <0 on failure
1341 * 1357 *
1342 * gelic_net_setup_netdev initializes the net_device structure 1358 * gelic_ether_setup_netdev initializes the net_device structure
1359 * and register it.
1343 **/ 1360 **/
1344static int gelic_net_setup_netdev(struct gelic_net_card *card) 1361int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
1345{ 1362{
1346 struct net_device *netdev = card->netdev;
1347 struct sockaddr addr;
1348 unsigned int i;
1349 int status; 1363 int status;
1350 u64 v1, v2; 1364 u64 v1, v2;
1351 DECLARE_MAC_BUF(mac); 1365 DECLARE_MAC_BUF(mac);
1352 1366
1353 SET_NETDEV_DEV(netdev, &card->dev->core);
1354 spin_lock_init(&card->tx_dma_lock);
1355
1356 card->rx_csum = GELIC_NET_RX_CSUM_DEFAULT;
1357
1358 gelic_net_setup_netdev_ops(netdev);
1359
1360 netif_napi_add(netdev, &card->napi,
1361 gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
1362
1363 netdev->features = NETIF_F_IP_CSUM; 1367 netdev->features = NETIF_F_IP_CSUM;
1364 1368
1365 status = lv1_net_control(bus_id(card), dev_id(card), 1369 status = lv1_net_control(bus_id(card), dev_id(card),
1366 GELIC_NET_GET_MAC_ADDRESS, 1370 GELIC_LV1_GET_MAC_ADDRESS,
1367 0, 0, 0, &v1, &v2); 1371 0, 0, 0, &v1, &v2);
1372 v1 <<= 16;
1368 if (status || !is_valid_ether_addr((u8 *)&v1)) { 1373 if (status || !is_valid_ether_addr((u8 *)&v1)) {
1369 dev_info(ctodev(card), 1374 dev_info(ctodev(card),
1370 "%s:lv1_net_control GET_MAC_ADDR failed %d\n", 1375 "%s:lv1_net_control GET_MAC_ADDR failed %d\n",
1371 __func__, status); 1376 __func__, status);
1372 return -EINVAL; 1377 return -EINVAL;
1373 } 1378 }
1374 v1 <<= 16; 1379 memcpy(netdev->dev_addr, &v1, ETH_ALEN);
1375 memcpy(addr.sa_data, &v1, ETH_ALEN);
1376 memcpy(netdev->dev_addr, addr.sa_data, ETH_ALEN);
1377 dev_info(ctodev(card), "MAC addr %s\n",
1378 print_mac(mac, netdev->dev_addr));
1379 1380
1380 card->vlan_index = -1; /* no vlan */ 1381 if (card->vlan_required) {
1381 for (i = 0; i < GELIC_NET_VLAN_MAX; i++) {
1382 status = lv1_net_control(bus_id(card), dev_id(card),
1383 GELIC_NET_GET_VLAN_ID,
1384 i + 1, /* index; one based */
1385 0, 0, &v1, &v2);
1386 if (status == GELIC_NET_VLAN_NO_ENTRY) {
1387 dev_dbg(ctodev(card),
1388 "GELIC_VLAN_ID no entry:%d, VLAN disabled\n",
1389 status);
1390 card->vlan_id[i] = 0;
1391 } else if (status) {
1392 dev_dbg(ctodev(card),
1393 "%s:GELIC_NET_VLAN_ID faild, status=%d\n",
1394 __func__, status);
1395 card->vlan_id[i] = 0;
1396 } else {
1397 card->vlan_id[i] = (u32)v1;
1398 dev_dbg(ctodev(card), "vlan_id:%d, %lx\n", i, v1);
1399 }
1400 }
1401
1402 if (card->vlan_id[GELIC_NET_VLAN_WIRED - 1]) {
1403 card->vlan_index = GELIC_NET_VLAN_WIRED - 1;
1404 netdev->hard_header_len += VLAN_HLEN; 1382 netdev->hard_header_len += VLAN_HLEN;
1383 /*
1384 * As vlan is internally used,
1385 * we can not receive vlan packets
1386 */
1387 netdev->features |= NETIF_F_VLAN_CHALLENGED;
1405 } 1388 }
1406 1389
1407 status = register_netdev(netdev); 1390 status = register_netdev(netdev);
1408 if (status) { 1391 if (status) {
1409 dev_err(ctodev(card), "%s:Couldn't register net_device: %d\n", 1392 dev_err(ctodev(card), "%s:Couldn't register %s %d\n",
1410 __func__, status); 1393 __func__, netdev->name, status);
1411 return status; 1394 return status;
1412 } 1395 }
1396 dev_info(ctodev(card), "%s: MAC addr %s\n",
1397 netdev->name,
1398 print_mac(mac, netdev->dev_addr));
1413 1399
1414 return 0; 1400 return 0;
1415} 1401}
1416 1402
1417/** 1403/**
1418 * gelic_net_alloc_card - allocates net_device and card structure 1404 * gelic_alloc_card_net - allocates net_device and card structure
1419 * 1405 *
1420 * returns the card structure or NULL in case of errors 1406 * returns the card structure or NULL in case of errors
1421 * 1407 *
1422 * the card and net_device structures are linked to each other 1408 * the card and net_device structures are linked to each other
1423 */ 1409 */
1424static struct gelic_net_card *gelic_net_alloc_card(void) 1410#define GELIC_ALIGN (32)
1411static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
1425{ 1412{
1426 struct net_device *netdev; 1413 struct gelic_card *card;
1427 struct gelic_net_card *card; 1414 struct gelic_port *port;
1415 void *p;
1428 size_t alloc_size; 1416 size_t alloc_size;
1429
1430 alloc_size = sizeof (*card) +
1431 sizeof (struct gelic_net_descr) * GELIC_NET_RX_DESCRIPTORS +
1432 sizeof (struct gelic_net_descr) * GELIC_NET_TX_DESCRIPTORS;
1433 /* 1417 /*
1434 * we assume private data is allocated 32 bytes (or more) aligned 1418 * gelic requires dma descriptor is 32 bytes aligned and
1435 * so that gelic_net_descr should be 32 bytes aligned. 1419 * the hypervisor requires irq_status is 8 bytes aligned.
1436 * Current alloc_etherdev() does do it because NETDEV_ALIGN
1437 * is 32.
1438 * check this assumption here.
1439 */ 1420 */
1440 BUILD_BUG_ON(NETDEV_ALIGN < 32); 1421 BUILD_BUG_ON(offsetof(struct gelic_card, irq_status) % 8);
1441 BUILD_BUG_ON(offsetof(struct gelic_net_card, irq_status) % 8); 1422 BUILD_BUG_ON(offsetof(struct gelic_card, descr) % 32);
1442 BUILD_BUG_ON(offsetof(struct gelic_net_card, descr) % 32); 1423 alloc_size =
1424 sizeof(struct gelic_card) +
1425 sizeof(struct gelic_descr) * GELIC_NET_RX_DESCRIPTORS +
1426 sizeof(struct gelic_descr) * GELIC_NET_TX_DESCRIPTORS +
1427 GELIC_ALIGN - 1;
1428
1429 p = kzalloc(alloc_size, GFP_KERNEL);
1430 if (!p)
1431 return NULL;
1432 card = PTR_ALIGN(p, GELIC_ALIGN);
1433 card->unalign = p;
1443 1434
1444 netdev = alloc_etherdev(alloc_size); 1435 /*
1445 if (!netdev) 1436 * alloc netdev
1437 */
1438 *netdev = alloc_etherdev(sizeof(struct gelic_port));
1439 if (!netdev) {
1440 kfree(card->unalign);
1446 return NULL; 1441 return NULL;
1442 }
1443 port = netdev_priv(*netdev);
1444
1445 /* gelic_port */
1446 port->netdev = *netdev;
1447 port->card = card;
1448 port->type = GELIC_PORT_ETHERNET;
1449
1450 /* gelic_card */
1451 card->netdev[GELIC_PORT_ETHERNET] = *netdev;
1447 1452
1448 card = netdev_priv(netdev);
1449 card->netdev = netdev;
1450 INIT_WORK(&card->tx_timeout_task, gelic_net_tx_timeout_task); 1453 INIT_WORK(&card->tx_timeout_task, gelic_net_tx_timeout_task);
1451 init_waitqueue_head(&card->waitq); 1454 init_waitqueue_head(&card->waitq);
1452 atomic_set(&card->tx_timeout_task_counter, 0); 1455 atomic_set(&card->tx_timeout_task_counter, 0);
1456 init_MUTEX(&card->updown_lock);
1457 atomic_set(&card->users, 0);
1453 1458
1454 return card; 1459 return card;
1455} 1460}
1456 1461
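
gelic_alloc_card_net() over-allocates by GELIC_ALIGN - 1 bytes, rounds the pointer up with PTR_ALIGN() so that the embedded descriptor array is 32-byte aligned, and keeps the original pointer in card->unalign so it can later be handed back to kfree(). A stand-alone sketch of the same trick, assuming only a 32-byte alignment requirement:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MY_ALIGN 32	/* mirrors GELIC_ALIGN above */

struct aligned_obj {
	void *unalign;			/* original pointer, needed for free() */
	unsigned char payload[256];	/* stands in for the descriptor array */
};

/* round p up to the next multiple of align (align must be a power of two) */
static void *ptr_align(void *p, uintptr_t align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
	void *raw = malloc(sizeof(struct aligned_obj) + MY_ALIGN - 1);
	struct aligned_obj *obj;

	if (!raw)
		return 1;
	obj = ptr_align(raw, MY_ALIGN);
	obj->unalign = raw;		/* as card->unalign does above */
	printf("raw=%p aligned=%p\n", raw, (void *)obj);
	free(obj->unalign);
	return 0;
}
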
1462static void gelic_card_get_vlan_info(struct gelic_card *card)
1463{
1464 u64 v1, v2;
1465 int status;
1466 unsigned int i;
1467 struct {
1468 int tx;
1469 int rx;
1470 } vlan_id_ix[2] = {
1471 [GELIC_PORT_ETHERNET] = {
1472 .tx = GELIC_LV1_VLAN_TX_ETHERNET,
1473 .rx = GELIC_LV1_VLAN_RX_ETHERNET
1474 },
1475 [GELIC_PORT_WIRELESS] = {
1476 .tx = GELIC_LV1_VLAN_TX_WIRELESS,
1477 .rx = GELIC_LV1_VLAN_RX_WIRELESS
1478 }
1479 };
1480
1481 for (i = 0; i < ARRAY_SIZE(vlan_id_ix); i++) {
1482 /* tx tag */
1483 status = lv1_net_control(bus_id(card), dev_id(card),
1484 GELIC_LV1_GET_VLAN_ID,
1485 vlan_id_ix[i].tx,
1486 0, 0, &v1, &v2);
1487 if (status || !v1) {
1488 if (status != LV1_NO_ENTRY)
1489 dev_dbg(ctodev(card),
1490 "get vlan id for tx(%d) failed(%d)\n",
1491 vlan_id_ix[i].tx, status);
1492 card->vlan[i].tx = 0;
1493 card->vlan[i].rx = 0;
1494 continue;
1495 }
1496 card->vlan[i].tx = (u16)v1;
1497
1498 /* rx tag */
1499 status = lv1_net_control(bus_id(card), dev_id(card),
1500 GELIC_LV1_GET_VLAN_ID,
1501 vlan_id_ix[i].rx,
1502 0, 0, &v1, &v2);
1503 if (status || !v1) {
1504 if (status != LV1_NO_ENTRY)
1505 dev_info(ctodev(card),
1506 "get vlan id for rx(%d) failed(%d)\n",
1507 vlan_id_ix[i].rx, status);
1508 card->vlan[i].tx = 0;
1509 card->vlan[i].rx = 0;
1510 continue;
1511 }
1512 card->vlan[i].rx = (u16)v1;
1513
1514 dev_dbg(ctodev(card), "vlan_id[%d] tx=%02x rx=%02x\n",
1515 i, card->vlan[i].tx, card->vlan[i].rx);
1516 }
1517
1518 if (card->vlan[GELIC_PORT_ETHERNET].tx) {
1519 BUG_ON(!card->vlan[GELIC_PORT_WIRELESS].tx);
1520 card->vlan_required = 1;
1521 } else
1522 card->vlan_required = 0;
1523
 1524	/* check wireless capable firmware */
1525 if (ps3_compare_firmware_version(1, 6, 0) < 0) {
1526 card->vlan[GELIC_PORT_WIRELESS].tx = 0;
1527 card->vlan[GELIC_PORT_WIRELESS].rx = 0;
1528 }
1529
1530 dev_info(ctodev(card), "internal vlan %s\n",
1531 card->vlan_required? "enabled" : "disabled");
1532}
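
gelic_card_get_vlan_info() asks the hypervisor for one tx and one rx tag per port and disables internal VLAN on a port as soon as either lookup fails; vlan_required is then derived from the wired tx tag alone. A compressed user-space sketch of that control flow, with the GELIC_LV1_GET_VLAN_ID hypercall replaced by a hypothetical stub query_vlan_id():

#include <stdio.h>

enum { PORT_ETHERNET, PORT_WIRELESS, PORT_MAX };

struct vlan_id { unsigned short tx, rx; };

/* hypothetical stand-in for lv1_net_control(GELIC_LV1_GET_VLAN_ID, ...) */
static int query_vlan_id(int index, unsigned short *id)
{
	/* indices 0x02/0x03 (tx) and 0x12/0x13 (rx), as in the
	 * gelic_lv1_vlan_index enum in ps3_gelic_net.h below */
	static const unsigned short table[0x14] = {
		[0x02] = 0x0002, [0x03] = 0x0003,
		[0x12] = 0x0012, [0x13] = 0x0013,
	};

	if (index < 0 || index >= 0x14 || !table[index])
		return -1;	/* behaves like LV1_NO_ENTRY */
	*id = table[index];
	return 0;
}

int main(void)
{
	static const struct { int tx, rx; } ix[PORT_MAX] = {
		[PORT_ETHERNET] = { .tx = 0x02, .rx = 0x12 },
		[PORT_WIRELESS] = { .tx = 0x03, .rx = 0x13 },
	};
	struct vlan_id vlan[PORT_MAX] = { { 0, 0 }, { 0, 0 } };
	int i, vlan_required;

	for (i = 0; i < PORT_MAX; i++) {
		if (query_vlan_id(ix[i].tx, &vlan[i].tx) ||
		    query_vlan_id(ix[i].rx, &vlan[i].rx)) {
			vlan[i].tx = vlan[i].rx = 0;	/* disable this port */
			continue;
		}
		printf("port %d: tx tag %#x, rx tag %#x\n",
		       i, vlan[i].tx, vlan[i].rx);
	}
	vlan_required = vlan[PORT_ETHERNET].tx != 0;
	printf("internal vlan %s\n", vlan_required ? "enabled" : "disabled");
	return 0;
}
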
1457/** 1533/**
1458 * ps3_gelic_driver_probe - add a device to the control of this driver 1534 * ps3_gelic_driver_probe - add a device to the control of this driver
1459 */ 1535 */
1460static int ps3_gelic_driver_probe (struct ps3_system_bus_device *dev) 1536static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
1461{ 1537{
1462 struct gelic_net_card *card = gelic_net_alloc_card(); 1538 struct gelic_card *card;
1539 struct net_device *netdev;
1463 int result; 1540 int result;
1464 1541
1465 if (!card) { 1542 pr_debug("%s: called\n", __func__);
1466 dev_info(&dev->core, "gelic_net_alloc_card failed\n");
1467 result = -ENOMEM;
1468 goto fail_alloc_card;
1469 }
1470
1471 ps3_system_bus_set_driver_data(dev, card);
1472 card->dev = dev;
1473
1474 result = ps3_open_hv_device(dev); 1543 result = ps3_open_hv_device(dev);
1475 1544
1476 if (result) { 1545 if (result) {
1477 dev_dbg(&dev->core, "ps3_open_hv_device failed\n"); 1546 dev_dbg(&dev->core, "%s:ps3_open_hv_device failed\n",
1547 __func__);
1478 goto fail_open; 1548 goto fail_open;
1479 } 1549 }
1480 1550
1481 result = ps3_dma_region_create(dev->d_region); 1551 result = ps3_dma_region_create(dev->d_region);
1482 1552
1483 if (result) { 1553 if (result) {
1484 dev_dbg(&dev->core, "ps3_dma_region_create failed(%d)\n", 1554 dev_dbg(&dev->core, "%s:ps3_dma_region_create failed(%d)\n",
1485 result); 1555 __func__, result);
1486 BUG_ON("check region type"); 1556 BUG_ON("check region type");
1487 goto fail_dma_region; 1557 goto fail_dma_region;
1488 } 1558 }
1489 1559
1560 /* alloc card/netdevice */
1561 card = gelic_alloc_card_net(&netdev);
1562 if (!card) {
1563 dev_info(&dev->core, "%s:gelic_net_alloc_card failed\n",
1564 __func__);
1565 result = -ENOMEM;
1566 goto fail_alloc_card;
1567 }
1568 ps3_system_bus_set_driver_data(dev, card);
1569 card->dev = dev;
1570
1571 /* get internal vlan info */
1572 gelic_card_get_vlan_info(card);
1573
1574 /* setup interrupt */
1490 result = lv1_net_set_interrupt_status_indicator(bus_id(card), 1575 result = lv1_net_set_interrupt_status_indicator(bus_id(card),
1491 dev_id(card), 1576 dev_id(card),
1492 ps3_mm_phys_to_lpar(__pa(&card->irq_status)), 1577 ps3_mm_phys_to_lpar(__pa(&card->irq_status)),
@@ -1494,34 +1579,101 @@ static int ps3_gelic_driver_probe (struct ps3_system_bus_device *dev)
1494 1579
1495 if (result) { 1580 if (result) {
1496 dev_dbg(&dev->core, 1581 dev_dbg(&dev->core,
1497 "lv1_net_set_interrupt_status_indicator failed: %s\n", 1582 "%s:set_interrupt_status_indicator failed: %s\n",
1498 ps3_result(result)); 1583 __func__, ps3_result(result));
1499 result = -EIO; 1584 result = -EIO;
1500 goto fail_status_indicator; 1585 goto fail_status_indicator;
1501 } 1586 }
1502 1587
1503 result = gelic_net_setup_netdev(card); 1588 result = ps3_sb_event_receive_port_setup(dev, PS3_BINDING_CPU_ANY,
1589 &card->irq);
1590
1591 if (result) {
1592 dev_info(ctodev(card),
1593 "%s:gelic_net_open_device failed (%d)\n",
1594 __func__, result);
1595 result = -EPERM;
1596 goto fail_alloc_irq;
1597 }
1598 result = request_irq(card->irq, gelic_card_interrupt,
1599 IRQF_DISABLED, netdev->name, card);
1600
1601 if (result) {
1602 dev_info(ctodev(card), "%s:request_irq failed (%d)\n",
1603 __func__, result);
1604 goto fail_request_irq;
1605 }
1606
1607 /* setup card structure */
1608 card->irq_mask = GELIC_CARD_RXINT | GELIC_CARD_TXINT |
1609 GELIC_CARD_PORT_STATUS_CHANGED;
1610 card->rx_csum = GELIC_CARD_RX_CSUM_DEFAULT;
1504 1611
1612
1613 if (gelic_card_init_chain(card, &card->tx_chain,
1614 card->descr, GELIC_NET_TX_DESCRIPTORS))
1615 goto fail_alloc_tx;
1616 if (gelic_card_init_chain(card, &card->rx_chain,
1617 card->descr + GELIC_NET_TX_DESCRIPTORS,
1618 GELIC_NET_RX_DESCRIPTORS))
1619 goto fail_alloc_rx;
1620
1621 /* head of chain */
1622 card->tx_top = card->tx_chain.head;
1623 card->rx_top = card->rx_chain.head;
1624 dev_dbg(ctodev(card), "descr rx %p, tx %p, size %#lx, num %#x\n",
1625 card->rx_top, card->tx_top, sizeof(struct gelic_descr),
1626 GELIC_NET_RX_DESCRIPTORS);
1627 /* allocate rx skbs */
1628 if (gelic_card_alloc_rx_skbs(card))
1629 goto fail_alloc_skbs;
1630
1631 spin_lock_init(&card->tx_lock);
1632 card->tx_dma_progress = 0;
1633
1634 /* setup net_device structure */
1635 netdev->irq = card->irq;
1636 SET_NETDEV_DEV(netdev, &card->dev->core);
1637 gelic_ether_setup_netdev_ops(netdev, &card->napi);
1638 result = gelic_net_setup_netdev(netdev, card);
1505 if (result) { 1639 if (result) {
1506 dev_dbg(&dev->core, "%s:%d: ps3_dma_region_create failed: " 1640 dev_dbg(&dev->core, "%s: setup_netdev failed %d",
1507 "(%d)\n", __func__, __LINE__, result); 1641 __func__, result);
1508 goto fail_setup_netdev; 1642 goto fail_setup_netdev;
1509 } 1643 }
1510 1644
1645#ifdef CONFIG_GELIC_WIRELESS
1646 if (gelic_wl_driver_probe(card)) {
1647 dev_dbg(&dev->core, "%s: WL init failed\n", __func__);
1648 goto fail_setup_netdev;
1649 }
1650#endif
1651 pr_debug("%s: done\n", __func__);
1511 return 0; 1652 return 0;
1512 1653
1513fail_setup_netdev: 1654fail_setup_netdev:
1655fail_alloc_skbs:
1656 gelic_card_free_chain(card, card->rx_chain.head);
1657fail_alloc_rx:
1658 gelic_card_free_chain(card, card->tx_chain.head);
1659fail_alloc_tx:
1660 free_irq(card->irq, card);
1661 netdev->irq = NO_IRQ;
1662fail_request_irq:
1663 ps3_sb_event_receive_port_destroy(dev, card->irq);
1664fail_alloc_irq:
1514 lv1_net_set_interrupt_status_indicator(bus_id(card), 1665 lv1_net_set_interrupt_status_indicator(bus_id(card),
1515 bus_id(card), 1666 bus_id(card),
1516 0 , 0); 1667 0, 0);
1517fail_status_indicator: 1668fail_status_indicator:
1669 ps3_system_bus_set_driver_data(dev, NULL);
1670 kfree(netdev_card(netdev)->unalign);
1671 free_netdev(netdev);
1672fail_alloc_card:
1518 ps3_dma_region_free(dev->d_region); 1673 ps3_dma_region_free(dev->d_region);
1519fail_dma_region: 1674fail_dma_region:
1520 ps3_close_hv_device(dev); 1675 ps3_close_hv_device(dev);
1521fail_open: 1676fail_open:
1522 ps3_system_bus_set_driver_data(dev, NULL);
1523 free_netdev(card->netdev);
1524fail_alloc_card:
1525 return result; 1677 return result;
1526} 1678}
1527 1679
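
The probe above acquires its resources in a fixed order (hv device, DMA region, card, event port, irq, descriptor chains, netdev) and the fail_* labels release them strictly in reverse, so each label only undoes what was already set up when the jump is taken. A generic sketch of that goto-unwind pattern, with placeholder acquire/release helpers that are not part of the driver:

#include <stdio.h>
#include <string.h>

static int acquire(const char *what)
{
	printf("acquire %s\n", what);
	return strcmp(what, "irq") ? 0 : -1;	/* let the last step fail */
}

static void release(const char *what)
{
	printf("release %s\n", what);
}

static int probe(void)
{
	int err;

	err = acquire("hv device");
	if (err)
		goto fail_open;
	err = acquire("dma region");
	if (err)
		goto fail_dma_region;
	err = acquire("irq");
	if (err)
		goto fail_irq;
	return 0;			/* success: keep everything */

fail_irq:
	release("dma region");
fail_dma_region:
	release("hv device");
fail_open:
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}
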
@@ -1529,9 +1681,34 @@ fail_alloc_card:
1529 * ps3_gelic_driver_remove - remove a device from the control of this driver 1681 * ps3_gelic_driver_remove - remove a device from the control of this driver
1530 */ 1682 */
1531 1683
1532static int ps3_gelic_driver_remove (struct ps3_system_bus_device *dev) 1684static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
1533{ 1685{
1534 struct gelic_net_card *card = ps3_system_bus_get_driver_data(dev); 1686 struct gelic_card *card = ps3_system_bus_get_driver_data(dev);
1687 struct net_device *netdev0;
1688 pr_debug("%s: called\n", __func__);
1689
1690#ifdef CONFIG_GELIC_WIRELESS
1691 gelic_wl_driver_remove(card);
1692#endif
1693 /* stop interrupt */
1694 gelic_card_set_irq_mask(card, 0);
1695
1696 /* turn off DMA, force end */
1697 gelic_card_disable_rxdmac(card);
1698 gelic_card_disable_txdmac(card);
1699
1700 /* release chains */
1701 gelic_card_release_tx_chain(card, 1);
1702 gelic_card_release_rx_chain(card);
1703
1704 gelic_card_free_chain(card, card->tx_top);
1705 gelic_card_free_chain(card, card->rx_top);
1706
1707 netdev0 = card->netdev[GELIC_PORT_ETHERNET];
1708 /* disconnect event port */
1709 free_irq(card->irq, card);
1710 netdev0->irq = NO_IRQ;
1711 ps3_sb_event_receive_port_destroy(card->dev, card->irq);
1535 1712
1536 wait_event(card->waitq, 1713 wait_event(card->waitq,
1537 atomic_read(&card->tx_timeout_task_counter) == 0); 1714 atomic_read(&card->tx_timeout_task_counter) == 0);
@@ -1539,8 +1716,9 @@ static int ps3_gelic_driver_remove (struct ps3_system_bus_device *dev)
1539 lv1_net_set_interrupt_status_indicator(bus_id(card), dev_id(card), 1716 lv1_net_set_interrupt_status_indicator(bus_id(card), dev_id(card),
1540 0 , 0); 1717 0 , 0);
1541 1718
1542 unregister_netdev(card->netdev); 1719 unregister_netdev(netdev0);
1543 free_netdev(card->netdev); 1720 kfree(netdev_card(netdev0)->unalign);
1721 free_netdev(netdev0);
1544 1722
1545 ps3_system_bus_set_driver_data(dev, NULL); 1723 ps3_system_bus_set_driver_data(dev, NULL);
1546 1724
@@ -1548,6 +1726,7 @@ static int ps3_gelic_driver_remove (struct ps3_system_bus_device *dev)
1548 1726
1549 ps3_close_hv_device(dev); 1727 ps3_close_hv_device(dev);
1550 1728
1729 pr_debug("%s: done\n", __func__);
1551 return 0; 1730 return 0;
1552} 1731}
1553 1732
@@ -1572,8 +1751,8 @@ static void __exit ps3_gelic_driver_exit (void)
1572 ps3_system_bus_driver_unregister(&ps3_gelic_driver); 1751 ps3_system_bus_driver_unregister(&ps3_gelic_driver);
1573} 1752}
1574 1753
1575module_init (ps3_gelic_driver_init); 1754module_init(ps3_gelic_driver_init);
1576module_exit (ps3_gelic_driver_exit); 1755module_exit(ps3_gelic_driver_exit);
1577 1756
1578MODULE_ALIAS(PS3_MODULE_ALIAS_GELIC); 1757MODULE_ALIAS(PS3_MODULE_ALIAS_GELIC);
1579 1758
diff --git a/drivers/net/ps3_gelic_net.h b/drivers/net/ps3_gelic_net.h
index 968560269a3b..1d39d06797e4 100644
--- a/drivers/net/ps3_gelic_net.h
+++ b/drivers/net/ps3_gelic_net.h
@@ -35,198 +35,323 @@
35#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN 35#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN
36#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN 36#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN
37#define GELIC_NET_RXBUF_ALIGN 128 37#define GELIC_NET_RXBUF_ALIGN 128
38#define GELIC_NET_RX_CSUM_DEFAULT 1 /* hw chksum */ 38#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
39#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ 39#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
40#define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS) 40#define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS)
41#define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL 41#define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL
42#define GELIC_NET_VLAN_POS (VLAN_ETH_ALEN * 2) 42
43#define GELIC_NET_VLAN_MAX 4
44#define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ 43#define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */
45 44
46enum gelic_net_int0_status { 45/* virtual interrupt status register bits */
47 GELIC_NET_GDTDCEINT = 24, 46 /* INT1 */
48 GELIC_NET_GRFANMINT = 28, 47#define GELIC_CARD_TX_RAM_FULL_ERR 0x0000000000000001L
49}; 48#define GELIC_CARD_RX_RAM_FULL_ERR 0x0000000000000002L
49#define GELIC_CARD_TX_SHORT_FRAME_ERR 0x0000000000000004L
50#define GELIC_CARD_TX_INVALID_DESCR_ERR 0x0000000000000008L
51#define GELIC_CARD_RX_FIFO_FULL_ERR 0x0000000000002000L
52#define GELIC_CARD_RX_DESCR_CHAIN_END 0x0000000000004000L
53#define GELIC_CARD_RX_INVALID_DESCR_ERR 0x0000000000008000L
54#define GELIC_CARD_TX_RESPONCE_ERR 0x0000000000010000L
55#define GELIC_CARD_RX_RESPONCE_ERR 0x0000000000100000L
56#define GELIC_CARD_TX_PROTECTION_ERR 0x0000000000400000L
57#define GELIC_CARD_RX_PROTECTION_ERR 0x0000000004000000L
58#define GELIC_CARD_TX_TCP_UDP_CHECKSUM_ERR 0x0000000008000000L
59#define GELIC_CARD_PORT_STATUS_CHANGED 0x0000000020000000L
60#define GELIC_CARD_WLAN_EVENT_RECEIVED 0x0000000040000000L
61#define GELIC_CARD_WLAN_COMMAND_COMPLETED 0x0000000080000000L
62 /* INT 0 */
63#define GELIC_CARD_TX_FLAGGED_DESCR 0x0004000000000000L
64#define GELIC_CARD_RX_FLAGGED_DESCR 0x0040000000000000L
65#define GELIC_CARD_TX_TRANSFER_END 0x0080000000000000L
66#define GELIC_CARD_TX_DESCR_CHAIN_END 0x0100000000000000L
67#define GELIC_CARD_NUMBER_OF_RX_FRAME 0x1000000000000000L
68#define GELIC_CARD_ONE_TIME_COUNT_TIMER 0x4000000000000000L
69#define GELIC_CARD_FREE_RUN_COUNT_TIMER 0x8000000000000000L
70
71/* initial interrupt mask */
72#define GELIC_CARD_TXINT GELIC_CARD_TX_DESCR_CHAIN_END
50 73
51/* GHIINT1STS bits */ 74#define GELIC_CARD_RXINT (GELIC_CARD_RX_DESCR_CHAIN_END | \
52enum gelic_net_int1_status { 75 GELIC_CARD_NUMBER_OF_RX_FRAME)
53 GELIC_NET_GDADCEINT = 14, 76
77 /* RX descriptor data_status bits */
78enum gelic_descr_rx_status {
79 GELIC_DESCR_RXDMADU = 0x80000000, /* destination MAC addr unknown */
80 GELIC_DESCR_RXLSTFBF = 0x40000000, /* last frame buffer */
81 GELIC_DESCR_RXIPCHK = 0x20000000, /* IP checksum performed */
 82	GELIC_DESCR_RXTCPCHK	= 0x10000000, /* TCP/UDP checksum performed */
83 GELIC_DESCR_RXWTPKT = 0x00C00000, /*
84 * wakeup trigger packet
85 * 01: Magic Packet (TM)
86 * 10: ARP packet
87 * 11: Multicast MAC addr
88 */
89 GELIC_DESCR_RXVLNPKT = 0x00200000, /* VLAN packet */
90 /* bit 20..16 reserved */
91 GELIC_DESCR_RXRRECNUM = 0x0000ff00, /* reception receipt number */
92 /* bit 7..0 reserved */
54}; 93};
55 94
56/* interrupt mask */ 95#define GELIC_DESCR_DATA_STATUS_CHK_MASK \
57#define GELIC_NET_TXINT (1L << (GELIC_NET_GDTDCEINT + 32)) 96 (GELIC_DESCR_RXIPCHK | GELIC_DESCR_RXTCPCHK)
58 97
59#define GELIC_NET_RXINT0 (1L << (GELIC_NET_GRFANMINT + 32)) 98 /* TX descriptor data_status bits */
60#define GELIC_NET_RXINT1 (1L << GELIC_NET_GDADCEINT) 99enum gelic_descr_tx_status {
61#define GELIC_NET_RXINT (GELIC_NET_RXINT0 | GELIC_NET_RXINT1) 100 GELIC_DESCR_TX_TAIL = 0x00000001, /* gelic treated this
101 * descriptor was end of
102 * a tx frame
103 */
104};
62 105
63 /* RX descriptor data_status bits */ 106/* RX descriptor data error bits */
64#define GELIC_NET_RXDMADU 0x80000000 /* destination MAC addr unknown */ 107enum gelic_descr_rx_error {
65#define GELIC_NET_RXLSTFBF 0x40000000 /* last frame buffer */ 108 /* bit 31 reserved */
66#define GELIC_NET_RXIPCHK 0x20000000 /* IP checksum performed */ 109 GELIC_DESCR_RXALNERR = 0x40000000, /* alignment error 10/100M */
67#define GELIC_NET_RXTCPCHK 0x10000000 /* TCP/UDP checksum performed */ 110 GELIC_DESCR_RXOVERERR = 0x20000000, /* oversize error */
68#define GELIC_NET_RXIPSPKT 0x08000000 /* IPsec packet */ 111 GELIC_DESCR_RXRNTERR = 0x10000000, /* Runt error */
69#define GELIC_NET_RXIPSAHPRT 0x04000000 /* IPsec AH protocol performed */ 112 GELIC_DESCR_RXIPCHKERR = 0x08000000, /* IP checksum error */
70#define GELIC_NET_RXIPSESPPRT 0x02000000 /* IPsec ESP protocol performed */ 113 GELIC_DESCR_RXTCPCHKERR = 0x04000000, /* TCP/UDP checksum error */
71#define GELIC_NET_RXSESPAH 0x01000000 /* 114 GELIC_DESCR_RXDRPPKT = 0x00100000, /* drop packet */
72 * IPsec ESP protocol auth 115 GELIC_DESCR_RXIPFMTERR = 0x00080000, /* IP packet format error */
73 * performed 116 /* bit 18 reserved */
74 */ 117 GELIC_DESCR_RXDATAERR = 0x00020000, /* IP packet format error */
75 118 GELIC_DESCR_RXCALERR = 0x00010000, /* carrier extension length
76#define GELIC_NET_RXWTPKT 0x00C00000 /* 119 * error */
77 * wakeup trigger packet 120 GELIC_DESCR_RXCREXERR = 0x00008000, /* carrier extension error */
78 * 01: Magic Packet (TM) 121 GELIC_DESCR_RXMLTCST = 0x00004000, /* multicast address frame */
79 * 10: ARP packet 122 /* bit 13..0 reserved */
80 * 11: Multicast MAC addr 123};
81 */ 124#define GELIC_DESCR_DATA_ERROR_CHK_MASK \
82#define GELIC_NET_RXVLNPKT 0x00200000 /* VLAN packet */ 125 (GELIC_DESCR_RXIPCHKERR | GELIC_DESCR_RXTCPCHKERR)
83/* bit 20..16 reserved */
84#define GELIC_NET_RXRRECNUM 0x0000ff00 /* reception receipt number */
85#define GELIC_NET_RXRRECNUM_SHIFT 8
86/* bit 7..0 reserved */
87
88#define GELIC_NET_TXDESC_TAIL 0
89#define GELIC_NET_DATA_STATUS_CHK_MASK (GELIC_NET_RXIPCHK | GELIC_NET_RXTCPCHK)
90
91/* RX descriptor data_error bits */
92/* bit 31 reserved */
93#define GELIC_NET_RXALNERR 0x40000000 /* alignement error 10/100M */
94#define GELIC_NET_RXOVERERR 0x20000000 /* oversize error */
95#define GELIC_NET_RXRNTERR 0x10000000 /* Runt error */
96#define GELIC_NET_RXIPCHKERR 0x08000000 /* IP checksum error */
97#define GELIC_NET_RXTCPCHKERR 0x04000000 /* TCP/UDP checksum error */
98#define GELIC_NET_RXUMCHSP 0x02000000 /* unmatched sp on sp */
99#define GELIC_NET_RXUMCHSPI 0x01000000 /* unmatched SPI on SAD */
100#define GELIC_NET_RXUMCHSAD 0x00800000 /* unmatched SAD */
101#define GELIC_NET_RXIPSAHERR 0x00400000 /* auth error on AH protocol
102 * processing */
103#define GELIC_NET_RXIPSESPAHERR 0x00200000 /* auth error on ESP protocol
104 * processing */
105#define GELIC_NET_RXDRPPKT 0x00100000 /* drop packet */
106#define GELIC_NET_RXIPFMTERR 0x00080000 /* IP packet format error */
107/* bit 18 reserved */
108#define GELIC_NET_RXDATAERR 0x00020000 /* IP packet format error */
109#define GELIC_NET_RXCALERR 0x00010000 /* cariier extension length
110 * error */
111#define GELIC_NET_RXCREXERR 0x00008000 /* carrier extention error */
112#define GELIC_NET_RXMLTCST 0x00004000 /* multicast address frame */
113/* bit 13..0 reserved */
114#define GELIC_NET_DATA_ERROR_CHK_MASK \
115 (GELIC_NET_RXIPCHKERR | GELIC_NET_RXTCPCHKERR)
116 126
127/* DMA command and status (RX and TX)*/
128enum gelic_descr_dma_status {
129 GELIC_DESCR_DMA_COMPLETE = 0x00000000, /* used in tx */
130 GELIC_DESCR_DMA_BUFFER_FULL = 0x00000000, /* used in rx */
131 GELIC_DESCR_DMA_RESPONSE_ERROR = 0x10000000, /* used in rx, tx */
132 GELIC_DESCR_DMA_PROTECTION_ERROR = 0x20000000, /* used in rx, tx */
133 GELIC_DESCR_DMA_FRAME_END = 0x40000000, /* used in rx */
134 GELIC_DESCR_DMA_FORCE_END = 0x50000000, /* used in rx, tx */
135 GELIC_DESCR_DMA_CARDOWNED = 0xa0000000, /* used in rx, tx */
136 GELIC_DESCR_DMA_NOT_IN_USE = 0xb0000000, /* any other value */
137};
138
139#define GELIC_DESCR_DMA_STAT_MASK (0xf0000000)
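
The current DMA state of a descriptor lives in the top nibble of dmac_cmd_status, so ownership and completion can be tested with the single mask above; the field is stored big-endian, which is why the descriptor members are typed __be32 further down. An illustrative user-space sketch of the mask logic (htonl/ntohl stand in for cpu_to_be32/be32_to_cpu):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define DMA_STAT_MASK	0xf0000000u	/* GELIC_DESCR_DMA_STAT_MASK */
#define DMA_CARDOWNED	0xa0000000u	/* GELIC_DESCR_DMA_CARDOWNED */
#define DMA_FRAME_END	0x40000000u	/* GELIC_DESCR_DMA_FRAME_END */

int main(void)
{
	/* descriptor word as it would sit in memory (big-endian) */
	uint32_t dmac_cmd_status = htonl(DMA_FRAME_END | 0x00000002);
	uint32_t status = ntohl(dmac_cmd_status) & DMA_STAT_MASK;

	printf("card owned: %s, frame end: %s\n",
	       status == DMA_CARDOWNED ? "yes" : "no",
	       status == DMA_FRAME_END ? "yes" : "no");
	return 0;
}
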
117 140
118/* tx descriptor command and status */ 141/* tx descriptor command and status */
119#define GELIC_NET_DMAC_CMDSTAT_NOCS 0xa0080000 /* middle of frame */ 142enum gelic_descr_tx_dma_status {
120#define GELIC_NET_DMAC_CMDSTAT_TCPCS 0xa00a0000 143 /* [19] */
121#define GELIC_NET_DMAC_CMDSTAT_UDPCS 0xa00b0000 144 GELIC_DESCR_TX_DMA_IKE = 0x00080000, /* IPSEC off */
122#define GELIC_NET_DMAC_CMDSTAT_END_FRAME 0x00040000 /* end of frame */ 145 /* [18] */
123 146 GELIC_DESCR_TX_DMA_FRAME_TAIL = 0x00040000, /* last descriptor of
124#define GELIC_NET_DMAC_CMDSTAT_RXDCEIS 0x00000002 /* descriptor chain end 147 * the packet
125 * interrupt status */ 148 */
126 149 /* [17..16] */
127#define GELIC_NET_DMAC_CMDSTAT_CHAIN_END 0x00000002 /* RXDCEIS:DMA stopped */ 150 GELIC_DESCR_TX_DMA_TCP_CHKSUM = 0x00020000, /* TCP packet */
128#define GELIC_NET_DESCR_IND_PROC_SHIFT 28 151 GELIC_DESCR_TX_DMA_UDP_CHKSUM = 0x00030000, /* UDP packet */
129#define GELIC_NET_DESCR_IND_PROC_MASKO 0x0fffffff 152 GELIC_DESCR_TX_DMA_NO_CHKSUM = 0x00000000, /* no checksum */
130 153
131 154 /* [1] */
132enum gelic_net_descr_status { 155 GELIC_DESCR_TX_DMA_CHAIN_END = 0x00000002, /* DMA terminated
133 GELIC_NET_DESCR_COMPLETE = 0x00, /* used in tx */ 156 * due to chain end
134 GELIC_NET_DESCR_BUFFER_FULL = 0x00, /* used in rx */ 157 */
135 GELIC_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
136 GELIC_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */
137 GELIC_NET_DESCR_FRAME_END = 0x04, /* used in rx */
138 GELIC_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */
139 GELIC_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */
140 GELIC_NET_DESCR_NOT_IN_USE = 0x0b /* any other value */
141}; 158};
159
160#define GELIC_DESCR_DMA_CMD_NO_CHKSUM \
161 (GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
162 GELIC_DESCR_TX_DMA_NO_CHKSUM)
163
164#define GELIC_DESCR_DMA_CMD_TCP_CHKSUM \
165 (GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
166 GELIC_DESCR_TX_DMA_TCP_CHKSUM)
167
168#define GELIC_DESCR_DMA_CMD_UDP_CHKSUM \
169 (GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
170 GELIC_DESCR_TX_DMA_UDP_CHKSUM)
171
172enum gelic_descr_rx_dma_status {
173 /* [ 1 ] */
174 GELIC_DESCR_RX_DMA_CHAIN_END = 0x00000002, /* DMA terminated
175 * due to chain end
176 */
177};
178
142/* for lv1_net_control */ 179/* for lv1_net_control */
143#define GELIC_NET_GET_MAC_ADDRESS 0x0000000000000001 180enum gelic_lv1_net_control_code {
144#define GELIC_NET_GET_ETH_PORT_STATUS 0x0000000000000002 181 GELIC_LV1_GET_MAC_ADDRESS = 1,
145#define GELIC_NET_SET_NEGOTIATION_MODE 0x0000000000000003 182 GELIC_LV1_GET_ETH_PORT_STATUS = 2,
146#define GELIC_NET_GET_VLAN_ID 0x0000000000000004 183 GELIC_LV1_SET_NEGOTIATION_MODE = 3,
147 184 GELIC_LV1_GET_VLAN_ID = 4,
148#define GELIC_NET_LINK_UP 0x0000000000000001 185 GELIC_LV1_GET_CHANNEL = 6,
149#define GELIC_NET_FULL_DUPLEX 0x0000000000000002 186 GELIC_LV1_POST_WLAN_CMD = 9,
150#define GELIC_NET_AUTO_NEG 0x0000000000000004 187 GELIC_LV1_GET_WLAN_CMD_RESULT = 10,
151#define GELIC_NET_SPEED_10 0x0000000000000010 188 GELIC_LV1_GET_WLAN_EVENT = 11
152#define GELIC_NET_SPEED_100 0x0000000000000020 189};
153#define GELIC_NET_SPEED_1000 0x0000000000000040 190
154 191/* status returned from GET_ETH_PORT_STATUS */
155#define GELIC_NET_VLAN_ALL 0x0000000000000001 192enum gelic_lv1_ether_port_status {
156#define GELIC_NET_VLAN_WIRED 0x0000000000000002 193 GELIC_LV1_ETHER_LINK_UP = 0x0000000000000001L,
157#define GELIC_NET_VLAN_WIRELESS 0x0000000000000003 194 GELIC_LV1_ETHER_FULL_DUPLEX = 0x0000000000000002L,
158#define GELIC_NET_VLAN_PSP 0x0000000000000004 195 GELIC_LV1_ETHER_AUTO_NEG = 0x0000000000000004L,
159#define GELIC_NET_VLAN_PORT0 0x0000000000000010 196
160#define GELIC_NET_VLAN_PORT1 0x0000000000000011 197 GELIC_LV1_ETHER_SPEED_10 = 0x0000000000000010L,
161#define GELIC_NET_VLAN_PORT2 0x0000000000000012 198 GELIC_LV1_ETHER_SPEED_100 = 0x0000000000000020L,
162#define GELIC_NET_VLAN_DAEMON_CLIENT_BSS 0x0000000000000013 199 GELIC_LV1_ETHER_SPEED_1000 = 0x0000000000000040L,
163#define GELIC_NET_VLAN_LIBERO_CLIENT_BSS 0x0000000000000014 200 GELIC_LV1_ETHER_SPEED_MASK = 0x0000000000000070L
164#define GELIC_NET_VLAN_NO_ENTRY -6 201};
165 202
166#define GELIC_NET_PORT 2 /* for port status */ 203enum gelic_lv1_vlan_index {
204 /* for outgoing packets */
205 GELIC_LV1_VLAN_TX_ETHERNET = 0x0000000000000002L,
206 GELIC_LV1_VLAN_TX_WIRELESS = 0x0000000000000003L,
207 /* for incoming packets */
208 GELIC_LV1_VLAN_RX_ETHERNET = 0x0000000000000012L,
209 GELIC_LV1_VLAN_RX_WIRELESS = 0x0000000000000013L
210};
167 211
168/* size of hardware part of gelic descriptor */ 212/* size of hardware part of gelic descriptor */
169#define GELIC_NET_DESCR_SIZE (32) 213#define GELIC_DESCR_SIZE (32)
170struct gelic_net_descr { 214
215enum gelic_port_type {
216 GELIC_PORT_ETHERNET = 0,
217 GELIC_PORT_WIRELESS = 1,
218 GELIC_PORT_MAX
219};
220
221struct gelic_descr {
171 /* as defined by the hardware */ 222 /* as defined by the hardware */
172 u32 buf_addr; 223 __be32 buf_addr;
173 u32 buf_size; 224 __be32 buf_size;
174 u32 next_descr_addr; 225 __be32 next_descr_addr;
175 u32 dmac_cmd_status; 226 __be32 dmac_cmd_status;
176 u32 result_size; 227 __be32 result_size;
177 u32 valid_size; /* all zeroes for tx */ 228 __be32 valid_size; /* all zeroes for tx */
178 u32 data_status; 229 __be32 data_status;
179 u32 data_error; /* all zeroes for tx */ 230 __be32 data_error; /* all zeroes for tx */
180 231
181 /* used in the driver */ 232 /* used in the driver */
182 struct sk_buff *skb; 233 struct sk_buff *skb;
183 dma_addr_t bus_addr; 234 dma_addr_t bus_addr;
184 struct gelic_net_descr *next; 235 struct gelic_descr *next;
185 struct gelic_net_descr *prev; 236 struct gelic_descr *prev;
186 struct vlan_ethhdr vlan;
187} __attribute__((aligned(32))); 237} __attribute__((aligned(32)));
188 238
189struct gelic_net_descr_chain { 239struct gelic_descr_chain {
190 /* we walk from tail to head */ 240 /* we walk from tail to head */
191 struct gelic_net_descr *head; 241 struct gelic_descr *head;
192 struct gelic_net_descr *tail; 242 struct gelic_descr *tail;
193}; 243};
194 244
195struct gelic_net_card { 245struct gelic_vlan_id {
196 struct net_device *netdev; 246 u16 tx;
247 u16 rx;
248};
249
250struct gelic_card {
197 struct napi_struct napi; 251 struct napi_struct napi;
252 struct net_device *netdev[GELIC_PORT_MAX];
198 /* 253 /*
199 * hypervisor requires irq_status should be 254 * hypervisor requires irq_status should be
200 * 8 bytes aligned, but u64 member is 255 * 8 bytes aligned, but u64 member is
201 * always disposed in that manner 256 * always disposed in that manner
202 */ 257 */
203 u64 irq_status; 258 u64 irq_status;
204 u64 ghiintmask; 259 u64 irq_mask;
205 260
206 struct ps3_system_bus_device *dev; 261 struct ps3_system_bus_device *dev;
207 u32 vlan_id[GELIC_NET_VLAN_MAX]; 262 struct gelic_vlan_id vlan[GELIC_PORT_MAX];
208 int vlan_index; 263 int vlan_required;
209 264
210 struct gelic_net_descr_chain tx_chain; 265 struct gelic_descr_chain tx_chain;
211 struct gelic_net_descr_chain rx_chain; 266 struct gelic_descr_chain rx_chain;
212 int rx_dma_restart_required; 267 int rx_dma_restart_required;
213 /* gurad dmac descriptor chain*/
214 spinlock_t chain_lock;
215
216 int rx_csum; 268 int rx_csum;
217 /* guard tx_dma_progress */ 269 /*
218 spinlock_t tx_dma_lock; 270 * tx_lock guards tx descriptor list and
271 * tx_dma_progress.
272 */
273 spinlock_t tx_lock;
219 int tx_dma_progress; 274 int tx_dma_progress;
220 275
221 struct work_struct tx_timeout_task; 276 struct work_struct tx_timeout_task;
222 atomic_t tx_timeout_task_counter; 277 atomic_t tx_timeout_task_counter;
223 wait_queue_head_t waitq; 278 wait_queue_head_t waitq;
224 279
225 struct gelic_net_descr *tx_top, *rx_top; 280 /* only first user should up the card */
226 struct gelic_net_descr descr[0]; 281 struct semaphore updown_lock;
282 atomic_t users;
283
284 u64 ether_port_status;
285 /* original address returned by kzalloc */
286 void *unalign;
287
288 /*
289 * each netdevice has copy of irq
290 */
291 unsigned int irq;
292 struct gelic_descr *tx_top, *rx_top;
293 struct gelic_descr descr[0]; /* must be the last */
294};
295
296struct gelic_port {
297 struct gelic_card *card;
298 struct net_device *netdev;
299 enum gelic_port_type type;
300 long priv[0]; /* long for alignment */
227}; 301};
228 302
303static inline struct gelic_card *port_to_card(struct gelic_port *p)
304{
305 return p->card;
306}
307static inline struct net_device *port_to_netdev(struct gelic_port *p)
308{
309 return p->netdev;
310}
311static inline struct gelic_card *netdev_card(struct net_device *d)
312{
313 return ((struct gelic_port *)netdev_priv(d))->card;
314}
315static inline struct gelic_port *netdev_port(struct net_device *d)
316{
317 return (struct gelic_port *)netdev_priv(d);
318}
319static inline struct device *ctodev(struct gelic_card *card)
320{
321 return &card->dev->core;
322}
323static inline u64 bus_id(struct gelic_card *card)
324{
325 return card->dev->bus_id;
326}
327static inline u64 dev_id(struct gelic_card *card)
328{
329 return card->dev->dev_id;
330}
331
332static inline void *port_priv(struct gelic_port *port)
333{
334 return port->priv;
335}
336
337extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
338/* shared netdev ops */
339extern void gelic_card_up(struct gelic_card *card);
340extern void gelic_card_down(struct gelic_card *card);
341extern int gelic_net_open(struct net_device *netdev);
342extern int gelic_net_stop(struct net_device *netdev);
343extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
344extern void gelic_net_set_multi(struct net_device *netdev);
345extern void gelic_net_tx_timeout(struct net_device *netdev);
346extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
347extern int gelic_net_setup_netdev(struct net_device *netdev,
348 struct gelic_card *card);
229 349
230extern unsigned long p_to_lp(long pa); 350/* shared ethtool ops */
351extern void gelic_net_get_drvinfo(struct net_device *netdev,
352 struct ethtool_drvinfo *info);
353extern u32 gelic_net_get_rx_csum(struct net_device *netdev);
354extern int gelic_net_set_rx_csum(struct net_device *netdev, u32 data);
355extern void gelic_net_poll_controller(struct net_device *netdev);
231 356
232#endif /* _GELIC_NET_H */ 357#endif /* _GELIC_NET_H */
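
The header above layers the data structures as one struct gelic_card per device and one struct gelic_port per net_device: netdev_priv() yields the port, port->card points back to the shared card, and the zero-length priv[] tail is where a port-specific state block (such as the wireless state used by ps3_gelic_wireless.c below) is placed. A user-space sketch of that layering with made-up example types:

#include <stdio.h>
#include <stdlib.h>

struct card { int irq; };		/* shared, one per device */

struct port {				/* one per network interface */
	struct card *card;
	int type;
	long priv[];			/* per-port private area, long for alignment */
};

struct wl_info { int scan_age; };	/* example port-private payload */

static struct port *alloc_port(struct card *card, size_t priv_size)
{
	struct port *port = calloc(1, sizeof(*port) + priv_size);

	if (port)
		port->card = card;
	return port;
}

static void *port_priv(struct port *port)
{
	return port->priv;
}

int main(void)
{
	struct card card = { .irq = 42 };
	struct port *port = alloc_port(&card, sizeof(struct wl_info));
	struct wl_info *wl;

	if (!port)
		return 1;
	wl = port_priv(port);
	wl->scan_age = 5;
	printf("irq=%d scan_age=%d\n", port->card->irq, wl->scan_age);
	free(port);
	return 0;
}
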
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
new file mode 100644
index 000000000000..750d2a99cb4f
--- /dev/null
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -0,0 +1,2753 @@
1/*
2 * PS3 gelic network driver.
3 *
4 * Copyright (C) 2007 Sony Computer Entertainment Inc.
5 * Copyright 2007 Sony Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#undef DEBUG
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24
25#include <linux/etherdevice.h>
26#include <linux/ethtool.h>
27#include <linux/if_vlan.h>
28
29#include <linux/in.h>
30#include <linux/ip.h>
31#include <linux/tcp.h>
32#include <linux/wireless.h>
33#include <linux/ctype.h>
34#include <linux/string.h>
35#include <net/iw_handler.h>
36#include <net/ieee80211.h>
37
38#include <linux/dma-mapping.h>
39#include <net/checksum.h>
40#include <asm/firmware.h>
41#include <asm/ps3.h>
42#include <asm/lv1call.h>
43
44#include "ps3_gelic_net.h"
45#include "ps3_gelic_wireless.h"
46
47
48static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan);
49static int gelic_wl_try_associate(struct net_device *netdev);
50
51/*
52 * tables
53 */
54
55/* 802.11b/g channel to freq in MHz */
56static const int channel_freq[] = {
57 2412, 2417, 2422, 2427, 2432,
58 2437, 2442, 2447, 2452, 2457,
59 2462, 2467, 2472, 2484
60};
61#define NUM_CHANNELS ARRAY_SIZE(channel_freq)
62
63/* in bps */
64static const int bitrate_list[] = {
65 1000000,
66 2000000,
67 5500000,
68 11000000,
69 6000000,
70 9000000,
71 12000000,
72 18000000,
73 24000000,
74 36000000,
75 48000000,
76 54000000
77};
78#define NUM_BITRATES ARRAY_SIZE(bitrate_list)
79
80/*
81 * wpa2 support requires the hypervisor version 2.0 or later
82 */
83static inline int wpa2_capable(void)
84{
85 return (0 <= ps3_compare_firmware_version(2, 0, 0));
86}
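
wpa2_capable() keys off ps3_compare_firmware_version(2, 0, 0), which, as used here, is non-negative once the running firmware is at least the requested major/minor/rev. A hypothetical stand-alone version of that three-way comparison, not the PS3 implementation itself:

#include <stdio.h>

/* returns <0, 0 or >0 as the running version compares to the wanted one */
static int compare_version(int run_major, int run_minor, int run_rev,
			   int major, int minor, int rev)
{
	if (run_major != major)
		return run_major < major ? -1 : 1;
	if (run_minor != minor)
		return run_minor < minor ? -1 : 1;
	if (run_rev != rev)
		return run_rev < rev ? -1 : 1;
	return 0;
}

int main(void)
{
	/* e.g. firmware 2.1.0 checked against the required 2.0.0 */
	printf("wpa2 capable: %s\n",
	       0 <= compare_version(2, 1, 0, 2, 0, 0) ? "yes" : "no");
	return 0;
}
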
87
88static inline int precise_ie(void)
89{
90 return 0; /* FIXME */
91}
92/*
93 * post_eurus_cmd helpers
94 */
95struct eurus_cmd_arg_info {
 96	int pre_arg; /* command requires arg1, arg2 at POST COMMAND */
97 int post_arg; /* command requires arg1, arg2 at GET_RESULT */
98};
99
100static const struct eurus_cmd_arg_info cmd_info[GELIC_EURUS_CMD_MAX_INDEX] = {
101 [GELIC_EURUS_CMD_SET_COMMON_CFG] = { .pre_arg = 1},
102 [GELIC_EURUS_CMD_SET_WEP_CFG] = { .pre_arg = 1},
103 [GELIC_EURUS_CMD_SET_WPA_CFG] = { .pre_arg = 1},
104 [GELIC_EURUS_CMD_GET_COMMON_CFG] = { .post_arg = 1},
105 [GELIC_EURUS_CMD_GET_WEP_CFG] = { .post_arg = 1},
106 [GELIC_EURUS_CMD_GET_WPA_CFG] = { .post_arg = 1},
107 [GELIC_EURUS_CMD_GET_RSSI_CFG] = { .post_arg = 1},
108 [GELIC_EURUS_CMD_GET_SCAN] = { .post_arg = 1},
109};
110
111#ifdef DEBUG
112static const char *cmdstr(enum gelic_eurus_command ix)
113{
114 switch (ix) {
115 case GELIC_EURUS_CMD_ASSOC:
116 return "ASSOC";
117 case GELIC_EURUS_CMD_DISASSOC:
118 return "DISASSOC";
119 case GELIC_EURUS_CMD_START_SCAN:
120 return "SCAN";
121 case GELIC_EURUS_CMD_GET_SCAN:
122 return "GET SCAN";
123 case GELIC_EURUS_CMD_SET_COMMON_CFG:
124 return "SET_COMMON_CFG";
125 case GELIC_EURUS_CMD_GET_COMMON_CFG:
126 return "GET_COMMON_CFG";
127 case GELIC_EURUS_CMD_SET_WEP_CFG:
128 return "SET_WEP_CFG";
129 case GELIC_EURUS_CMD_GET_WEP_CFG:
130 return "GET_WEP_CFG";
131 case GELIC_EURUS_CMD_SET_WPA_CFG:
132 return "SET_WPA_CFG";
133 case GELIC_EURUS_CMD_GET_WPA_CFG:
134 return "GET_WPA_CFG";
135 case GELIC_EURUS_CMD_GET_RSSI_CFG:
136 return "GET_RSSI";
137 default:
138 break;
139 }
140 return "";
141};
142#else
143static inline const char *cmdstr(enum gelic_eurus_command ix)
144{
145 return "";
146}
147#endif
148
149/* synchronously do eurus commands */
150static void gelic_eurus_sync_cmd_worker(struct work_struct *work)
151{
152 struct gelic_eurus_cmd *cmd;
153 struct gelic_card *card;
154 struct gelic_wl_info *wl;
155
156 u64 arg1, arg2;
157
158 pr_debug("%s: <-\n", __func__);
159 cmd = container_of(work, struct gelic_eurus_cmd, work);
160 BUG_ON(cmd_info[cmd->cmd].pre_arg &&
161 cmd_info[cmd->cmd].post_arg);
162 wl = cmd->wl;
163 card = port_to_card(wl_port(wl));
164
165 if (cmd_info[cmd->cmd].pre_arg) {
166 arg1 = ps3_mm_phys_to_lpar(__pa(cmd->buffer));
167 arg2 = cmd->buf_size;
168 } else {
169 arg1 = 0;
170 arg2 = 0;
171 }
172 init_completion(&wl->cmd_done_intr);
173 pr_debug("%s: cmd='%s' start\n", __func__, cmdstr(cmd->cmd));
174 cmd->status = lv1_net_control(bus_id(card), dev_id(card),
175 GELIC_LV1_POST_WLAN_CMD,
176 cmd->cmd, arg1, arg2,
177 &cmd->tag, &cmd->size);
178 if (cmd->status) {
179 complete(&cmd->done);
180 pr_info("%s: cmd issue failed\n", __func__);
181 return;
182 }
183
184 wait_for_completion(&wl->cmd_done_intr);
185
186 if (cmd_info[cmd->cmd].post_arg) {
187 arg1 = ps3_mm_phys_to_lpar(__pa(cmd->buffer));
188 arg2 = cmd->buf_size;
189 } else {
190 arg1 = 0;
191 arg2 = 0;
192 }
193
194 cmd->status = lv1_net_control(bus_id(card), dev_id(card),
195 GELIC_LV1_GET_WLAN_CMD_RESULT,
196 cmd->tag, arg1, arg2,
197 &cmd->cmd_status, &cmd->size);
198#ifdef DEBUG
199 if (cmd->status || cmd->cmd_status) {
200 pr_debug("%s: cmd done tag=%#lx arg1=%#lx, arg2=%#lx\n", __func__,
201 cmd->tag, arg1, arg2);
202 pr_debug("%s: cmd done status=%#x cmd_status=%#lx size=%#lx\n",
203 __func__, cmd->status, cmd->cmd_status, cmd->size);
204 }
205#endif
206 complete(&cmd->done);
207 pr_debug("%s: cmd='%s' done\n", __func__, cmdstr(cmd->cmd));
208}
209
210static struct gelic_eurus_cmd *gelic_eurus_sync_cmd(struct gelic_wl_info *wl,
211 unsigned int eurus_cmd,
212 void *buffer,
213 unsigned int buf_size)
214{
215 struct gelic_eurus_cmd *cmd;
216
217 /* allocate cmd */
218 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
219 if (!cmd)
220 return NULL;
221
222 /* initialize members */
223 cmd->cmd = eurus_cmd;
224 cmd->buffer = buffer;
225 cmd->buf_size = buf_size;
226 cmd->wl = wl;
227 INIT_WORK(&cmd->work, gelic_eurus_sync_cmd_worker);
228 init_completion(&cmd->done);
229 queue_work(wl->eurus_cmd_queue, &cmd->work);
230
231 /* wait for command completion */
232 wait_for_completion(&cmd->done);
233
234 return cmd;
235}
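
gelic_eurus_sync_cmd() turns the asynchronous hypervisor command flow into a blocking call: the request is queued to a workqueue, the worker posts it and sleeps on cmd_done_intr until the command-completed interrupt signals it, and the original caller in turn sleeps on cmd->done. A user-space analogue of that "queue work, wait for completion" shape, with pthreads standing in for the kernel workqueue and completion primitives (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

struct cmd {
	int code;
	int status;
	struct completion done;
};

/* stands in for the worker that posts the command and waits for the irq */
static void *worker(void *arg)
{
	struct cmd *cmd = arg;

	cmd->status = 0;		/* pretend the command succeeded */
	complete(&cmd->done);
	return NULL;
}

int main(void)
{
	struct cmd cmd = { .code = 1 };
	pthread_t t;

	init_completion(&cmd.done);
	pthread_create(&t, NULL, worker, &cmd);	/* "queue_work" */
	wait_for_completion(&cmd.done);		/* caller blocks here */
	pthread_join(t, NULL);
	printf("cmd %d finished, status %d\n", cmd.code, cmd.status);
	return 0;
}
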
236
237static u32 gelic_wl_get_link(struct net_device *netdev)
238{
239 struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
240 u32 ret;
241
242 pr_debug("%s: <-\n", __func__);
243 down(&wl->assoc_stat_lock);
244 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
245 ret = 1;
246 else
247 ret = 0;
248 up(&wl->assoc_stat_lock);
249 pr_debug("%s: ->\n", __func__);
250 return ret;
251}
252
253static void gelic_wl_send_iwap_event(struct gelic_wl_info *wl, u8 *bssid)
254{
255 union iwreq_data data;
256
257 memset(&data, 0, sizeof(data));
258 if (bssid)
259 memcpy(data.ap_addr.sa_data, bssid, ETH_ALEN);
260 data.ap_addr.sa_family = ARPHRD_ETHER;
261 wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWAP,
262 &data, NULL);
263}
264
265/*
266 * wireless extension handlers and helpers
267 */
268
269/* SIOGIWNAME */
270static int gelic_wl_get_name(struct net_device *dev,
271 struct iw_request_info *info,
272 union iwreq_data *iwreq, char *extra)
273{
274 strcpy(iwreq->name, "IEEE 802.11bg");
275 return 0;
276}
277
278static void gelic_wl_get_ch_info(struct gelic_wl_info *wl)
279{
280 struct gelic_card *card = port_to_card(wl_port(wl));
281 u64 ch_info_raw, tmp;
282 int status;
283
284 if (!test_and_set_bit(GELIC_WL_STAT_CH_INFO, &wl->stat)) {
285 status = lv1_net_control(bus_id(card), dev_id(card),
286 GELIC_LV1_GET_CHANNEL, 0, 0, 0,
287 &ch_info_raw,
288 &tmp);
289 /* some fw versions may return error */
290 if (status) {
291 if (status != LV1_NO_ENTRY)
292 pr_info("%s: available ch unknown\n", __func__);
293 wl->ch_info = 0x07ff;/* 11 ch */
294 } else
 295			/* upper 16 bits hold the available channels */
296 wl->ch_info = ch_info_raw >> 48;
297 }
298 return;
299}
300
301/* SIOGIWRANGE */
302static int gelic_wl_get_range(struct net_device *netdev,
303 struct iw_request_info *info,
304 union iwreq_data *iwreq, char *extra)
305{
306 struct iw_point *point = &iwreq->data;
307 struct iw_range *range = (struct iw_range *)extra;
308 struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
309 unsigned int i, chs;
310
311 pr_debug("%s: <-\n", __func__);
312 point->length = sizeof(struct iw_range);
313 memset(range, 0, sizeof(struct iw_range));
314
315 range->we_version_compiled = WIRELESS_EXT;
316 range->we_version_source = 22;
317
318 /* available channels and frequencies */
319 gelic_wl_get_ch_info(wl);
320
321 for (i = 0, chs = 0;
322 i < NUM_CHANNELS && chs < IW_MAX_FREQUENCIES; i++)
323 if (wl->ch_info & (1 << i)) {
324 range->freq[chs].i = i + 1;
325 range->freq[chs].m = channel_freq[i];
326 range->freq[chs].e = 6;
327 chs++;
328 }
329 range->num_frequency = chs;
330 range->old_num_frequency = chs;
331 range->num_channels = chs;
332 range->old_num_channels = chs;
333
334 /* bitrates */
335 for (i = 0; i < NUM_BITRATES; i++)
336 range->bitrate[i] = bitrate_list[i];
337 range->num_bitrates = i;
338
339 /* signal levels */
340 range->max_qual.qual = 100; /* relative value */
341 range->max_qual.level = 100;
342 range->avg_qual.qual = 50;
343 range->avg_qual.level = 50;
344 range->sensitivity = 0;
345
346 /* Event capability */
347 IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
348 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
349 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
350
351 /* encryption capability */
352 range->enc_capa = IW_ENC_CAPA_WPA |
353 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
354 if (wpa2_capable())
355 range->enc_capa |= IW_ENC_CAPA_WPA2;
356 range->encoding_size[0] = 5; /* 40bit WEP */
357 range->encoding_size[1] = 13; /* 104bit WEP */
358 range->encoding_size[2] = 32; /* WPA-PSK */
359 range->num_encoding_sizes = 3;
360 range->max_encoding_tokens = GELIC_WEP_KEYS;
361
362 pr_debug("%s: ->\n", __func__);
363 return 0;
364
365}
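
gelic_wl_get_range() expands the ch_info bitmask (bit n set means channel n + 1 is usable) through the channel_freq[] table at the top of this file and reports each frequency in MHz with an exponent of 6. A small sketch of that expansion, reusing the same table values:

#include <stdio.h>

/* 802.11b/g channel to centre frequency in MHz, as in channel_freq[] above */
static const int channel_freq[] = {
	2412, 2417, 2422, 2427, 2432,
	2437, 2442, 2447, 2452, 2457,
	2462, 2467, 2472, 2484
};
#define NUM_CHANNELS (sizeof(channel_freq) / sizeof(channel_freq[0]))

int main(void)
{
	unsigned int ch_info = 0x07ff;	/* the 11-channel fallback used above */
	unsigned int i;

	for (i = 0; i < NUM_CHANNELS; i++)
		if (ch_info & (1u << i))
			printf("channel %u -> %d MHz\n", i + 1, channel_freq[i]);
	return 0;
}
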
366
367/* SIOC{G,S}IWSCAN */
368static int gelic_wl_set_scan(struct net_device *netdev,
369 struct iw_request_info *info,
370 union iwreq_data *wrqu, char *extra)
371{
372 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
373
374 return gelic_wl_start_scan(wl, 1);
375}
376
377#define OUI_LEN 3
378static const u8 rsn_oui[OUI_LEN] = { 0x00, 0x0f, 0xac };
379static const u8 wpa_oui[OUI_LEN] = { 0x00, 0x50, 0xf2 };
380
381/*
382 * synthesize WPA/RSN IE data
383 * See WiFi WPA specification and IEEE 802.11-2007 7.3.2.25
384 * for the format
385 */
386static size_t gelic_wl_synthesize_ie(u8 *buf,
387 struct gelic_eurus_scan_info *scan)
388{
389
390 const u8 *oui_header;
391 u8 *start = buf;
392 int rsn;
393 int ccmp;
394
395 pr_debug("%s: <- sec=%16x\n", __func__, scan->security);
396 switch (be16_to_cpu(scan->security) & GELIC_EURUS_SCAN_SEC_MASK) {
397 case GELIC_EURUS_SCAN_SEC_WPA:
398 rsn = 0;
399 break;
400 case GELIC_EURUS_SCAN_SEC_WPA2:
401 rsn = 1;
402 break;
403 default:
404 /* WEP or none. No IE returned */
405 return 0;
406 }
407
408 switch (be16_to_cpu(scan->security) & GELIC_EURUS_SCAN_SEC_WPA_MASK) {
409 case GELIC_EURUS_SCAN_SEC_WPA_TKIP:
410 ccmp = 0;
411 break;
412 case GELIC_EURUS_SCAN_SEC_WPA_AES:
413 ccmp = 1;
414 break;
415 default:
416 if (rsn) {
417 ccmp = 1;
418 pr_info("%s: no cipher info. defaulted to CCMP\n",
419 __func__);
420 } else {
421 ccmp = 0;
422 pr_info("%s: no cipher info. defaulted to TKIP\n",
423 __func__);
424 }
425 }
426
427 if (rsn)
428 oui_header = rsn_oui;
429 else
430 oui_header = wpa_oui;
431
432 /* element id */
433 if (rsn)
434 *buf++ = MFIE_TYPE_RSN;
435 else
436 *buf++ = MFIE_TYPE_GENERIC;
437
 438	/* length field; set later */
439 buf++;
440
441 /* wpa special header */
442 if (!rsn) {
443 memcpy(buf, wpa_oui, OUI_LEN);
444 buf += OUI_LEN;
445 *buf++ = 0x01;
446 }
447
448 /* version */
449 *buf++ = 0x01; /* version 1.0 */
450 *buf++ = 0x00;
451
452 /* group cipher */
453 memcpy(buf, oui_header, OUI_LEN);
454 buf += OUI_LEN;
455
456 if (ccmp)
457 *buf++ = 0x04; /* CCMP */
458 else
459 *buf++ = 0x02; /* TKIP */
460
461 /* pairwise key count always 1 */
462 *buf++ = 0x01;
463 *buf++ = 0x00;
464
 465	/* pairwise key suite */
466 memcpy(buf, oui_header, OUI_LEN);
467 buf += OUI_LEN;
468 if (ccmp)
469 *buf++ = 0x04; /* CCMP */
470 else
471 *buf++ = 0x02; /* TKIP */
472
473 /* AKM count is 1 */
474 *buf++ = 0x01;
475 *buf++ = 0x00;
476
 477	/* AKM suite is assumed to be PSK */
478 memcpy(buf, oui_header, OUI_LEN);
479 buf += OUI_LEN;
480 *buf++ = 0x02; /* PSK */
481
482 /* RSN capabilities is 0 */
483 *buf++ = 0x00;
484 *buf++ = 0x00;
485
486 /* set length field */
487 start[1] = (buf - start - 2);
488
489 pr_debug("%s: ->\n", __func__);
490 return (buf - start);
491}
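
For an access point reported as WPA2 with CCMP, the function above emits a fixed 22-byte RSN element (length field 20): element id MFIE_TYPE_RSN (0x30), version 1, the group cipher suite, a one-entry pairwise suite list, a one-entry PSK AKM list and zeroed capabilities, all suite selectors using the 00:0f:ac OUI from rsn_oui[]. A stand-alone sketch that builds and dumps the same bytes:

#include <stdio.h>
#include <string.h>

#define OUI_LEN 3
static const unsigned char rsn_oui[OUI_LEN] = { 0x00, 0x0f, 0xac };

/* build the RSN(WPA2)/CCMP information element as synthesized above */
static size_t build_rsn_ccmp_ie(unsigned char *buf)
{
	unsigned char *start = buf;

	*buf++ = 0x30;			/* element id: RSN */
	buf++;				/* length, filled in at the end */
	*buf++ = 0x01; *buf++ = 0x00;	/* version 1 */
	memcpy(buf, rsn_oui, OUI_LEN); buf += OUI_LEN;
	*buf++ = 0x04;			/* group cipher: CCMP */
	*buf++ = 0x01; *buf++ = 0x00;	/* pairwise suite count: 1 */
	memcpy(buf, rsn_oui, OUI_LEN); buf += OUI_LEN;
	*buf++ = 0x04;			/* pairwise cipher: CCMP */
	*buf++ = 0x01; *buf++ = 0x00;	/* AKM suite count: 1 */
	memcpy(buf, rsn_oui, OUI_LEN); buf += OUI_LEN;
	*buf++ = 0x02;			/* AKM: PSK */
	*buf++ = 0x00; *buf++ = 0x00;	/* RSN capabilities */
	start[1] = (unsigned char)(buf - start - 2);
	return (size_t)(buf - start);
}

int main(void)
{
	unsigned char ie[32];
	size_t i, len = build_rsn_ccmp_ie(ie);

	for (i = 0; i < len; i++)
		printf("%02x ", ie[i]);
	printf("(%zu bytes, length field %u)\n", len, (unsigned)ie[1]);
	return 0;
}
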
492
493struct ie_item {
494 u8 *data;
495 u8 len;
496};
497
498struct ie_info {
499 struct ie_item wpa;
500 struct ie_item rsn;
501};
502
503static void gelic_wl_parse_ie(u8 *data, size_t len,
504 struct ie_info *ie_info)
505{
506 size_t data_left = len;
507 u8 *pos = data;
508 u8 item_len;
509 u8 item_id;
510
511 pr_debug("%s: data=%p len=%ld \n", __func__,
512 data, len);
513 memset(ie_info, 0, sizeof(struct ie_info));
514
515 while (0 < data_left) {
516 item_id = *pos++;
517 item_len = *pos++;
518
519 switch (item_id) {
520 case MFIE_TYPE_GENERIC:
521 if (!memcmp(pos, wpa_oui, OUI_LEN) &&
522 pos[OUI_LEN] == 0x01) {
523 ie_info->wpa.data = pos - 2;
524 ie_info->wpa.len = item_len + 2;
525 }
526 break;
527 case MFIE_TYPE_RSN:
528 ie_info->rsn.data = pos - 2;
529 /* length includes the header */
530 ie_info->rsn.len = item_len + 2;
531 break;
532 default:
533 pr_debug("%s: ignore %#x,%d\n", __func__,
534 item_id, item_len);
535 break;
536 }
537 pos += item_len;
538 data_left -= item_len + 2;
539 }
540 pr_debug("%s: wpa=%p,%d wpa2=%p,%d\n", __func__,
541 ie_info->wpa.data, ie_info->wpa.len,
542 ie_info->rsn.data, ie_info->rsn.len);
543}
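
gelic_wl_parse_ie() walks the element buffer as a flat sequence of (id, length, value) records, remembering where the vendor-specific WPA element (OUI 00:50:f2, type 1) and the RSN element start. A stand-alone sketch of the same walk; unlike the loop above it also checks that each element fits in the remaining buffer, which is an added robustness assumption rather than something the driver does:

#include <stdio.h>
#include <stddef.h>

/* walk 802.11 information elements laid out as id, length, value[length] */
static void walk_ies(const unsigned char *data, size_t len)
{
	size_t pos = 0;

	while (pos + 2 <= len) {
		unsigned char id = data[pos];
		unsigned char item_len = data[pos + 1];

		if (pos + 2 + item_len > len)
			break;			/* truncated element */
		printf("element id %u, %u byte(s) of data\n", id, item_len);
		pos += 2 + item_len;
	}
}

int main(void)
{
	/* SSID "ps3" followed by a supported-rates element */
	static const unsigned char ies[] = {
		0x00, 0x03, 'p', 's', '3',
		0x01, 0x04, 0x82, 0x84, 0x8b, 0x96,
	};

	walk_ies(ies, sizeof(ies));
	return 0;
}
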
544
545
546/*
 547 * translate the scan information from the hypervisor into an
 548 * independent format
549 */
550static char *gelic_wl_translate_scan(struct net_device *netdev,
551 char *ev,
552 char *stop,
553 struct gelic_wl_scan_info *network)
554{
555 struct iw_event iwe;
556 struct gelic_eurus_scan_info *scan = network->hwinfo;
557 char *tmp;
558 u8 rate;
559 unsigned int i, j, len;
560 u8 buf[MAX_WPA_IE_LEN];
561
562 pr_debug("%s: <-\n", __func__);
563
564 /* first entry should be AP's mac address */
565 iwe.cmd = SIOCGIWAP;
566 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
567 memcpy(iwe.u.ap_addr.sa_data, &scan->bssid[2], ETH_ALEN);
568 ev = iwe_stream_add_event(ev, stop, &iwe, IW_EV_ADDR_LEN);
569
570 /* ESSID */
571 iwe.cmd = SIOCGIWESSID;
572 iwe.u.data.flags = 1;
573 iwe.u.data.length = strnlen(scan->essid, 32);
574 ev = iwe_stream_add_point(ev, stop, &iwe, scan->essid);
575
576 /* FREQUENCY */
577 iwe.cmd = SIOCGIWFREQ;
578 iwe.u.freq.m = be16_to_cpu(scan->channel);
579 iwe.u.freq.e = 0; /* table value in MHz */
580 iwe.u.freq.i = 0;
581 ev = iwe_stream_add_event(ev, stop, &iwe, IW_EV_FREQ_LEN);
582
583 /* RATES */
584 iwe.cmd = SIOCGIWRATE;
585 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
586 /* to stuff multiple values in one event */
587 tmp = ev + IW_EV_LCP_LEN;
588	/* put them in ascending order (lowest first) */
589 i = 0;
590 j = 0;
591 pr_debug("%s: rates=%d rate=%d\n", __func__,
592 network->rate_len, network->rate_ext_len);
593 while (i < network->rate_len) {
594 if (j < network->rate_ext_len &&
595 ((scan->ext_rate[j] & 0x7f) < (scan->rate[i] & 0x7f)))
596 rate = scan->ext_rate[j++] & 0x7f;
597 else
598 rate = scan->rate[i++] & 0x7f;
599 iwe.u.bitrate.value = rate * 500000; /* 500kbps unit */
600 tmp = iwe_stream_add_value(ev, tmp, stop, &iwe,
601 IW_EV_PARAM_LEN);
602 }
603 while (j < network->rate_ext_len) {
604 iwe.u.bitrate.value = (scan->ext_rate[j++] & 0x7f) * 500000;
605 tmp = iwe_stream_add_value(ev, tmp, stop, &iwe,
606 IW_EV_PARAM_LEN);
607 }
608 /* Check if we added any rate */
609 if (IW_EV_LCP_LEN < (tmp - ev))
610 ev = tmp;
611
612 /* ENCODE */
613 iwe.cmd = SIOCGIWENCODE;
614 if (be16_to_cpu(scan->capability) & WLAN_CAPABILITY_PRIVACY)
615 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
616 else
617 iwe.u.data.flags = IW_ENCODE_DISABLED;
618 iwe.u.data.length = 0;
619 ev = iwe_stream_add_point(ev, stop, &iwe, scan->essid);
620
621 /* MODE */
622 iwe.cmd = SIOCGIWMODE;
623 if (be16_to_cpu(scan->capability) &
624 (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
625 if (be16_to_cpu(scan->capability) & WLAN_CAPABILITY_ESS)
626 iwe.u.mode = IW_MODE_MASTER;
627 else
628 iwe.u.mode = IW_MODE_ADHOC;
629 ev = iwe_stream_add_event(ev, stop, &iwe, IW_EV_UINT_LEN);
630 }
631
632 /* QUAL */
633 iwe.cmd = IWEVQUAL;
634 iwe.u.qual.updated = IW_QUAL_ALL_UPDATED |
635 IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID;
636 iwe.u.qual.level = be16_to_cpu(scan->rssi);
637 iwe.u.qual.qual = be16_to_cpu(scan->rssi);
638 iwe.u.qual.noise = 0;
639 ev = iwe_stream_add_event(ev, stop, &iwe, IW_EV_QUAL_LEN);
640
641 /* RSN */
642 memset(&iwe, 0, sizeof(iwe));
643 if (be16_to_cpu(scan->size) <= sizeof(*scan)) {
644		/* if this is a WPA[2]-capable AP, synthesize an IE and add it */
645 len = gelic_wl_synthesize_ie(buf, scan);
646 if (len) {
647 iwe.cmd = IWEVGENIE;
648 iwe.u.data.length = len;
649 ev = iwe_stream_add_point(ev, stop, &iwe, buf);
650 }
651 } else {
652 /* this scan info has IE data */
653 struct ie_info ie_info;
654 size_t data_len;
655
656 data_len = be16_to_cpu(scan->size) - sizeof(*scan);
657
658 gelic_wl_parse_ie(scan->elements, data_len, &ie_info);
659
660 if (ie_info.wpa.len && (ie_info.wpa.len <= sizeof(buf))) {
661 memcpy(buf, ie_info.wpa.data, ie_info.wpa.len);
662 iwe.cmd = IWEVGENIE;
663 iwe.u.data.length = ie_info.wpa.len;
664 ev = iwe_stream_add_point(ev, stop, &iwe, buf);
665 }
666
667 if (ie_info.rsn.len && (ie_info.rsn.len <= sizeof(buf))) {
668 memset(&iwe, 0, sizeof(iwe));
669 memcpy(buf, ie_info.rsn.data, ie_info.rsn.len);
670 iwe.cmd = IWEVGENIE;
671 iwe.u.data.length = ie_info.rsn.len;
672 ev = iwe_stream_add_point(ev, stop, &iwe, buf);
673 }
674 }
675
676 pr_debug("%s: ->\n", __func__);
677 return ev;
678}
679
680
681static int gelic_wl_get_scan(struct net_device *netdev,
682 struct iw_request_info *info,
683 union iwreq_data *wrqu, char *extra)
684{
685 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
686 struct gelic_wl_scan_info *scan_info;
687 char *ev = extra;
688 char *stop = ev + wrqu->data.length;
689 int ret = 0;
690 unsigned long this_time = jiffies;
691
692 pr_debug("%s: <-\n", __func__);
693 if (down_interruptible(&wl->scan_lock))
694 return -EAGAIN;
695
696 switch (wl->scan_stat) {
697 case GELIC_WL_SCAN_STAT_SCANNING:
698		/* if a scan is in progress, the caller should call again */
699 ret = -EAGAIN;
700 goto out;
701 break;
702
703 case GELIC_WL_SCAN_STAT_INIT:
704 /* last scan request failed or never issued */
705 ret = -ENODEV;
706 goto out;
707 break;
708 case GELIC_WL_SCAN_STAT_GOT_LIST:
709 /* ok, use current list */
710 break;
711 }
712
713 list_for_each_entry(scan_info, &wl->network_list, list) {
714 if (wl->scan_age == 0 ||
715 time_after(scan_info->last_scanned + wl->scan_age,
716 this_time))
717 ev = gelic_wl_translate_scan(netdev, ev, stop,
718 scan_info);
719 else
720 pr_debug("%s:entry too old\n", __func__);
721
722 if (stop - ev <= IW_EV_ADDR_LEN) {
723 ret = -E2BIG;
724 goto out;
725 }
726 }
727
728 wrqu->data.length = ev - extra;
729 wrqu->data.flags = 0;
730out:
731 up(&wl->scan_lock);
732 pr_debug("%s: -> %d %d\n", __func__, ret, wrqu->data.length);
733 return ret;
734}
735
736#ifdef DEBUG
737static void scan_list_dump(struct gelic_wl_info *wl)
738{
739 struct gelic_wl_scan_info *scan_info;
740 int i;
741 DECLARE_MAC_BUF(mac);
742
743 i = 0;
744 list_for_each_entry(scan_info, &wl->network_list, list) {
745 pr_debug("%s: item %d\n", __func__, i++);
746 pr_debug("valid=%d eurusindex=%d last=%lx\n",
747 scan_info->valid, scan_info->eurus_index,
748 scan_info->last_scanned);
749 pr_debug("r_len=%d r_ext_len=%d essid_len=%d\n",
750 scan_info->rate_len, scan_info->rate_ext_len,
751 scan_info->essid_len);
752 /* -- */
753 pr_debug("bssid=%s\n",
754 print_mac(mac, &scan_info->hwinfo->bssid[2]));
755 pr_debug("essid=%s\n", scan_info->hwinfo->essid);
756 }
757}
758#endif
759
760static int gelic_wl_set_auth(struct net_device *netdev,
761 struct iw_request_info *info,
762 union iwreq_data *data, char *extra)
763{
764 struct iw_param *param = &data->param;
765 struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
766 unsigned long irqflag;
767 int ret = 0;
768
769 pr_debug("%s: <- %d\n", __func__, param->flags & IW_AUTH_INDEX);
770 spin_lock_irqsave(&wl->lock, irqflag);
771 switch (param->flags & IW_AUTH_INDEX) {
772 case IW_AUTH_WPA_VERSION:
773 if (param->value & IW_AUTH_WPA_VERSION_DISABLED) {
774 pr_debug("%s: NO WPA selected\n", __func__);
775 wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
776 wl->group_cipher_method = GELIC_WL_CIPHER_WEP;
777 wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP;
778 }
779 if (param->value & IW_AUTH_WPA_VERSION_WPA) {
780 pr_debug("%s: WPA version 1 selected\n", __func__);
781 wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA;
782 wl->group_cipher_method = GELIC_WL_CIPHER_TKIP;
783 wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP;
784 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
785 }
786 if (param->value & IW_AUTH_WPA_VERSION_WPA2) {
787			/*
788			 * The hypervisor may not report the cipher
789			 * information of the AP if it uses WPA2,
790			 * so a suitable cipher cannot be decided
791			 * from its beacon.
792			 * The AP's cipher information must be known
793			 * by other means prior to
794			 * the association.
795			 */
796 if (!precise_ie())
797 pr_info("%s: WPA2 may not work\n", __func__);
798 if (wpa2_capable()) {
799 wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA2;
800 wl->group_cipher_method = GELIC_WL_CIPHER_AES;
801 wl->pairwise_cipher_method =
802 GELIC_WL_CIPHER_AES;
803 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
804 } else
805 ret = -EINVAL;
806 }
807 break;
808
809 case IW_AUTH_CIPHER_PAIRWISE:
810 if (param->value &
811 (IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_WEP40)) {
812 pr_debug("%s: WEP selected\n", __func__);
813 wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP;
814 }
815 if (param->value & IW_AUTH_CIPHER_TKIP) {
816 pr_debug("%s: TKIP selected\n", __func__);
817 wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP;
818 }
819 if (param->value & IW_AUTH_CIPHER_CCMP) {
820 pr_debug("%s: CCMP selected\n", __func__);
821 wl->pairwise_cipher_method = GELIC_WL_CIPHER_AES;
822 }
823 if (param->value & IW_AUTH_CIPHER_NONE) {
824 pr_debug("%s: no auth selected\n", __func__);
825 wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE;
826 }
827 break;
828 case IW_AUTH_CIPHER_GROUP:
829 if (param->value &
830 (IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_WEP40)) {
831 pr_debug("%s: WEP selected\n", __func__);
832 wl->group_cipher_method = GELIC_WL_CIPHER_WEP;
833 }
834 if (param->value & IW_AUTH_CIPHER_TKIP) {
835 pr_debug("%s: TKIP selected\n", __func__);
836 wl->group_cipher_method = GELIC_WL_CIPHER_TKIP;
837 }
838 if (param->value & IW_AUTH_CIPHER_CCMP) {
839 pr_debug("%s: CCMP selected\n", __func__);
840 wl->group_cipher_method = GELIC_WL_CIPHER_AES;
841 }
842 if (param->value & IW_AUTH_CIPHER_NONE) {
843 pr_debug("%s: no auth selected\n", __func__);
844 wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
845 }
846 break;
847 case IW_AUTH_80211_AUTH_ALG:
848 if (param->value & IW_AUTH_ALG_SHARED_KEY) {
849 pr_debug("%s: shared key specified\n", __func__);
850 wl->auth_method = GELIC_EURUS_AUTH_SHARED;
851 } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
852 pr_debug("%s: open system specified\n", __func__);
853 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
854 } else
855 ret = -EINVAL;
856 break;
857
858 case IW_AUTH_WPA_ENABLED:
859 if (param->value) {
860 pr_debug("%s: WPA enabled\n", __func__);
861 wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA;
862 } else {
863 pr_debug("%s: WPA disabled\n", __func__);
864 wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
865 }
866 break;
867
868 case IW_AUTH_KEY_MGMT:
869 if (param->value & IW_AUTH_KEY_MGMT_PSK)
870 break;
871 /* intentionally fall through */
872 default:
873 ret = -EOPNOTSUPP;
874 break;
875 };
876
877 if (!ret)
878 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
879
880 spin_unlock_irqrestore(&wl->lock, irqflag);
881 pr_debug("%s: -> %d\n", __func__, ret);
882 return ret;
883}
884
885static int gelic_wl_get_auth(struct net_device *netdev,
886 struct iw_request_info *info,
887 union iwreq_data *iwreq, char *extra)
888{
889 struct iw_param *param = &iwreq->param;
890 struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
891 unsigned long irqflag;
892 int ret = 0;
893
894 pr_debug("%s: <- %d\n", __func__, param->flags & IW_AUTH_INDEX);
895 spin_lock_irqsave(&wl->lock, irqflag);
896 switch (param->flags & IW_AUTH_INDEX) {
897 case IW_AUTH_WPA_VERSION:
898 switch (wl->wpa_level) {
899 case GELIC_WL_WPA_LEVEL_WPA:
900 param->value |= IW_AUTH_WPA_VERSION_WPA;
901 break;
902 case GELIC_WL_WPA_LEVEL_WPA2:
903 param->value |= IW_AUTH_WPA_VERSION_WPA2;
904 break;
905 default:
906 param->value |= IW_AUTH_WPA_VERSION_DISABLED;
907 }
908 break;
909
910 case IW_AUTH_80211_AUTH_ALG:
911 if (wl->auth_method == GELIC_EURUS_AUTH_SHARED)
912 param->value = IW_AUTH_ALG_SHARED_KEY;
913 else if (wl->auth_method == GELIC_EURUS_AUTH_OPEN)
914 param->value = IW_AUTH_ALG_OPEN_SYSTEM;
915 break;
916
917 case IW_AUTH_WPA_ENABLED:
918 switch (wl->wpa_level) {
919 case GELIC_WL_WPA_LEVEL_WPA:
920 case GELIC_WL_WPA_LEVEL_WPA2:
921 param->value = 1;
922 break;
923 default:
924 param->value = 0;
925 break;
926 }
927 break;
928 default:
929 ret = -EOPNOTSUPP;
930 }
931
932 spin_unlock_irqrestore(&wl->lock, irqflag);
933 pr_debug("%s: -> %d\n", __func__, ret);
934 return ret;
935}
936
937/* SIOC{S,G}IWESSID */
938static int gelic_wl_set_essid(struct net_device *netdev,
939 struct iw_request_info *info,
940 union iwreq_data *data, char *extra)
941{
942 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
943 unsigned long irqflag;
944
945 pr_debug("%s: <- l=%d f=%d\n", __func__,
946 data->essid.length, data->essid.flags);
947 if (IW_ESSID_MAX_SIZE < data->essid.length)
948 return -EINVAL;
949
950 spin_lock_irqsave(&wl->lock, irqflag);
951 if (data->essid.flags) {
952 wl->essid_len = data->essid.length;
953 memcpy(wl->essid, extra, wl->essid_len);
954 pr_debug("%s: essid = '%s'\n", __func__, extra);
955 set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
956 } else {
957 pr_debug("%s: ESSID any \n", __func__);
958 clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
959 }
960 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
961 spin_unlock_irqrestore(&wl->lock, irqflag);
962
963
964 gelic_wl_try_associate(netdev); /* FIXME */
965 pr_debug("%s: -> \n", __func__);
966 return 0;
967}
968
969static int gelic_wl_get_essid(struct net_device *netdev,
970 struct iw_request_info *info,
971 union iwreq_data *data, char *extra)
972{
973 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
974 unsigned long irqflag;
975
976 pr_debug("%s: <- \n", __func__);
977 down(&wl->assoc_stat_lock);
978 spin_lock_irqsave(&wl->lock, irqflag);
979 if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) ||
980 wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) {
981 memcpy(extra, wl->essid, wl->essid_len);
982 data->essid.length = wl->essid_len;
983 data->essid.flags = 1;
984 } else
985 data->essid.flags = 0;
986
987 up(&wl->assoc_stat_lock);
988 spin_unlock_irqrestore(&wl->lock, irqflag);
989 pr_debug("%s: -> len=%d \n", __func__, data->essid.length);
990
991 return 0;
992}
993
994/* SIOC{S,G}IWENCODE */
995static int gelic_wl_set_encode(struct net_device *netdev,
996 struct iw_request_info *info,
997 union iwreq_data *data, char *extra)
998{
999 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1000 struct iw_point *enc = &data->encoding;
1001 __u16 flags;
1002	unsigned long irqflag;
1003 int key_index, index_specified;
1004 int ret = 0;
1005
1006 pr_debug("%s: <- \n", __func__);
1007 flags = enc->flags & IW_ENCODE_FLAGS;
1008 key_index = enc->flags & IW_ENCODE_INDEX;
1009
1010 pr_debug("%s: key_index = %d\n", __func__, key_index);
1011 pr_debug("%s: key_len = %d\n", __func__, enc->length);
1012 pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS);
1013
1014 if (GELIC_WEP_KEYS < key_index)
1015 return -EINVAL;
1016
1017 spin_lock_irqsave(&wl->lock, irqflag);
1018 if (key_index) {
1019 index_specified = 1;
1020 key_index--;
1021 } else {
1022 index_specified = 0;
1023 key_index = wl->current_key;
1024 }
1025
1026 if (flags & IW_ENCODE_NOKEY) {
1027 /* if just IW_ENCODE_NOKEY, change current key index */
1028		if (!(flags & ~IW_ENCODE_NOKEY) && index_specified) {
1029 wl->current_key = key_index;
1030 goto done;
1031 }
1032
1033 if (flags & IW_ENCODE_DISABLED) {
1034 if (!index_specified) {
1035 /* disable encryption */
1036 wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
1037 wl->pairwise_cipher_method =
1038 GELIC_WL_CIPHER_NONE;
1039				/* invalidate all keys */
1040 wl->key_enabled = 0;
1041 } else
1042 clear_bit(key_index, &wl->key_enabled);
1043 }
1044
1045 if (flags & IW_ENCODE_OPEN)
1046 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
1047 if (flags & IW_ENCODE_RESTRICTED) {
1048 pr_info("%s: shared key mode enabled\n", __func__);
1049 wl->auth_method = GELIC_EURUS_AUTH_SHARED;
1050 }
1051 } else {
1052 if (IW_ENCODING_TOKEN_MAX < enc->length) {
1053 ret = -EINVAL;
1054 goto done;
1055 }
1056 wl->key_len[key_index] = enc->length;
1057 memcpy(wl->key[key_index], extra, enc->length);
1058 set_bit(key_index, &wl->key_enabled);
1059 wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP;
1060 wl->group_cipher_method = GELIC_WL_CIPHER_WEP;
1061 }
1062 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
1063done:
1064 spin_unlock_irqrestore(&wl->lock, irqflag);
1065 pr_debug("%s: -> \n", __func__);
1066 return ret;
1067}
1068
1069static int gelic_wl_get_encode(struct net_device *netdev,
1070 struct iw_request_info *info,
1071 union iwreq_data *data, char *extra)
1072{
1073 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1074 struct iw_point *enc = &data->encoding;
1075	unsigned long irqflag;
1076 unsigned int key_index, index_specified;
1077 int ret = 0;
1078
1079 pr_debug("%s: <- \n", __func__);
1080 key_index = enc->flags & IW_ENCODE_INDEX;
1081 pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__,
1082 enc->flags, enc->pointer, enc->length, extra);
1083 if (GELIC_WEP_KEYS < key_index)
1084 return -EINVAL;
1085
1086 spin_lock_irqsave(&wl->lock, irqflag);
1087 if (key_index) {
1088 index_specified = 1;
1089 key_index--;
1090 } else {
1091 index_specified = 0;
1092 key_index = wl->current_key;
1093 }
1094
1095 if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
1096 switch (wl->auth_method) {
1097 case GELIC_EURUS_AUTH_OPEN:
1098 enc->flags = IW_ENCODE_OPEN;
1099 break;
1100 case GELIC_EURUS_AUTH_SHARED:
1101 enc->flags = IW_ENCODE_RESTRICTED;
1102 break;
1103 }
1104 } else
1105 enc->flags = IW_ENCODE_DISABLED;
1106
1107 if (test_bit(key_index, &wl->key_enabled)) {
1108 if (enc->length < wl->key_len[key_index]) {
1109 ret = -EINVAL;
1110 goto done;
1111 }
1112 enc->length = wl->key_len[key_index];
1113 memcpy(extra, wl->key[key_index], wl->key_len[key_index]);
1114 } else {
1115 enc->length = 0;
1116 enc->flags |= IW_ENCODE_NOKEY;
1117 }
1118 enc->flags |= key_index + 1;
1119 pr_debug("%s: -> flag=%x len=%d\n", __func__,
1120 enc->flags, enc->length);
1121
1122done:
1123 spin_unlock_irqrestore(&wl->lock, irqflag);
1124 return ret;
1125}
1126
1127/* SIOC{S,G}IWAP */
1128static int gelic_wl_set_ap(struct net_device *netdev,
1129 struct iw_request_info *info,
1130 union iwreq_data *data, char *extra)
1131{
1132 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1133 unsigned long irqflag;
1134
1135 pr_debug("%s: <-\n", __func__);
1136 if (data->ap_addr.sa_family != ARPHRD_ETHER)
1137 return -EINVAL;
1138
1139 spin_lock_irqsave(&wl->lock, irqflag);
1140 if (is_valid_ether_addr(data->ap_addr.sa_data)) {
1141 memcpy(wl->bssid, data->ap_addr.sa_data,
1142 ETH_ALEN);
1143 set_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
1144 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
1145 pr_debug("%s: bss=%02x:%02x:%02x:%02x:%02x:%02x\n",
1146 __func__,
1147 wl->bssid[0], wl->bssid[1],
1148 wl->bssid[2], wl->bssid[3],
1149 wl->bssid[4], wl->bssid[5]);
1150 } else {
1151 pr_debug("%s: clear bssid\n", __func__);
1152 clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
1153 memset(wl->bssid, 0, ETH_ALEN);
1154 }
1155 spin_unlock_irqrestore(&wl->lock, irqflag);
1156 pr_debug("%s: ->\n", __func__);
1157 return 0;
1158}
1159
1160static int gelic_wl_get_ap(struct net_device *netdev,
1161 struct iw_request_info *info,
1162 union iwreq_data *data, char *extra)
1163{
1164 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1165 unsigned long irqflag;
1166
1167 pr_debug("%s: <-\n", __func__);
1168 down(&wl->assoc_stat_lock);
1169 spin_lock_irqsave(&wl->lock, irqflag);
1170 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) {
1171 data->ap_addr.sa_family = ARPHRD_ETHER;
1172 memcpy(data->ap_addr.sa_data, wl->active_bssid,
1173 ETH_ALEN);
1174 } else
1175 memset(data->ap_addr.sa_data, 0, ETH_ALEN);
1176
1177 spin_unlock_irqrestore(&wl->lock, irqflag);
1178 up(&wl->assoc_stat_lock);
1179 pr_debug("%s: ->\n", __func__);
1180 return 0;
1181}
1182
1183/* SIOC{S,G}IWENCODEEXT */
1184static int gelic_wl_set_encodeext(struct net_device *netdev,
1185 struct iw_request_info *info,
1186 union iwreq_data *data, char *extra)
1187{
1188 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1189 struct iw_point *enc = &data->encoding;
1190 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1191 __u16 alg;
1192 __u16 flags;
1193	unsigned long irqflag;
1194 int key_index;
1195 int ret = 0;
1196
1197 pr_debug("%s: <- \n", __func__);
1198 flags = enc->flags & IW_ENCODE_FLAGS;
1199 alg = ext->alg;
1200 key_index = enc->flags & IW_ENCODE_INDEX;
1201
1202 pr_debug("%s: key_index = %d\n", __func__, key_index);
1203 pr_debug("%s: key_len = %d\n", __func__, enc->length);
1204 pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS);
1205 pr_debug("%s: ext_flag=%x\n", __func__, ext->ext_flags);
1206 pr_debug("%s: ext_key_len=%x\n", __func__, ext->key_len);
1207
1208 if (GELIC_WEP_KEYS < key_index)
1209 return -EINVAL;
1210
1211 spin_lock_irqsave(&wl->lock, irqflag);
1212 if (key_index)
1213 key_index--;
1214 else
1215 key_index = wl->current_key;
1216
1217 if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
1218		/* request to change default key index */
1219 pr_debug("%s: request to change default key to %d\n",
1220 __func__, key_index);
1221 wl->current_key = key_index;
1222 goto done;
1223 }
1224
1225 if (alg == IW_ENCODE_ALG_NONE || (flags & IW_ENCODE_DISABLED)) {
1226 pr_debug("%s: alg disabled\n", __func__);
1227 wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
1228 wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
1229 wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE;
1230 wl->auth_method = GELIC_EURUS_AUTH_OPEN; /* should be open */
1231 } else if (alg == IW_ENCODE_ALG_WEP) {
1232 pr_debug("%s: WEP requested\n", __func__);
1233 if (flags & IW_ENCODE_OPEN) {
1234 pr_debug("%s: open key mode\n", __func__);
1235 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
1236 }
1237 if (flags & IW_ENCODE_RESTRICTED) {
1238 pr_debug("%s: shared key mode\n", __func__);
1239 wl->auth_method = GELIC_EURUS_AUTH_SHARED;
1240 }
1241 if (IW_ENCODING_TOKEN_MAX < ext->key_len) {
1242 pr_info("%s: key is too long %d\n", __func__,
1243 ext->key_len);
1244 ret = -EINVAL;
1245 goto done;
1246 }
1247 /* OK, update the key */
1248 wl->key_len[key_index] = ext->key_len;
1249 memset(wl->key[key_index], 0, IW_ENCODING_TOKEN_MAX);
1250 memcpy(wl->key[key_index], ext->key, ext->key_len);
1251 set_bit(key_index, &wl->key_enabled);
1252 /* remember wep info changed */
1253 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
1254 } else if ((alg == IW_ENCODE_ALG_TKIP) || (alg == IW_ENCODE_ALG_CCMP)) {
1255 pr_debug("%s: TKIP/CCMP requested alg=%d\n", __func__, alg);
1256 /* check key length */
1257 if (IW_ENCODING_TOKEN_MAX < ext->key_len) {
1258 pr_info("%s: key is too long %d\n", __func__,
1259 ext->key_len);
1260 ret = -EINVAL;
1261 goto done;
1262 }
1263 if (alg == IW_ENCODE_ALG_CCMP) {
1264 pr_debug("%s: AES selected\n", __func__);
1265 wl->group_cipher_method = GELIC_WL_CIPHER_AES;
1266 wl->pairwise_cipher_method = GELIC_WL_CIPHER_AES;
1267 wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA2;
1268 } else {
1269 pr_debug("%s: TKIP selected, WPA forced\n", __func__);
1270 wl->group_cipher_method = GELIC_WL_CIPHER_TKIP;
1271 wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP;
1272 /* FIXME: how do we do if WPA2 + TKIP? */
1273 wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA;
1274 }
1275 if (flags & IW_ENCODE_RESTRICTED)
1276 BUG();
1277 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
1278		/* We should use the same key for both group and unicast */
1279 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
1280 pr_debug("%s: group key \n", __func__);
1281 else
1282 pr_debug("%s: unicast key \n", __func__);
1283 /* OK, update the key */
1284 wl->key_len[key_index] = ext->key_len;
1285 memset(wl->key[key_index], 0, IW_ENCODING_TOKEN_MAX);
1286 memcpy(wl->key[key_index], ext->key, ext->key_len);
1287 set_bit(key_index, &wl->key_enabled);
1288 /* remember info changed */
1289 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
1290 }
1291done:
1292 spin_unlock_irqrestore(&wl->lock, irqflag);
1293 pr_debug("%s: -> \n", __func__);
1294 return ret;
1295}
1296
1297static int gelic_wl_get_encodeext(struct net_device *netdev,
1298 struct iw_request_info *info,
1299 union iwreq_data *data, char *extra)
1300{
1301 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1302 struct iw_point *enc = &data->encoding;
1303 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1304	unsigned long irqflag;
1305 int key_index;
1306 int ret = 0;
1307 int max_key_len;
1308
1309 pr_debug("%s: <- \n", __func__);
1310
1311 max_key_len = enc->length - sizeof(struct iw_encode_ext);
1312 if (max_key_len < 0)
1313 return -EINVAL;
1314 key_index = enc->flags & IW_ENCODE_INDEX;
1315
1316 pr_debug("%s: key_index = %d\n", __func__, key_index);
1317 pr_debug("%s: key_len = %d\n", __func__, enc->length);
1318 pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS);
1319
1320 if (GELIC_WEP_KEYS < key_index)
1321 return -EINVAL;
1322
1323 spin_lock_irqsave(&wl->lock, irqflag);
1324 if (key_index)
1325 key_index--;
1326 else
1327 key_index = wl->current_key;
1328
1329 memset(ext, 0, sizeof(struct iw_encode_ext));
1330 switch (wl->group_cipher_method) {
1331 case GELIC_WL_CIPHER_WEP:
1332 ext->alg = IW_ENCODE_ALG_WEP;
1333 enc->flags |= IW_ENCODE_ENABLED;
1334 break;
1335 case GELIC_WL_CIPHER_TKIP:
1336 ext->alg = IW_ENCODE_ALG_TKIP;
1337 enc->flags |= IW_ENCODE_ENABLED;
1338 break;
1339 case GELIC_WL_CIPHER_AES:
1340 ext->alg = IW_ENCODE_ALG_CCMP;
1341 enc->flags |= IW_ENCODE_ENABLED;
1342 break;
1343 case GELIC_WL_CIPHER_NONE:
1344 default:
1345 ext->alg = IW_ENCODE_ALG_NONE;
1346 enc->flags |= IW_ENCODE_NOKEY;
1347 break;
1348 }
1349
1350 if (!(enc->flags & IW_ENCODE_NOKEY)) {
1351 if (max_key_len < wl->key_len[key_index]) {
1352 ret = -E2BIG;
1353 goto out;
1354 }
1355 if (test_bit(key_index, &wl->key_enabled))
1356 memcpy(ext->key, wl->key[key_index],
1357 wl->key_len[key_index]);
1358 else
1359 pr_debug("%s: disabled key requested ix=%d\n",
1360 __func__, key_index);
1361 }
1362out:
1363 spin_unlock_irqrestore(&wl->lock, irqflag);
1364 pr_debug("%s: -> \n", __func__);
1365 return ret;
1366}
1367/* SIOC{S,G}IWMODE */
1368static int gelic_wl_set_mode(struct net_device *netdev,
1369 struct iw_request_info *info,
1370 union iwreq_data *data, char *extra)
1371{
1372 __u32 mode = data->mode;
1373 int ret;
1374
1375 pr_debug("%s: <- \n", __func__);
1376 if (mode == IW_MODE_INFRA)
1377 ret = 0;
1378 else
1379 ret = -EOPNOTSUPP;
1380 pr_debug("%s: -> %d\n", __func__, ret);
1381 return ret;
1382}
1383
1384static int gelic_wl_get_mode(struct net_device *netdev,
1385 struct iw_request_info *info,
1386 union iwreq_data *data, char *extra)
1387{
1388 __u32 *mode = &data->mode;
1389 pr_debug("%s: <- \n", __func__);
1390 *mode = IW_MODE_INFRA;
1391 pr_debug("%s: ->\n", __func__);
1392 return 0;
1393}
1394
1395/* SIOCIWFIRSTPRIV */
1396static int hex2bin(u8 *str, u8 *bin, unsigned int len)
1397{
1398 unsigned int i;
1399	static const char *hex = "0123456789ABCDEF";
1400	const char *p, *q;
1401 u8 tmp;
1402
1403 if (len != WPA_PSK_LEN * 2)
1404 return -EINVAL;
1405
1406 for (i = 0; i < WPA_PSK_LEN * 2; i += 2) {
1407 p = strchr(hex, toupper(str[i]));
1408 q = strchr(hex, toupper(str[i + 1]));
1409 if (!p || !q) {
1410 pr_info("%s: unconvertible PSK digit=%d\n",
1411 __func__, i);
1412 return -EINVAL;
1413 }
1414 tmp = ((p - hex) << 4) + (q - hex);
1415 *bin++ = tmp;
1416 }
1417 return 0;
1418};
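/*
 * Usage sketch: assuming WPA_PSK_LEN is 32 (the usual WPA PSK size), a
 * 64-digit hexadecimal string such as "00112233...eeff" is converted into
 * the corresponding 32 binary octets; any other input length, or a
 * non-hex digit, makes hex2bin() return -EINVAL.
 */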
1419
1420static int gelic_wl_priv_set_psk(struct net_device *net_dev,
1421 struct iw_request_info *info,
1422 union iwreq_data *data, char *extra)
1423{
1424 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
1425 unsigned int len;
1426	unsigned long irqflag;
1427 int ret = 0;
1428
1429 pr_debug("%s:<- len=%d\n", __func__, data->data.length);
1430 len = data->data.length - 1;
1431 if (len <= 2)
1432 return -EINVAL;
1433
1434 spin_lock_irqsave(&wl->lock, irqflag);
1435 if (extra[0] == '"' && extra[len - 1] == '"') {
1436 pr_debug("%s: passphrase mode\n", __func__);
1437 /* pass phrase */
1438 if (GELIC_WL_EURUS_PSK_MAX_LEN < (len - 2)) {
1439 pr_info("%s: passphrase too long\n", __func__);
1440 ret = -E2BIG;
1441 goto out;
1442 }
1443 memset(wl->psk, 0, sizeof(wl->psk));
1444 wl->psk_len = len - 2;
1445 memcpy(wl->psk, &(extra[1]), wl->psk_len);
1446 wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE;
1447 } else {
1448 ret = hex2bin(extra, wl->psk, len);
1449 if (ret)
1450 goto out;
1451 wl->psk_len = WPA_PSK_LEN;
1452 wl->psk_type = GELIC_EURUS_WPA_PSK_BIN;
1453 }
1454 set_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat);
1455out:
1456 spin_unlock_irqrestore(&wl->lock, irqflag);
1457 pr_debug("%s:->\n", __func__);
1458 return ret;
1459}
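/*
 * Usage sketch from userspace (interface name assumed to be wlan0): the
 * PSK reaches this handler either as a passphrase wrapped in double
 * quotes, e.g.
 *     iwpriv wlan0 set_psk '"secret passphrase"'
 * or as a pre-computed binary PSK given as hexadecimal digits, which is
 * then converted by hex2bin() above.
 */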
1460
1461static int gelic_wl_priv_get_psk(struct net_device *net_dev,
1462 struct iw_request_info *info,
1463 union iwreq_data *data, char *extra)
1464{
1465 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
1466 char *p;
1467	unsigned long irqflag;
1468 unsigned int i;
1469
1470 pr_debug("%s:<-\n", __func__);
1471 if (!capable(CAP_NET_ADMIN))
1472 return -EPERM;
1473
1474 spin_lock_irqsave(&wl->lock, irqflag);
1475 p = extra;
1476 if (test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat)) {
1477 if (wl->psk_type == GELIC_EURUS_WPA_PSK_BIN) {
1478 for (i = 0; i < wl->psk_len; i++) {
1479				sprintf(p, "%02x", wl->psk[i]);
1480 p += 2;
1481 }
1482 *p = '\0';
1483 data->data.length = wl->psk_len * 2;
1484 } else {
1485 *p++ = '"';
1486 memcpy(p, wl->psk, wl->psk_len);
1487 p += wl->psk_len;
1488 *p++ = '"';
1489 *p = '\0';
1490 data->data.length = wl->psk_len + 2;
1491 }
1492 } else
1493 /* no psk set */
1494 data->data.length = 0;
1495 spin_unlock_irqrestore(&wl->lock, irqflag);
1496 pr_debug("%s:-> %d\n", __func__, data->data.length);
1497 return 0;
1498}
1499
1500/* SIOCGIWNICKN */
1501static int gelic_wl_get_nick(struct net_device *net_dev,
1502 struct iw_request_info *info,
1503 union iwreq_data *data, char *extra)
1504{
1505 strcpy(extra, "gelic_wl");
1506 data->data.length = strlen(extra);
1507 data->data.flags = 1;
1508 return 0;
1509}
1510
1511
1512/* --- */
1513
1514static struct iw_statistics *gelic_wl_get_wireless_stats(
1515 struct net_device *netdev)
1516{
1517
1518 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1519 struct gelic_eurus_cmd *cmd;
1520 struct iw_statistics *is;
1521 struct gelic_eurus_rssi_info *rssi;
1522
1523 pr_debug("%s: <-\n", __func__);
1524
1525 is = &wl->iwstat;
1526 memset(is, 0, sizeof(*is));
1527 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_RSSI_CFG,
1528 wl->buf, sizeof(*rssi));
1529 if (cmd && !cmd->status && !cmd->cmd_status) {
1530 rssi = wl->buf;
1531 is->qual.level = be16_to_cpu(rssi->rssi);
1532 is->qual.updated = IW_QUAL_LEVEL_UPDATED |
1533 IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID;
1534 } else
1535 /* not associated */
1536 is->qual.updated = IW_QUAL_ALL_INVALID;
1537
1538 kfree(cmd);
1539 pr_debug("%s: ->\n", __func__);
1540 return is;
1541}
1542
1543/*
1544 * scanning helpers
1545 */
1546static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan)
1547{
1548 struct gelic_eurus_cmd *cmd;
1549 int ret = 0;
1550
1551 pr_debug("%s: <- always=%d\n", __func__, always_scan);
1552 if (down_interruptible(&wl->scan_lock))
1553 return -ERESTARTSYS;
1554
1555	/*
1556	 * If a scan is already in progress, do not trigger another
1557	 */
1558 if (wl->scan_stat == GELIC_WL_SCAN_STAT_SCANNING) {
1559 pr_debug("%s: scanning now\n", __func__);
1560 goto out;
1561 }
1562
1563 init_completion(&wl->scan_done);
1564	/*
1565	 * If we already have a bss list, don't try to get a new one
1566	 */
1567 if (!always_scan && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) {
1568 pr_debug("%s: already has the list\n", __func__);
1569 complete(&wl->scan_done);
1570 goto out;
1571 }
1572 /*
1573 * issue start scan request
1574 */
1575 wl->scan_stat = GELIC_WL_SCAN_STAT_SCANNING;
1576 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_START_SCAN,
1577 NULL, 0);
1578 if (!cmd || cmd->status || cmd->cmd_status) {
1579 wl->scan_stat = GELIC_WL_SCAN_STAT_INIT;
1580 complete(&wl->scan_done);
1581 ret = -ENOMEM;
1582 goto out;
1583 }
1584 kfree(cmd);
1585out:
1586 up(&wl->scan_lock);
1587 pr_debug("%s: ->\n", __func__);
1588 return ret;
1589}
1590
1591/*
1592 * retrieve scan result from the chip (hypervisor)
1593 * this function is invoked by schedule work.
1594 */
1595static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1596{
1597 struct gelic_eurus_cmd *cmd = NULL;
1598 struct gelic_wl_scan_info *target, *tmp;
1599 struct gelic_wl_scan_info *oldest = NULL;
1600 struct gelic_eurus_scan_info *scan_info;
1601 unsigned int scan_info_size;
1602 union iwreq_data data;
1603 unsigned long this_time = jiffies;
1604 unsigned int data_len, i, found, r;
1605 DECLARE_MAC_BUF(mac);
1606
1607 pr_debug("%s:start\n", __func__);
1608 down(&wl->scan_lock);
1609
1610 if (wl->scan_stat != GELIC_WL_SCAN_STAT_SCANNING) {
1611 /*
1612 * stop() may be called while scanning, ignore result
1613 */
1614 pr_debug("%s: scan complete when stat != scanning(%d)\n",
1615 __func__, wl->scan_stat);
1616 goto out;
1617 }
1618
1619 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_SCAN,
1620 wl->buf, PAGE_SIZE);
1621 if (!cmd || cmd->status || cmd->cmd_status) {
1622 wl->scan_stat = GELIC_WL_SCAN_STAT_INIT;
1623 pr_info("%s:cmd failed\n", __func__);
1624 kfree(cmd);
1625 goto out;
1626 }
1627 data_len = cmd->size;
1628 pr_debug("%s: data_len = %d\n", __func__, data_len);
1629 kfree(cmd);
1630
1631 /* OK, bss list retrieved */
1632 wl->scan_stat = GELIC_WL_SCAN_STAT_GOT_LIST;
1633
1634	/* mark all entries as old */
1635 list_for_each_entry_safe(target, tmp, &wl->network_list, list) {
1636 target->valid = 0;
1637 /* expire too old entries */
1638 if (time_before(target->last_scanned + wl->scan_age,
1639 this_time)) {
1640 kfree(target->hwinfo);
1641 target->hwinfo = NULL;
1642 list_move_tail(&target->list, &wl->network_free_list);
1643 }
1644 }
1645
1646	/* put them in the network_list */
1647 scan_info = wl->buf;
1648 scan_info_size = 0;
1649 i = 0;
1650 while (scan_info_size < data_len) {
1651 pr_debug("%s:size=%d bssid=%s scan_info=%p\n", __func__,
1652 be16_to_cpu(scan_info->size),
1653 print_mac(mac, &scan_info->bssid[2]), scan_info);
1654 found = 0;
1655 oldest = NULL;
1656 list_for_each_entry(target, &wl->network_list, list) {
1657 if (!compare_ether_addr(&target->hwinfo->bssid[2],
1658 &scan_info->bssid[2])) {
1659 found = 1;
1660				pr_debug("%s: same BSS found in scanned list\n",
1661 __func__);
1662 break;
1663 }
1664 if (!oldest ||
1665 (target->last_scanned < oldest->last_scanned))
1666 oldest = target;
1667 }
1668
1669 if (!found) {
1670 /* not found in the list */
1671 if (list_empty(&wl->network_free_list)) {
1672 /* expire oldest */
1673 target = oldest;
1674 } else {
1675 target = list_entry(wl->network_free_list.next,
1676 struct gelic_wl_scan_info,
1677 list);
1678 }
1679 }
1680
1681 /* update the item */
1682 target->last_scanned = this_time;
1683 target->valid = 1;
1684 target->eurus_index = i;
1685 kfree(target->hwinfo);
1686 target->hwinfo = kzalloc(be16_to_cpu(scan_info->size),
1687 GFP_KERNEL);
1688 if (!target->hwinfo) {
1689 pr_info("%s: kzalloc failed\n", __func__);
1690 i++;
1691 scan_info_size += be16_to_cpu(scan_info->size);
1692 scan_info = (void *)scan_info +
1693 be16_to_cpu(scan_info->size);
1694 continue;
1695 }
1696 /* copy hw scan info */
1697		memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
1698 target->essid_len = strnlen(scan_info->essid,
1699 sizeof(scan_info->essid));
1700 target->rate_len = 0;
1701 for (r = 0; r < MAX_RATES_LENGTH; r++)
1702 if (scan_info->rate[r])
1703 target->rate_len++;
1704 if (8 < target->rate_len)
1705 pr_info("%s: AP returns %d rates\n", __func__,
1706 target->rate_len);
1707 target->rate_ext_len = 0;
1708 for (r = 0; r < MAX_RATES_EX_LENGTH; r++)
1709 if (scan_info->ext_rate[r])
1710 target->rate_ext_len++;
1711 list_move_tail(&target->list, &wl->network_list);
1712 /* bump pointer */
1713 i++;
1714 scan_info_size += be16_to_cpu(scan_info->size);
1715 scan_info = (void *)scan_info + be16_to_cpu(scan_info->size);
1716 }
1717 memset(&data, 0, sizeof(data));
1718 wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWSCAN, &data,
1719 NULL);
1720out:
1721 complete(&wl->scan_done);
1722 up(&wl->scan_lock);
1723 pr_debug("%s:end\n", __func__);
1724}
1725
1726/*
1727 * Select an appropriate bss from the current scan list
1728 * according to the current settings from userspace.
1729 * The caller must hold wl->scan_lock, and wl->scan_stat
1730 * must be GELIC_WL_SCAN_STAT_GOT_LIST.
1731 */
1732static void update_best(struct gelic_wl_scan_info **best,
1733 struct gelic_wl_scan_info *candid,
1734 int *best_weight,
1735 int *weight)
1736{
1737 if (*best_weight < ++(*weight)) {
1738 *best_weight = *weight;
1739 *best = candid;
1740 }
1741}
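/*
 * Example of the weighting below: update_best() is called once for each
 * criterion a candidate satisfies, so a bss matching both the security
 * setting and the configured ESSID reaches weight 2 and replaces a
 * previous best that matched only the security setting (weight 1).
 */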
1742
1743static
1744struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl)
1745{
1746 struct gelic_wl_scan_info *scan_info;
1747 struct gelic_wl_scan_info *best_bss;
1748 int weight, best_weight;
1749 u16 security;
1750 DECLARE_MAC_BUF(mac);
1751
1752 pr_debug("%s: <-\n", __func__);
1753
1754 best_bss = NULL;
1755 best_weight = 0;
1756
1757 list_for_each_entry(scan_info, &wl->network_list, list) {
1758 pr_debug("%s: station %p\n", __func__, scan_info);
1759
1760 if (!scan_info->valid) {
1761 pr_debug("%s: station invalid\n", __func__);
1762 continue;
1763 }
1764
1765 /* If bss specified, check it only */
1766 if (test_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat)) {
1767 if (!compare_ether_addr(&scan_info->hwinfo->bssid[2],
1768 wl->bssid)) {
1769 best_bss = scan_info;
1770 pr_debug("%s: bssid matched\n", __func__);
1771 break;
1772 } else {
1773				pr_debug("%s: bssid unmatched\n", __func__);
1774 continue;
1775 }
1776 }
1777
1778 weight = 0;
1779
1780 /* security */
1781 security = be16_to_cpu(scan_info->hwinfo->security) &
1782 GELIC_EURUS_SCAN_SEC_MASK;
1783 if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA2) {
1784 if (security == GELIC_EURUS_SCAN_SEC_WPA2)
1785 update_best(&best_bss, scan_info,
1786 &best_weight, &weight);
1787 else
1788 continue;
1789 } else if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA) {
1790 if (security == GELIC_EURUS_SCAN_SEC_WPA)
1791 update_best(&best_bss, scan_info,
1792 &best_weight, &weight);
1793 else
1794 continue;
1795 } else if (wl->wpa_level == GELIC_WL_WPA_LEVEL_NONE &&
1796 wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
1797 if (security == GELIC_EURUS_SCAN_SEC_WEP)
1798 update_best(&best_bss, scan_info,
1799 &best_weight, &weight);
1800 else
1801 continue;
1802 }
1803
1804 /* If ESSID is set, check it */
1805 if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat)) {
1806 if ((scan_info->essid_len == wl->essid_len) &&
1807 !strncmp(wl->essid,
1808 scan_info->hwinfo->essid,
1809 scan_info->essid_len))
1810 update_best(&best_bss, scan_info,
1811 &best_weight, &weight);
1812 else
1813 continue;
1814 }
1815 }
1816
1817#ifdef DEBUG
1818 pr_debug("%s: -> bss=%p\n", __func__, best_bss);
1819 if (best_bss) {
1820 pr_debug("%s:addr=%s\n", __func__,
1821 print_mac(mac, &best_bss->hwinfo->bssid[2]));
1822 }
1823#endif
1824 return best_bss;
1825}
1826
1827/*
1828 * Set up the WEP configuration on the chip.
1829 * The caller must hold wl->scan_lock, and wl->scan_stat
1830 * must be GELIC_WL_SCAN_STAT_GOT_LIST.
1831 */
1832static int gelic_wl_do_wep_setup(struct gelic_wl_info *wl)
1833{
1834 unsigned int i;
1835 struct gelic_eurus_wep_cfg *wep;
1836 struct gelic_eurus_cmd *cmd;
1837 int wep104 = 0;
1838 int have_key = 0;
1839 int ret = 0;
1840
1841 pr_debug("%s: <-\n", __func__);
1842	/* we can assume no one else uses the buffer */
1843 wep = wl->buf;
1844 memset(wep, 0, sizeof(*wep));
1845
1846 if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
1847 pr_debug("%s: WEP mode\n", __func__);
1848 for (i = 0; i < GELIC_WEP_KEYS; i++) {
1849 if (!test_bit(i, &wl->key_enabled))
1850 continue;
1851
1852 pr_debug("%s: key#%d enabled\n", __func__, i);
1853 have_key = 1;
1854 if (wl->key_len[i] == 13)
1855 wep104 = 1;
1856 else if (wl->key_len[i] != 5) {
1857 pr_info("%s: wrong wep key[%d]=%d\n",
1858 __func__, i, wl->key_len[i]);
1859 ret = -EINVAL;
1860 goto out;
1861 }
1862 memcpy(wep->key[i], wl->key[i], wl->key_len[i]);
1863 }
1864
1865 if (!have_key) {
1866			pr_info("%s: all wep keys are disabled\n", __func__);
1867 ret = -EINVAL;
1868 goto out;
1869 }
1870
1871 if (wep104) {
1872 pr_debug("%s: 104bit key\n", __func__);
1873 wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_104BIT);
1874 } else {
1875 pr_debug("%s: 40bit key\n", __func__);
1876 wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_40BIT);
1877 }
1878 } else {
1879 pr_debug("%s: NO encryption\n", __func__);
1880 wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_NONE);
1881 }
1882
1883 /* issue wep setup */
1884 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_WEP_CFG,
1885 wep, sizeof(*wep));
1886 if (!cmd)
1887 ret = -ENOMEM;
1888 else if (cmd->status || cmd->cmd_status)
1889 ret = -ENXIO;
1890
1891 kfree(cmd);
1892out:
1893 pr_debug("%s: ->\n", __func__);
1894 return ret;
1895}
1896
1897#ifdef DEBUG
1898static const char *wpasecstr(enum gelic_eurus_wpa_security sec)
1899{
1900 switch (sec) {
1901 case GELIC_EURUS_WPA_SEC_NONE:
1902 return "NONE";
1903 break;
1904 case GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP:
1905 return "WPA_TKIP_TKIP";
1906 break;
1907 case GELIC_EURUS_WPA_SEC_WPA_TKIP_AES:
1908 return "WPA_TKIP_AES";
1909 break;
1910 case GELIC_EURUS_WPA_SEC_WPA_AES_AES:
1911 return "WPA_AES_AES";
1912 break;
1913 case GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP:
1914 return "WPA2_TKIP_TKIP";
1915 break;
1916 case GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES:
1917 return "WPA2_TKIP_AES";
1918 break;
1919 case GELIC_EURUS_WPA_SEC_WPA2_AES_AES:
1920 return "WPA2_AES_AES";
1921 break;
1922 }
1923 return "";
1924};
1925#endif
1926
1927static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
1928{
1929 struct gelic_eurus_wpa_cfg *wpa;
1930 struct gelic_eurus_cmd *cmd;
1931 u16 security;
1932 int ret = 0;
1933
1934 pr_debug("%s: <-\n", __func__);
1935	/* we can assume no one else uses the buffer */
1936 wpa = wl->buf;
1937 memset(wpa, 0, sizeof(*wpa));
1938
1939 if (!test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat))
1940 pr_info("%s: PSK not configured yet\n", __func__);
1941
1942 /* copy key */
1943 memcpy(wpa->psk, wl->psk, wl->psk_len);
1944
1945 /* set security level */
1946 if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA2) {
1947 if (wl->group_cipher_method == GELIC_WL_CIPHER_AES) {
1948 security = GELIC_EURUS_WPA_SEC_WPA2_AES_AES;
1949 } else {
1950 if (wl->pairwise_cipher_method == GELIC_WL_CIPHER_AES &&
1951 precise_ie())
1952 security = GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES;
1953 else
1954 security = GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP;
1955 }
1956 } else {
1957 if (wl->group_cipher_method == GELIC_WL_CIPHER_AES) {
1958 security = GELIC_EURUS_WPA_SEC_WPA_AES_AES;
1959 } else {
1960 if (wl->pairwise_cipher_method == GELIC_WL_CIPHER_AES &&
1961 precise_ie())
1962 security = GELIC_EURUS_WPA_SEC_WPA_TKIP_AES;
1963 else
1964 security = GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP;
1965 }
1966 }
1967 wpa->security = cpu_to_be16(security);
1968
1969 /* PSK type */
1970 wpa->psk_type = cpu_to_be16(wl->psk_type);
1971#ifdef DEBUG
1972	pr_debug("%s: sec=%s psktype=%s\n", __func__,
1973 wpasecstr(wpa->security),
1974 (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
1975 "BIN" : "passphrase");
1976#if 0
1977 /*
1978 * don't enable here if you plan to submit
1979 * the debug log because this dumps your precious
1980 * passphrase/key.
1981 */
1982 pr_debug("%s: psk=%s\n",
1983 (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
1984 (char *)"N/A" : (char *)wpa->psk);
1985#endif
1986#endif
1987 /* issue wpa setup */
1988 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_WPA_CFG,
1989 wpa, sizeof(*wpa));
1990 if (!cmd)
1991 ret = -ENOMEM;
1992 else if (cmd->status || cmd->cmd_status)
1993 ret = -ENXIO;
1994 kfree(cmd);
1995 pr_debug("%s: --> %d\n", __func__, ret);
1996 return ret;
1997}
1998
1999/*
2000 * Start association. caller must hold assoc_stat_lock
2001 */
2002static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
2003 struct gelic_wl_scan_info *bss)
2004{
2005 struct gelic_eurus_cmd *cmd;
2006 struct gelic_eurus_common_cfg *common;
2007 int ret = 0;
2008 unsigned long rc;
2009
2010 pr_debug("%s: <-\n", __func__);
2011
2012 /* do common config */
2013 common = wl->buf;
2014 memset(common, 0, sizeof(*common));
2015 common->bss_type = cpu_to_be16(GELIC_EURUS_BSS_INFRA);
2016 common->op_mode = cpu_to_be16(GELIC_EURUS_OPMODE_11BG);
2017
2018 common->scan_index = cpu_to_be16(bss->eurus_index);
2019 switch (wl->auth_method) {
2020 case GELIC_EURUS_AUTH_OPEN:
2021 common->auth_method = cpu_to_be16(GELIC_EURUS_AUTH_OPEN);
2022 break;
2023 case GELIC_EURUS_AUTH_SHARED:
2024 common->auth_method = cpu_to_be16(GELIC_EURUS_AUTH_SHARED);
2025 break;
2026 }
2027
2028#ifdef DEBUG
2029 scan_list_dump(wl);
2030#endif
2031 pr_debug("%s: common cfg index=%d bsstype=%d auth=%d\n", __func__,
2032 be16_to_cpu(common->scan_index),
2033 be16_to_cpu(common->bss_type),
2034 be16_to_cpu(common->auth_method));
2035
2036 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_COMMON_CFG,
2037 common, sizeof(*common));
2038 if (!cmd || cmd->status || cmd->cmd_status) {
2039 ret = -ENOMEM;
2040 kfree(cmd);
2041 goto out;
2042 }
2043 kfree(cmd);
2044
2045 /* WEP/WPA */
2046 switch (wl->wpa_level) {
2047 case GELIC_WL_WPA_LEVEL_NONE:
2048 /* If WEP or no security, setup WEP config */
2049 ret = gelic_wl_do_wep_setup(wl);
2050 break;
2051 case GELIC_WL_WPA_LEVEL_WPA:
2052 case GELIC_WL_WPA_LEVEL_WPA2:
2053 ret = gelic_wl_do_wpa_setup(wl);
2054 break;
2055 };
2056
2057 if (ret) {
2058 pr_debug("%s: WEP/WPA setup failed %d\n", __func__,
2059 ret);
2060 }
2061
2062 /* start association */
2063 init_completion(&wl->assoc_done);
2064 wl->assoc_stat = GELIC_WL_ASSOC_STAT_ASSOCIATING;
2065 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_ASSOC,
2066 NULL, 0);
2067 if (!cmd || cmd->status || cmd->cmd_status) {
2068 pr_debug("%s: assoc request failed\n", __func__);
2069 wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
2070 kfree(cmd);
2071 ret = -ENOMEM;
2072 gelic_wl_send_iwap_event(wl, NULL);
2073 goto out;
2074 }
2075 kfree(cmd);
2076
2077 /* wait for connected event */
2078 rc = wait_for_completion_timeout(&wl->assoc_done, HZ * 4);/*FIXME*/
2079
2080 if (!rc) {
2081		/* timed out. Maybe the key or crypt mode is wrong */
2082		pr_info("%s: connect timeout\n", __func__);
2083 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC,
2084 NULL, 0);
2085 kfree(cmd);
2086 wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
2087 gelic_wl_send_iwap_event(wl, NULL);
2088 ret = -ENXIO;
2089 } else {
2090 wl->assoc_stat = GELIC_WL_ASSOC_STAT_ASSOCIATED;
2091 /* copy bssid */
2092 memcpy(wl->active_bssid, &bss->hwinfo->bssid[2], ETH_ALEN);
2093
2094 /* send connect event */
2095 gelic_wl_send_iwap_event(wl, wl->active_bssid);
2096 pr_info("%s: connected\n", __func__);
2097 }
2098out:
2099 pr_debug("%s: ->\n", __func__);
2100 return ret;
2101}
2102
2103/*
2104 * connected event
2105 */
2106static void gelic_wl_connected_event(struct gelic_wl_info *wl,
2107 u64 event)
2108{
2109 u64 desired_event = 0;
2110
2111 switch (wl->wpa_level) {
2112 case GELIC_WL_WPA_LEVEL_NONE:
2113 desired_event = GELIC_LV1_WL_EVENT_CONNECTED;
2114 break;
2115 case GELIC_WL_WPA_LEVEL_WPA:
2116 case GELIC_WL_WPA_LEVEL_WPA2:
2117 desired_event = GELIC_LV1_WL_EVENT_WPA_CONNECTED;
2118 break;
2119 }
2120
2121 if (desired_event == event) {
2122 pr_debug("%s: completed \n", __func__);
2123 complete(&wl->assoc_done);
2124 netif_carrier_on(port_to_netdev(wl_port(wl)));
2125 } else
2126 pr_debug("%s: event %#lx under wpa\n",
2127 __func__, event);
2128}
2129
2130/*
2131 * disconnect event
2132 */
2133static void gelic_wl_disconnect_event(struct gelic_wl_info *wl,
2134 u64 event)
2135{
2136 struct gelic_eurus_cmd *cmd;
2137 int lock;
2138
2139	/*
2140	 * If we get here in the middle of association,
2141	 * associate_bss() should be waiting for completion of
2142	 * wl->assoc_done.
2143	 * As it waits with a timeout, just leave assoc_done
2144	 * uncompleted; it will then terminate on the timeout.
2145	 */
2146 if (down_trylock(&wl->assoc_stat_lock)) {
2147 pr_debug("%s: already locked\n", __func__);
2148 lock = 0;
2149 } else {
2150 pr_debug("%s: obtain lock\n", __func__);
2151 lock = 1;
2152 }
2153
2154 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0);
2155 kfree(cmd);
2156
2157 /* send disconnected event to the supplicant */
2158 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
2159 gelic_wl_send_iwap_event(wl, NULL);
2160
2161 wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
2162 netif_carrier_off(port_to_netdev(wl_port(wl)));
2163
2164 if (lock)
2165 up(&wl->assoc_stat_lock);
2166}
2167/*
2168 * event worker
2169 */
2170#ifdef DEBUG
2171static const char *eventstr(enum gelic_lv1_wl_event event)
2172{
2173 static char buf[32];
2174 char *ret;
2175 if (event & GELIC_LV1_WL_EVENT_DEVICE_READY)
2176 ret = "EURUS_READY";
2177 else if (event & GELIC_LV1_WL_EVENT_SCAN_COMPLETED)
2178 ret = "SCAN_COMPLETED";
2179 else if (event & GELIC_LV1_WL_EVENT_DEAUTH)
2180 ret = "DEAUTH";
2181 else if (event & GELIC_LV1_WL_EVENT_BEACON_LOST)
2182 ret = "BEACON_LOST";
2183 else if (event & GELIC_LV1_WL_EVENT_CONNECTED)
2184 ret = "CONNECTED";
2185 else if (event & GELIC_LV1_WL_EVENT_WPA_CONNECTED)
2186 ret = "WPA_CONNECTED";
2187 else if (event & GELIC_LV1_WL_EVENT_WPA_ERROR)
2188 ret = "WPA_ERROR";
2189 else {
2190 sprintf(buf, "Unknown(%#x)", event);
2191 ret = buf;
2192 }
2193 return ret;
2194}
2195#else
2196static const char *eventstr(enum gelic_lv1_wl_event event)
2197{
2198 return NULL;
2199}
2200#endif
2201static void gelic_wl_event_worker(struct work_struct *work)
2202{
2203 struct gelic_wl_info *wl;
2204 struct gelic_port *port;
2205 u64 event, tmp;
2206 int status;
2207
2208 pr_debug("%s:start\n", __func__);
2209 wl = container_of(work, struct gelic_wl_info, event_work.work);
2210 port = wl_port(wl);
2211 while (1) {
2212 status = lv1_net_control(bus_id(port->card), dev_id(port->card),
2213 GELIC_LV1_GET_WLAN_EVENT, 0, 0, 0,
2214 &event, &tmp);
2215 if (status) {
2216 if (status != LV1_NO_ENTRY)
2217 pr_debug("%s:wlan event failed %d\n",
2218 __func__, status);
2219 /* got all events */
2220 pr_debug("%s:end\n", __func__);
2221 return;
2222 }
2223 pr_debug("%s: event=%s\n", __func__, eventstr(event));
2224 switch (event) {
2225 case GELIC_LV1_WL_EVENT_SCAN_COMPLETED:
2226 gelic_wl_scan_complete_event(wl);
2227 break;
2228 case GELIC_LV1_WL_EVENT_BEACON_LOST:
2229 case GELIC_LV1_WL_EVENT_DEAUTH:
2230 gelic_wl_disconnect_event(wl, event);
2231 break;
2232 case GELIC_LV1_WL_EVENT_CONNECTED:
2233 case GELIC_LV1_WL_EVENT_WPA_CONNECTED:
2234 gelic_wl_connected_event(wl, event);
2235 break;
2236 default:
2237 break;
2238 }
2239 } /* while */
2240}
2241/*
2242 * association worker
2243 */
2244static void gelic_wl_assoc_worker(struct work_struct *work)
2245{
2246 struct gelic_wl_info *wl;
2247
2248 struct gelic_wl_scan_info *best_bss;
2249 int ret;
2250
2251 wl = container_of(work, struct gelic_wl_info, assoc_work.work);
2252
2253 down(&wl->assoc_stat_lock);
2254
2255 if (wl->assoc_stat != GELIC_WL_ASSOC_STAT_DISCONN)
2256 goto out;
2257
2258 ret = gelic_wl_start_scan(wl, 0);
2259 if (ret == -ERESTARTSYS) {
2260		pr_debug("%s: scan start failed; retry association\n", __func__);
2261 schedule_delayed_work(&wl->assoc_work, HZ/10); /*FIXME*/
2262 goto out;
2263 } else if (ret) {
2264 pr_info("%s: scan prerequisite failed\n", __func__);
2265 goto out;
2266 }
2267
2268	/*
2269	 * Wait for bss scan completion.
2270	 * If we already have a scan list, gelic_wl_start_scan()
2271	 * returns OK and signals the completion, so
2272	 * it's safe to wait unconditionally here.
2273	 */
2274 wait_for_completion(&wl->scan_done);
2275
2276 pr_debug("%s: scan done\n", __func__);
2277 down(&wl->scan_lock);
2278 if (wl->scan_stat != GELIC_WL_SCAN_STAT_GOT_LIST) {
2279 gelic_wl_send_iwap_event(wl, NULL);
2280 pr_info("%s: no scan list. association failed\n", __func__);
2281 goto scan_lock_out;
2282 }
2283
2284 /* find best matching bss */
2285 best_bss = gelic_wl_find_best_bss(wl);
2286 if (!best_bss) {
2287 gelic_wl_send_iwap_event(wl, NULL);
2288 pr_info("%s: no bss matched. association failed\n", __func__);
2289 goto scan_lock_out;
2290 }
2291
2292 /* ok, do association */
2293 ret = gelic_wl_associate_bss(wl, best_bss);
2294 if (ret)
2295 pr_info("%s: association failed %d\n", __func__, ret);
2296scan_lock_out:
2297 up(&wl->scan_lock);
2298out:
2299 up(&wl->assoc_stat_lock);
2300}
2301/*
2302 * Interrupt handler
2303 * Called from the ethernet interrupt handler
2304 * Processes wireless specific virtual interrupts only
2305 */
2306void gelic_wl_interrupt(struct net_device *netdev, u64 status)
2307{
2308 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
2309
2310 if (status & GELIC_CARD_WLAN_COMMAND_COMPLETED) {
2311 pr_debug("%s:cmd complete\n", __func__);
2312 complete(&wl->cmd_done_intr);
2313 }
2314
2315 if (status & GELIC_CARD_WLAN_EVENT_RECEIVED) {
2316 pr_debug("%s:event received\n", __func__);
2317 queue_delayed_work(wl->event_queue, &wl->event_work, 0);
2318 }
2319}
2320
2321/*
2322 * driver helpers
2323 */
2324#define IW_IOCTL(n) [(n) - SIOCSIWCOMMIT]
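/* e.g. IW_IOCTL(SIOCGIWNAME) becomes [SIOCGIWNAME - SIOCSIWCOMMIT], a
 * designated-initializer index relative to the first standard wext ioctl */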
2325static const iw_handler gelic_wl_wext_handler[] =
2326{
2327 IW_IOCTL(SIOCGIWNAME) = gelic_wl_get_name,
2328 IW_IOCTL(SIOCGIWRANGE) = gelic_wl_get_range,
2329 IW_IOCTL(SIOCSIWSCAN) = gelic_wl_set_scan,
2330 IW_IOCTL(SIOCGIWSCAN) = gelic_wl_get_scan,
2331 IW_IOCTL(SIOCSIWAUTH) = gelic_wl_set_auth,
2332 IW_IOCTL(SIOCGIWAUTH) = gelic_wl_get_auth,
2333 IW_IOCTL(SIOCSIWESSID) = gelic_wl_set_essid,
2334 IW_IOCTL(SIOCGIWESSID) = gelic_wl_get_essid,
2335 IW_IOCTL(SIOCSIWENCODE) = gelic_wl_set_encode,
2336 IW_IOCTL(SIOCGIWENCODE) = gelic_wl_get_encode,
2337 IW_IOCTL(SIOCSIWAP) = gelic_wl_set_ap,
2338 IW_IOCTL(SIOCGIWAP) = gelic_wl_get_ap,
2339 IW_IOCTL(SIOCSIWENCODEEXT) = gelic_wl_set_encodeext,
2340 IW_IOCTL(SIOCGIWENCODEEXT) = gelic_wl_get_encodeext,
2341 IW_IOCTL(SIOCSIWMODE) = gelic_wl_set_mode,
2342 IW_IOCTL(SIOCGIWMODE) = gelic_wl_get_mode,
2343 IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick,
2344};
2345
2346static struct iw_priv_args gelic_wl_private_args[] =
2347{
2348 {
2349 .cmd = GELIC_WL_PRIV_SET_PSK,
2350 .set_args = IW_PRIV_TYPE_CHAR |
2351 (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
2352 .name = "set_psk"
2353 },
2354 {
2355 .cmd = GELIC_WL_PRIV_GET_PSK,
2356 .get_args = IW_PRIV_TYPE_CHAR |
2357 (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
2358 .name = "get_psk"
2359 }
2360};
2361
2362static const iw_handler gelic_wl_private_handler[] =
2363{
2364 gelic_wl_priv_set_psk,
2365 gelic_wl_priv_get_psk,
2366};
2367
2368static const struct iw_handler_def gelic_wl_wext_handler_def = {
2369 .num_standard = ARRAY_SIZE(gelic_wl_wext_handler),
2370 .standard = gelic_wl_wext_handler,
2371 .get_wireless_stats = gelic_wl_get_wireless_stats,
2372 .num_private = ARRAY_SIZE(gelic_wl_private_handler),
2373 .num_private_args = ARRAY_SIZE(gelic_wl_private_args),
2374 .private = gelic_wl_private_handler,
2375 .private_args = gelic_wl_private_args,
2376};
2377
2378static struct net_device *gelic_wl_alloc(struct gelic_card *card)
2379{
2380 struct net_device *netdev;
2381 struct gelic_port *port;
2382 struct gelic_wl_info *wl;
2383 unsigned int i;
2384
2385 pr_debug("%s:start\n", __func__);
2386 netdev = alloc_etherdev(sizeof(struct gelic_port) +
2387 sizeof(struct gelic_wl_info));
2388	pr_debug("%s: netdev=%p card=%p\n", __func__, netdev, card);
2389 if (!netdev)
2390 return NULL;
2391
2392 port = netdev_priv(netdev);
2393 port->netdev = netdev;
2394 port->card = card;
2395 port->type = GELIC_PORT_WIRELESS;
2396
2397 wl = port_wl(port);
2398 pr_debug("%s: wl=%p port=%p\n", __func__, wl, port);
2399
2400 /* allocate scan list */
2401 wl->networks = kzalloc(sizeof(struct gelic_wl_scan_info) *
2402 GELIC_WL_BSS_MAX_ENT, GFP_KERNEL);
2403
2404 if (!wl->networks)
2405 goto fail_bss;
2406
2407 wl->eurus_cmd_queue = create_singlethread_workqueue("gelic_cmd");
2408 if (!wl->eurus_cmd_queue)
2409 goto fail_cmd_workqueue;
2410
2411 wl->event_queue = create_singlethread_workqueue("gelic_event");
2412 if (!wl->event_queue)
2413 goto fail_event_workqueue;
2414
2415 INIT_LIST_HEAD(&wl->network_free_list);
2416 INIT_LIST_HEAD(&wl->network_list);
2417 for (i = 0; i < GELIC_WL_BSS_MAX_ENT; i++)
2418 list_add_tail(&wl->networks[i].list,
2419 &wl->network_free_list);
2420 init_completion(&wl->cmd_done_intr);
2421
2422 INIT_DELAYED_WORK(&wl->event_work, gelic_wl_event_worker);
2423 INIT_DELAYED_WORK(&wl->assoc_work, gelic_wl_assoc_worker);
2424 init_MUTEX(&wl->scan_lock);
2425 init_MUTEX(&wl->assoc_stat_lock);
2426
2427 init_completion(&wl->scan_done);
2428 /* for the case that no scan request is issued and stop() is called */
2429 complete(&wl->scan_done);
2430
2431 spin_lock_init(&wl->lock);
2432
2433 wl->scan_age = 5*HZ; /* FIXME */
2434
2435 /* buffer for receiving scanned list etc */
2436 BUILD_BUG_ON(PAGE_SIZE <
2437 sizeof(struct gelic_eurus_scan_info) *
2438 GELIC_EURUS_MAX_SCAN);
2439 wl->buf = (void *)get_zeroed_page(GFP_KERNEL);
2440 if (!wl->buf) {
2441 pr_info("%s:buffer allocation failed\n", __func__);
2442 goto fail_getpage;
2443 }
2444 pr_debug("%s:end\n", __func__);
2445 return netdev;
2446
2447fail_getpage:
2448 destroy_workqueue(wl->event_queue);
2449fail_event_workqueue:
2450 destroy_workqueue(wl->eurus_cmd_queue);
2451fail_cmd_workqueue:
2452 kfree(wl->networks);
2453fail_bss:
2454 free_netdev(netdev);
2455 pr_debug("%s:end error\n", __func__);
2456 return NULL;
2457
2458}
2459
2460static void gelic_wl_free(struct gelic_wl_info *wl)
2461{
2462 struct gelic_wl_scan_info *scan_info;
2463 unsigned int i;
2464
2465 pr_debug("%s: <-\n", __func__);
2466
2467 pr_debug("%s: destroy queues\n", __func__);
2468 destroy_workqueue(wl->eurus_cmd_queue);
2469 destroy_workqueue(wl->event_queue);
2470
2471 scan_info = wl->networks;
2472 for (i = 0; i < GELIC_WL_BSS_MAX_ENT; i++, scan_info++)
2473 kfree(scan_info->hwinfo);
2474 kfree(wl->networks);
2475
2476 free_netdev(port_to_netdev(wl_port(wl)));
2477
2478 pr_debug("%s: ->\n", __func__);
2479}
2480
2481static int gelic_wl_try_associate(struct net_device *netdev)
2482{
2483 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
2484 int ret = -1;
2485 unsigned int i;
2486
2487 pr_debug("%s: <-\n", __func__);
2488
 2489 /* check constraints before starting association */
 2490 /* for an AP with no access restriction */
2491 if (wl->group_cipher_method == GELIC_WL_CIPHER_NONE) {
2492 if (test_bit(GELIC_WL_STAT_CONFIGURED,
2493 &wl->stat))
2494 goto do_associate;
2495 else {
2496 pr_debug("%s: no wep, not configured\n", __func__);
2497 return ret;
2498 }
2499 }
2500
2501 /* for WEP, one of four keys should be set */
2502 if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
2503 /* one of keys set */
2504 for (i = 0; i < GELIC_WEP_KEYS; i++) {
2505 if (test_bit(i, &wl->key_enabled))
2506 goto do_associate;
2507 }
2508 pr_debug("%s: WEP, but no key specified\n", __func__);
2509 return ret;
2510 }
2511
2512 /* for WPA[2], psk should be set */
2513 if ((wl->group_cipher_method == GELIC_WL_CIPHER_TKIP) ||
2514 (wl->group_cipher_method == GELIC_WL_CIPHER_AES)) {
2515 if (test_bit(GELIC_WL_STAT_WPA_PSK_SET,
2516 &wl->stat))
2517 goto do_associate;
2518 else {
2519 pr_debug("%s: AES/TKIP, but PSK not configured\n",
2520 __func__);
2521 return ret;
2522 }
2523 }
2524
2525do_associate:
2526 ret = schedule_delayed_work(&wl->assoc_work, 0);
2527 pr_debug("%s: start association work %d\n", __func__, ret);
2528 return ret;
2529}
2530
2531/*
2532 * netdev handlers
2533 */
2534static int gelic_wl_open(struct net_device *netdev)
2535{
2536 struct gelic_card *card = netdev_card(netdev);
2537
2538 pr_debug("%s:->%p\n", __func__, netdev);
2539
2540 gelic_card_up(card);
2541
2542 /* try to associate */
2543 gelic_wl_try_associate(netdev);
2544
2545 netif_start_queue(netdev);
2546
2547 pr_debug("%s:<-\n", __func__);
2548 return 0;
2549}
2550
2551/*
2552 * reset state machine
2553 */
2554static int gelic_wl_reset_state(struct gelic_wl_info *wl)
2555{
2556 struct gelic_wl_scan_info *target;
2557 struct gelic_wl_scan_info *tmp;
2558
2559 /* empty scan list */
2560 list_for_each_entry_safe(target, tmp, &wl->network_list, list) {
2561 list_move_tail(&target->list, &wl->network_free_list);
2562 }
2563 wl->scan_stat = GELIC_WL_SCAN_STAT_INIT;
2564
2565 /* clear configuration */
2566 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
2567 wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
2568 wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE;
2569 wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
2570
2571 wl->key_enabled = 0;
2572 wl->current_key = 0;
2573
2574 wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE;
2575 wl->psk_len = 0;
2576
2577 wl->essid_len = 0;
2578 memset(wl->essid, 0, sizeof(wl->essid));
2579 memset(wl->bssid, 0, sizeof(wl->bssid));
2580 memset(wl->active_bssid, 0, sizeof(wl->active_bssid));
2581
2582 wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
2583
2584 memset(&wl->iwstat, 0, sizeof(wl->iwstat));
2585 /* all status bit clear */
2586 wl->stat = 0;
2587 return 0;
2588}
2589
2590/*
2591 * Tell eurus to terminate association
2592 */
2593static void gelic_wl_disconnect(struct net_device *netdev)
2594{
2595 struct gelic_port *port = netdev_priv(netdev);
2596 struct gelic_wl_info *wl = port_wl(port);
2597 struct gelic_eurus_cmd *cmd;
2598
2599 /*
 2600 * If a scan process is running on the chip,
 2601 * further requests will be rejected.
2602 */
2603 if (wl->scan_stat == GELIC_WL_SCAN_STAT_SCANNING)
2604 wait_for_completion_timeout(&wl->scan_done, HZ);
2605
2606 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0);
2607 kfree(cmd);
2608 gelic_wl_send_iwap_event(wl, NULL);
2609};
2610
2611static int gelic_wl_stop(struct net_device *netdev)
2612{
2613 struct gelic_port *port = netdev_priv(netdev);
2614 struct gelic_wl_info *wl = port_wl(port);
2615 struct gelic_card *card = netdev_card(netdev);
2616
2617 pr_debug("%s:<-\n", __func__);
2618
2619 /*
2620 * Cancel pending association work.
 2621 * The event work may still run after the netdev is down.
2622 */
2623 cancel_delayed_work(&wl->assoc_work);
2624
2625 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
2626 gelic_wl_disconnect(netdev);
2627
2628 /* reset our state machine */
2629 gelic_wl_reset_state(wl);
2630
2631 netif_stop_queue(netdev);
2632
2633 gelic_card_down(card);
2634
2635 pr_debug("%s:->\n", __func__);
2636 return 0;
2637}
2638
2639/* -- */
2640
2641static struct ethtool_ops gelic_wl_ethtool_ops = {
2642 .get_drvinfo = gelic_net_get_drvinfo,
2643 .get_link = gelic_wl_get_link,
2644 .get_tx_csum = ethtool_op_get_tx_csum,
2645 .set_tx_csum = ethtool_op_set_tx_csum,
2646 .get_rx_csum = gelic_net_get_rx_csum,
2647 .set_rx_csum = gelic_net_set_rx_csum,
2648};
2649
2650static void gelic_wl_setup_netdev_ops(struct net_device *netdev)
2651{
2652 struct gelic_wl_info *wl;
2653 wl = port_wl(netdev_priv(netdev));
2654 BUG_ON(!wl);
2655 netdev->open = &gelic_wl_open;
2656 netdev->stop = &gelic_wl_stop;
2657 netdev->hard_start_xmit = &gelic_net_xmit;
2658 netdev->set_multicast_list = &gelic_net_set_multi;
2659 netdev->change_mtu = &gelic_net_change_mtu;
2660 netdev->wireless_data = &wl->wireless_data;
2661 netdev->wireless_handlers = &gelic_wl_wext_handler_def;
2662 /* tx watchdog */
2663 netdev->tx_timeout = &gelic_net_tx_timeout;
2664 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
2665
2666 netdev->ethtool_ops = &gelic_wl_ethtool_ops;
2667#ifdef CONFIG_NET_POLL_CONTROLLER
2668 netdev->poll_controller = gelic_net_poll_controller;
2669#endif
2670}
2671
2672/*
2673 * driver probe/remove
2674 */
2675int gelic_wl_driver_probe(struct gelic_card *card)
2676{
2677 int ret;
2678 struct net_device *netdev;
2679
2680 pr_debug("%s:start\n", __func__);
2681
2682 if (ps3_compare_firmware_version(1, 6, 0) < 0)
2683 return 0;
2684 if (!card->vlan[GELIC_PORT_WIRELESS].tx)
2685 return 0;
2686
2687 /* alloc netdevice for wireless */
2688 netdev = gelic_wl_alloc(card);
2689 if (!netdev)
2690 return -ENOMEM;
2691
2692 /* setup net_device structure */
2693 gelic_wl_setup_netdev_ops(netdev);
2694
2695 /* setup some of net_device and register it */
2696 ret = gelic_net_setup_netdev(netdev, card);
2697 if (ret)
2698 goto fail_setup;
2699 card->netdev[GELIC_PORT_WIRELESS] = netdev;
2700
 2701 /* add the wireless interrupts to the IRQ mask */
2702 card->irq_mask |= GELIC_CARD_WLAN_EVENT_RECEIVED |
2703 GELIC_CARD_WLAN_COMMAND_COMPLETED;
2704 /* to allow wireless commands while both interfaces are down */
2705 gelic_card_set_irq_mask(card, GELIC_CARD_WLAN_EVENT_RECEIVED |
2706 GELIC_CARD_WLAN_COMMAND_COMPLETED);
2707 pr_debug("%s:end\n", __func__);
2708 return 0;
2709
2710fail_setup:
2711 gelic_wl_free(port_wl(netdev_port(netdev)));
2712
2713 return ret;
2714}
2715
2716int gelic_wl_driver_remove(struct gelic_card *card)
2717{
2718 struct gelic_wl_info *wl;
2719 struct net_device *netdev;
2720
2721 pr_debug("%s:start\n", __func__);
2722
2723 if (ps3_compare_firmware_version(1, 6, 0) < 0)
2724 return 0;
2725 if (!card->vlan[GELIC_PORT_WIRELESS].tx)
2726 return 0;
2727
2728 netdev = card->netdev[GELIC_PORT_WIRELESS];
2729 wl = port_wl(netdev_priv(netdev));
2730
2731 /* if the interface was not up, but associated */
2732 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
2733 gelic_wl_disconnect(netdev);
2734
2735 complete(&wl->cmd_done_intr);
2736
 2737 /* cancel all pending work */
2738 cancel_delayed_work(&wl->assoc_work);
2739 cancel_delayed_work(&wl->event_work);
2740 flush_workqueue(wl->eurus_cmd_queue);
2741 flush_workqueue(wl->event_queue);
2742
2743 unregister_netdev(netdev);
2744
2745 /* disable wireless interrupt */
2746 pr_debug("%s: disable intr\n", __func__);
2747 card->irq_mask &= ~(GELIC_CARD_WLAN_EVENT_RECEIVED |
2748 GELIC_CARD_WLAN_COMMAND_COMPLETED);
 2749 /* free bss list, netdev */
2750 gelic_wl_free(wl);
2751 pr_debug("%s:end\n", __func__);
2752 return 0;
2753}
diff --git a/drivers/net/ps3_gelic_wireless.h b/drivers/net/ps3_gelic_wireless.h
new file mode 100644
index 000000000000..103697166720
--- /dev/null
+++ b/drivers/net/ps3_gelic_wireless.h
@@ -0,0 +1,329 @@
1/*
2 * PS3 gelic network driver.
3 *
4 * Copyright (C) 2007 Sony Computer Entertainment Inc.
5 * Copyright 2007 Sony Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation version 2.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#ifndef _GELIC_WIRELESS_H
21#define _GELIC_WIRELESS_H
22
23#include <linux/wireless.h>
24#include <net/iw_handler.h>
25
26
27/* return value from GELIC_LV1_GET_WLAN_EVENT netcontrol */
28enum gelic_lv1_wl_event {
29 GELIC_LV1_WL_EVENT_DEVICE_READY = 0x01, /* Eurus ready */
30 GELIC_LV1_WL_EVENT_SCAN_COMPLETED = 0x02, /* Scan has completed */
31 GELIC_LV1_WL_EVENT_DEAUTH = 0x04, /* Deauthed by the AP */
32 GELIC_LV1_WL_EVENT_BEACON_LOST = 0x08, /* Beacon lost detected */
33 GELIC_LV1_WL_EVENT_CONNECTED = 0x10, /* Connected to AP */
34 GELIC_LV1_WL_EVENT_WPA_CONNECTED = 0x20, /* WPA connection */
35 GELIC_LV1_WL_EVENT_WPA_ERROR = 0x40, /* MIC error */
36};
37
38/* arguments for GELIC_LV1_POST_WLAN_COMMAND netcontrol */
39enum gelic_eurus_command {
40 GELIC_EURUS_CMD_ASSOC = 1, /* association start */
41 GELIC_EURUS_CMD_DISASSOC = 2, /* disassociate */
42 GELIC_EURUS_CMD_START_SCAN = 3, /* scan start */
43 GELIC_EURUS_CMD_GET_SCAN = 4, /* get scan result */
44 GELIC_EURUS_CMD_SET_COMMON_CFG = 5, /* set common config */
 45 GELIC_EURUS_CMD_GET_COMMON_CFG = 6, /* get common config */
46 GELIC_EURUS_CMD_SET_WEP_CFG = 7, /* set WEP config */
47 GELIC_EURUS_CMD_GET_WEP_CFG = 8, /* get WEP config */
48 GELIC_EURUS_CMD_SET_WPA_CFG = 9, /* set WPA config */
49 GELIC_EURUS_CMD_GET_WPA_CFG = 10, /* get WPA config */
50 GELIC_EURUS_CMD_GET_RSSI_CFG = 11, /* get RSSI info. */
51 GELIC_EURUS_CMD_MAX_INDEX
52};
53
54/* for GELIC_EURUS_CMD_COMMON_CFG */
55enum gelic_eurus_bss_type {
56 GELIC_EURUS_BSS_INFRA = 0,
57 GELIC_EURUS_BSS_ADHOC = 1, /* not supported */
58};
59
60enum gelic_eurus_auth_method {
61 GELIC_EURUS_AUTH_OPEN = 0, /* FIXME: WLAN_AUTH_OPEN */
62 GELIC_EURUS_AUTH_SHARED = 1, /* not supported */
63};
64
65enum gelic_eurus_opmode {
66 GELIC_EURUS_OPMODE_11BG = 0, /* 802.11b/g */
67 GELIC_EURUS_OPMODE_11B = 1, /* 802.11b only */
68 GELIC_EURUS_OPMODE_11G = 2, /* 802.11g only */
69};
70
71struct gelic_eurus_common_cfg {
72 /* all fields are big endian */
73 u16 scan_index;
74 u16 bss_type; /* infra or adhoc */
75 u16 auth_method; /* shared key or open */
76 u16 op_mode; /* B/G */
77} __attribute__((packed));
78
79
80/* for GELIC_EURUS_CMD_WEP_CFG */
81enum gelic_eurus_wep_security {
82 GELIC_EURUS_WEP_SEC_NONE = 0,
83 GELIC_EURUS_WEP_SEC_40BIT = 1,
84 GELIC_EURUS_WEP_SEC_104BIT = 2,
85};
86
87struct gelic_eurus_wep_cfg {
88 /* all fields are big endian */
89 u16 security;
90 u8 key[4][16];
91} __attribute__((packed));
92
93/* for GELIC_EURUS_CMD_WPA_CFG */
94enum gelic_eurus_wpa_security {
95 GELIC_EURUS_WPA_SEC_NONE = 0x0000,
96 /* group=TKIP, pairwise=TKIP */
97 GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP = 0x0001,
98 /* group=AES, pairwise=AES */
99 GELIC_EURUS_WPA_SEC_WPA_AES_AES = 0x0002,
100 /* group=TKIP, pairwise=TKIP */
101 GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP = 0x0004,
102 /* group=AES, pairwise=AES */
103 GELIC_EURUS_WPA_SEC_WPA2_AES_AES = 0x0008,
104 /* group=TKIP, pairwise=AES */
105 GELIC_EURUS_WPA_SEC_WPA_TKIP_AES = 0x0010,
106 /* group=TKIP, pairwise=AES */
107 GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES = 0x0020,
108};
109
110enum gelic_eurus_wpa_psk_type {
111 GELIC_EURUS_WPA_PSK_PASSPHRASE = 0, /* passphrase string */
112 GELIC_EURUS_WPA_PSK_BIN = 1, /* 32 bytes binary key */
113};
114
115#define GELIC_WL_EURUS_PSK_MAX_LEN 64
116#define WPA_PSK_LEN 32 /* WPA spec says 256bit */
117
118struct gelic_eurus_wpa_cfg {
119 /* all fields are big endian */
120 u16 security;
121 u16 psk_type; /* psk key encoding type */
122 u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */
123} __attribute__((packed));
124
125/* for GELIC_EURUS_CMD_{START,GET}_SCAN */
126enum gelic_eurus_scan_capability {
127 GELIC_EURUS_SCAN_CAP_ADHOC = 0x0000,
128 GELIC_EURUS_SCAN_CAP_INFRA = 0x0001,
129 GELIC_EURUS_SCAN_CAP_MASK = 0x0001,
130};
131
132enum gelic_eurus_scan_sec_type {
133 GELIC_EURUS_SCAN_SEC_NONE = 0x0000,
134 GELIC_EURUS_SCAN_SEC_WEP = 0x0100,
135 GELIC_EURUS_SCAN_SEC_WPA = 0x0200,
136 GELIC_EURUS_SCAN_SEC_WPA2 = 0x0400,
137 GELIC_EURUS_SCAN_SEC_MASK = 0x0f00,
138};
139
140enum gelic_eurus_scan_sec_wep_type {
141 GELIC_EURUS_SCAN_SEC_WEP_UNKNOWN = 0x0000,
142 GELIC_EURUS_SCAN_SEC_WEP_40 = 0x0001,
143 GELIC_EURUS_SCAN_SEC_WEP_104 = 0x0002,
144 GELIC_EURUS_SCAN_SEC_WEP_MASK = 0x0003,
145};
146
147enum gelic_eurus_scan_sec_wpa_type {
148 GELIC_EURUS_SCAN_SEC_WPA_UNKNOWN = 0x0000,
149 GELIC_EURUS_SCAN_SEC_WPA_TKIP = 0x0001,
150 GELIC_EURUS_SCAN_SEC_WPA_AES = 0x0002,
151 GELIC_EURUS_SCAN_SEC_WPA_MASK = 0x0003,
152};
153
154/*
155 * hw BSS information structure returned from GELIC_EURUS_CMD_GET_SCAN
156 */
157struct gelic_eurus_scan_info {
158 /* all fields are big endian */
159 __be16 size;
160 __be16 rssi; /* percentage */
161 __be16 channel; /* channel number */
162 __be16 beacon_period; /* FIXME: in msec unit */
163 __be16 capability;
164 __be16 security;
 165 u8 bssid[8]; /* last ETH_ALEN bytes are valid; bssid[0],[1] are unused */
166 u8 essid[32]; /* IW_ESSID_MAX_SIZE */
167 u8 rate[16]; /* first MAX_RATES_LENGTH(12) are valid */
168 u8 ext_rate[16]; /* first MAX_RATES_EX_LENGTH(16) are valid */
169 __be32 reserved1;
170 __be32 reserved2;
171 __be32 reserved3;
172 __be32 reserved4;
173 u8 elements[0]; /* ie */
174} __attribute__ ((packed));
175
 176/* the hypervisor returns up to 16 BSSes */
177#define GELIC_EURUS_MAX_SCAN (16)
178struct gelic_wl_scan_info {
179 struct list_head list;
180 struct gelic_eurus_scan_info *hwinfo;
181
 182 int valid; /* set to 1 if this entry was in the latest scan
 183 * list from Eurus */
184 unsigned int eurus_index; /* index in the Eurus list */
185 unsigned long last_scanned; /* acquired time */
186
187 unsigned int rate_len;
188 unsigned int rate_ext_len;
189 unsigned int essid_len;
190};
191
192/* for GELIC_EURUS_CMD_GET_RSSI */
193struct gelic_eurus_rssi_info {
194 /* big endian */
195 __be16 rssi;
196} __attribute__ ((packed));
197
198
199/* for 'stat' member of gelic_wl_info */
200enum gelic_wl_info_status_bit {
201 GELIC_WL_STAT_CONFIGURED,
 202 GELIC_WL_STAT_CH_INFO, /* ch info acquired */
203 GELIC_WL_STAT_ESSID_SET, /* ESSID specified by userspace */
204 GELIC_WL_STAT_BSSID_SET, /* BSSID specified by userspace */
205 GELIC_WL_STAT_WPA_PSK_SET, /* PMK specified by userspace */
206 GELIC_WL_STAT_WPA_LEVEL_SET, /* WEP or WPA[2] selected */
207};
208
209/* for 'scan_stat' member of gelic_wl_info */
210enum gelic_wl_scan_state {
 211 /* just initialized, or fetching the last scan result failed */
212 GELIC_WL_SCAN_STAT_INIT,
213 /* scan request issued, accepted or chip is scanning */
214 GELIC_WL_SCAN_STAT_SCANNING,
215 /* scan results retrieved */
216 GELIC_WL_SCAN_STAT_GOT_LIST,
217};
218
219/* for 'cipher_method' */
220enum gelic_wl_cipher_method {
221 GELIC_WL_CIPHER_NONE,
222 GELIC_WL_CIPHER_WEP,
223 GELIC_WL_CIPHER_TKIP,
224 GELIC_WL_CIPHER_AES,
225};
226
227/* for 'wpa_level' */
228enum gelic_wl_wpa_level {
229 GELIC_WL_WPA_LEVEL_NONE,
230 GELIC_WL_WPA_LEVEL_WPA,
231 GELIC_WL_WPA_LEVEL_WPA2,
232};
233
234/* for 'assoc_stat' */
235enum gelic_wl_assoc_state {
236 GELIC_WL_ASSOC_STAT_DISCONN,
237 GELIC_WL_ASSOC_STAT_ASSOCIATING,
238 GELIC_WL_ASSOC_STAT_ASSOCIATED,
239};
 240/* part of the private data allocated by alloc_etherdev() */
241#define GELIC_WEP_KEYS 4
242struct gelic_wl_info {
243 /* bss list */
244 struct semaphore scan_lock;
245 struct list_head network_list;
246 struct list_head network_free_list;
247 struct gelic_wl_scan_info *networks;
248
 249 unsigned long scan_age; /* max age of scan results (jiffies) */
250 enum gelic_wl_scan_state scan_stat;
251 struct completion scan_done;
252
253 /* eurus command queue */
254 struct workqueue_struct *eurus_cmd_queue;
255 struct completion cmd_done_intr;
256
257 /* eurus event handling */
258 struct workqueue_struct *event_queue;
259 struct delayed_work event_work;
260
261 /* wl status bits */
262 unsigned long stat;
263 enum gelic_eurus_auth_method auth_method; /* open/shared */
264 enum gelic_wl_cipher_method group_cipher_method;
265 enum gelic_wl_cipher_method pairwise_cipher_method;
266 enum gelic_wl_wpa_level wpa_level; /* wpa/wpa2 */
267
268 /* association handling */
269 struct semaphore assoc_stat_lock;
270 struct delayed_work assoc_work;
271 enum gelic_wl_assoc_state assoc_stat;
272 struct completion assoc_done;
273
274 spinlock_t lock;
275 u16 ch_info; /* available channels. bit0 = ch1 */
276 /* WEP keys */
277 u8 key[GELIC_WEP_KEYS][IW_ENCODING_TOKEN_MAX];
278 unsigned long key_enabled;
279 unsigned int key_len[GELIC_WEP_KEYS];
280 unsigned int current_key;
 281 /* WPA PSK */
282 u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN];
283 enum gelic_eurus_wpa_psk_type psk_type;
284 unsigned int psk_len;
285
286 u8 essid[IW_ESSID_MAX_SIZE];
287 u8 bssid[ETH_ALEN]; /* userland requested */
288 u8 active_bssid[ETH_ALEN]; /* associated bssid */
289 unsigned int essid_len;
290
291 /* buffer for hypervisor IO */
292 void *buf;
293
294 struct iw_public_data wireless_data;
295 struct iw_statistics iwstat;
296};
297
298#define GELIC_WL_BSS_MAX_ENT 32
299#define GELIC_WL_ASSOC_RETRY 50
300static inline struct gelic_port *wl_port(struct gelic_wl_info *wl)
301{
302 return container_of((void *)wl, struct gelic_port, priv);
303}
304static inline struct gelic_wl_info *port_wl(struct gelic_port *port)
305{
306 return port_priv(port);
307}
308
309struct gelic_eurus_cmd {
310 struct work_struct work;
311 struct gelic_wl_info *wl;
312 unsigned int cmd; /* command code */
313 u64 tag;
314 u64 size;
315 void *buffer;
316 unsigned int buf_size;
317 struct completion done;
318 int status;
319 u64 cmd_status;
320};
321
322/* private ioctls to pass PSK */
323#define GELIC_WL_PRIV_SET_PSK (SIOCIWFIRSTPRIV + 0)
324#define GELIC_WL_PRIV_GET_PSK (SIOCIWFIRSTPRIV + 1)
325
326extern int gelic_wl_driver_probe(struct gelic_card *card);
327extern int gelic_wl_driver_remove(struct gelic_card *card);
328extern void gelic_wl_interrupt(struct net_device *netdev, u64 status);
329#endif /* _GELIC_WIRELESS_H */
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 2334f4ebf907..19184e486ae9 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -61,7 +61,6 @@
61 61
62/* Time in jiffies before concluding the transmitter is hung. */ 62/* Time in jiffies before concluding the transmitter is hung. */
63#define TX_TIMEOUT (6000 * HZ / 1000) 63#define TX_TIMEOUT (6000 * HZ / 1000)
64#define TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
65 64
66/* RDC MAC I/O Size */ 65/* RDC MAC I/O Size */
67#define R6040_IO_SIZE 256 66#define R6040_IO_SIZE 256
@@ -174,8 +173,6 @@ struct r6040_private {
174 struct net_device *dev; 173 struct net_device *dev;
175 struct mii_if_info mii_if; 174 struct mii_if_info mii_if;
176 struct napi_struct napi; 175 struct napi_struct napi;
177 struct net_device_stats stats;
178 u16 napi_rx_running;
179 void __iomem *base; 176 void __iomem *base;
180}; 177};
181 178
@@ -235,17 +232,53 @@ static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
235 phy_write(ioaddr, lp->phy_addr, reg, val); 232 phy_write(ioaddr, lp->phy_addr, reg, val);
236} 233}
237 234
238static void r6040_tx_timeout(struct net_device *dev) 235static void r6040_free_txbufs(struct net_device *dev)
239{ 236{
240 struct r6040_private *priv = netdev_priv(dev); 237 struct r6040_private *lp = netdev_priv(dev);
238 int i;
241 239
242 disable_irq(dev->irq); 240 for (i = 0; i < TX_DCNT; i++) {
243 napi_disable(&priv->napi); 241 if (lp->tx_insert_ptr->skb_ptr) {
244 spin_lock(&priv->lock); 242 pci_unmap_single(lp->pdev, lp->tx_insert_ptr->buf,
245 dev->stats.tx_errors++; 243 MAX_BUF_SIZE, PCI_DMA_TODEVICE);
246 spin_unlock(&priv->lock); 244 dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
245 lp->rx_insert_ptr->skb_ptr = NULL;
246 }
247 lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
248 }
249}
247 250
248 netif_stop_queue(dev); 251static void r6040_free_rxbufs(struct net_device *dev)
252{
253 struct r6040_private *lp = netdev_priv(dev);
254 int i;
255
256 for (i = 0; i < RX_DCNT; i++) {
257 if (lp->rx_insert_ptr->skb_ptr) {
258 pci_unmap_single(lp->pdev, lp->rx_insert_ptr->buf,
259 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
260 dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
261 lp->rx_insert_ptr->skb_ptr = NULL;
262 }
263 lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
264 }
265}
266
267static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
268 dma_addr_t desc_dma, int size)
269{
270 struct r6040_descriptor *desc = desc_ring;
271 dma_addr_t mapping = desc_dma;
272
273 while (size-- > 0) {
274 mapping += sizeof(sizeof(*desc));
275 desc->ndesc = cpu_to_le32(mapping);
276 desc->vndescp = desc + 1;
277 desc++;
278 }
279 desc--;
280 desc->ndesc = cpu_to_le32(desc_dma);
281 desc->vndescp = desc_ring;
249} 282}
250 283
251/* Allocate skb buffer for rx descriptor */ 284/* Allocate skb buffer for rx descriptor */
@@ -256,7 +289,7 @@ static void rx_buf_alloc(struct r6040_private *lp, struct net_device *dev)
256 289
257 descptr = lp->rx_insert_ptr; 290 descptr = lp->rx_insert_ptr;
258 while (lp->rx_free_desc < RX_DCNT) { 291 while (lp->rx_free_desc < RX_DCNT) {
259 descptr->skb_ptr = dev_alloc_skb(MAX_BUF_SIZE); 292 descptr->skb_ptr = netdev_alloc_skb(dev, MAX_BUF_SIZE);
260 293
261 if (!descptr->skb_ptr) 294 if (!descptr->skb_ptr)
262 break; 295 break;
@@ -272,6 +305,63 @@ static void rx_buf_alloc(struct r6040_private *lp, struct net_device *dev)
272 lp->rx_insert_ptr = descptr; 305 lp->rx_insert_ptr = descptr;
273} 306}
274 307
308static void r6040_alloc_txbufs(struct net_device *dev)
309{
310 struct r6040_private *lp = netdev_priv(dev);
311 void __iomem *ioaddr = lp->base;
312
313 lp->tx_free_desc = TX_DCNT;
314
315 lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
316 r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
317
318 iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
319 iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
320}
321
322static void r6040_alloc_rxbufs(struct net_device *dev)
323{
324 struct r6040_private *lp = netdev_priv(dev);
325 void __iomem *ioaddr = lp->base;
326
327 lp->rx_free_desc = 0;
328
329 lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
330 r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
331
332 rx_buf_alloc(lp, dev);
333
334 iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
335 iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
336}
337
338static void r6040_tx_timeout(struct net_device *dev)
339{
340 struct r6040_private *priv = netdev_priv(dev);
341 void __iomem *ioaddr = priv->base;
342
343 printk(KERN_WARNING "%s: transmit timed out, status %4.4x, PHY status "
344 "%4.4x\n",
345 dev->name, ioread16(ioaddr + MIER),
346 mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
347
348 disable_irq(dev->irq);
349 napi_disable(&priv->napi);
350 spin_lock(&priv->lock);
351 /* Clear all descriptors */
352 r6040_free_txbufs(dev);
353 r6040_free_rxbufs(dev);
354 r6040_alloc_txbufs(dev);
355 r6040_alloc_rxbufs(dev);
356
357 /* Reset MAC */
358 iowrite16(MAC_RST, ioaddr + MCR1);
359 spin_unlock(&priv->lock);
360 enable_irq(dev->irq);
361
362 dev->stats.tx_errors++;
363 netif_wake_queue(dev);
364}
275 365
276static struct net_device_stats *r6040_get_stats(struct net_device *dev) 366static struct net_device_stats *r6040_get_stats(struct net_device *dev)
277{ 367{
@@ -280,11 +370,11 @@ static struct net_device_stats *r6040_get_stats(struct net_device *dev)
280 unsigned long flags; 370 unsigned long flags;
281 371
282 spin_lock_irqsave(&priv->lock, flags); 372 spin_lock_irqsave(&priv->lock, flags);
283 priv->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1); 373 dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
284 priv->stats.multicast += ioread8(ioaddr + ME_CNT0); 374 dev->stats.multicast += ioread8(ioaddr + ME_CNT0);
285 spin_unlock_irqrestore(&priv->lock, flags); 375 spin_unlock_irqrestore(&priv->lock, flags);
286 376
287 return &priv->stats; 377 return &dev->stats;
288} 378}
289 379
290/* Stop RDC MAC and Free the allocated resource */ 380/* Stop RDC MAC and Free the allocated resource */
@@ -293,7 +383,6 @@ static void r6040_down(struct net_device *dev)
293 struct r6040_private *lp = netdev_priv(dev); 383 struct r6040_private *lp = netdev_priv(dev);
294 void __iomem *ioaddr = lp->base; 384 void __iomem *ioaddr = lp->base;
295 struct pci_dev *pdev = lp->pdev; 385 struct pci_dev *pdev = lp->pdev;
296 int i;
297 int limit = 2048; 386 int limit = 2048;
298 u16 *adrp; 387 u16 *adrp;
299 u16 cmd; 388 u16 cmd;
@@ -313,27 +402,12 @@ static void r6040_down(struct net_device *dev)
313 iowrite16(adrp[1], ioaddr + MID_0M); 402 iowrite16(adrp[1], ioaddr + MID_0M);
314 iowrite16(adrp[2], ioaddr + MID_0H); 403 iowrite16(adrp[2], ioaddr + MID_0H);
315 free_irq(dev->irq, dev); 404 free_irq(dev->irq, dev);
405
316 /* Free RX buffer */ 406 /* Free RX buffer */
317 for (i = 0; i < RX_DCNT; i++) { 407 r6040_free_rxbufs(dev);
318 if (lp->rx_insert_ptr->skb_ptr) {
319 pci_unmap_single(lp->pdev, lp->rx_insert_ptr->buf,
320 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
321 dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
322 lp->rx_insert_ptr->skb_ptr = NULL;
323 }
324 lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
325 }
326 408
327 /* Free TX buffer */ 409 /* Free TX buffer */
328 for (i = 0; i < TX_DCNT; i++) { 410 r6040_free_txbufs(dev);
329 if (lp->tx_insert_ptr->skb_ptr) {
330 pci_unmap_single(lp->pdev, lp->tx_insert_ptr->buf,
331 MAX_BUF_SIZE, PCI_DMA_TODEVICE);
332 dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
333 lp->rx_insert_ptr->skb_ptr = NULL;
334 }
335 lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
336 }
337 411
338 /* Free Descriptor memory */ 412 /* Free Descriptor memory */
339 pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma); 413 pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
@@ -432,19 +506,24 @@ static int r6040_rx(struct net_device *dev, int limit)
432 506
433 /* Check for errors */ 507 /* Check for errors */
434 err = ioread16(ioaddr + MLSR); 508 err = ioread16(ioaddr + MLSR);
435 if (err & 0x0400) priv->stats.rx_errors++; 509 if (err & 0x0400)
510 dev->stats.rx_errors++;
436 /* RX FIFO over-run */ 511 /* RX FIFO over-run */
437 if (err & 0x8000) priv->stats.rx_fifo_errors++; 512 if (err & 0x8000)
513 dev->stats.rx_fifo_errors++;
438 /* RX descriptor unavailable */ 514 /* RX descriptor unavailable */
439 if (err & 0x0080) priv->stats.rx_frame_errors++; 515 if (err & 0x0080)
516 dev->stats.rx_frame_errors++;
 440 /* Received packet with length over buffer length */ 517
441 if (err & 0x0020) priv->stats.rx_over_errors++; 518 if (err & 0x0020)
519 dev->stats.rx_over_errors++;
 442 /* Received packet too long or too short */ 520
443 if (err & (0x0010|0x0008)) priv->stats.rx_length_errors++; 521 if (err & (0x0010 | 0x0008))
522 dev->stats.rx_length_errors++;
444 /* Received packet with CRC errors */ 523 /* Received packet with CRC errors */
445 if (err & 0x0004) { 524 if (err & 0x0004) {
446 spin_lock(&priv->lock); 525 spin_lock(&priv->lock);
447 priv->stats.rx_crc_errors++; 526 dev->stats.rx_crc_errors++;
448 spin_unlock(&priv->lock); 527 spin_unlock(&priv->lock);
449 } 528 }
450 529
@@ -469,8 +548,8 @@ static int r6040_rx(struct net_device *dev, int limit)
469 /* Send to upper layer */ 548 /* Send to upper layer */
470 netif_receive_skb(skb_ptr); 549 netif_receive_skb(skb_ptr);
471 dev->last_rx = jiffies; 550 dev->last_rx = jiffies;
472 priv->dev->stats.rx_packets++; 551 dev->stats.rx_packets++;
473 priv->dev->stats.rx_bytes += descptr->len; 552 dev->stats.rx_bytes += descptr->len;
474 /* To next descriptor */ 553 /* To next descriptor */
475 descptr = descptr->vndescp; 554 descptr = descptr->vndescp;
476 priv->rx_free_desc--; 555 priv->rx_free_desc--;
@@ -498,11 +577,13 @@ static void r6040_tx(struct net_device *dev)
498 /* Check for errors */ 577 /* Check for errors */
499 err = ioread16(ioaddr + MLSR); 578 err = ioread16(ioaddr + MLSR);
500 579
501 if (err & 0x0200) priv->stats.rx_fifo_errors++; 580 if (err & 0x0200)
502 if (err & (0x2000 | 0x4000)) priv->stats.tx_carrier_errors++; 581 dev->stats.rx_fifo_errors++;
582 if (err & (0x2000 | 0x4000))
583 dev->stats.tx_carrier_errors++;
503 584
504 if (descptr->status & 0x8000) 585 if (descptr->status & 0x8000)
505 break; /* Not complte */ 586 break; /* Not complete */
506 skb_ptr = descptr->skb_ptr; 587 skb_ptr = descptr->skb_ptr;
507 pci_unmap_single(priv->pdev, descptr->buf, 588 pci_unmap_single(priv->pdev, descptr->buf,
508 skb_ptr->len, PCI_DMA_TODEVICE); 589 skb_ptr->len, PCI_DMA_TODEVICE);
@@ -545,7 +626,6 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
545 struct r6040_private *lp = netdev_priv(dev); 626 struct r6040_private *lp = netdev_priv(dev);
546 void __iomem *ioaddr = lp->base; 627 void __iomem *ioaddr = lp->base;
547 u16 status; 628 u16 status;
548 int handled = 1;
549 629
550 /* Mask off RDC MAC interrupt */ 630 /* Mask off RDC MAC interrupt */
551 iowrite16(MSK_INT, ioaddr + MIER); 631 iowrite16(MSK_INT, ioaddr + MIER);
@@ -565,7 +645,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
565 if (status & 0x10) 645 if (status & 0x10)
566 r6040_tx(dev); 646 r6040_tx(dev);
567 647
568 return IRQ_RETVAL(handled); 648 return IRQ_HANDLED;
569} 649}
570 650
571#ifdef CONFIG_NET_POLL_CONTROLLER 651#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -577,53 +657,15 @@ static void r6040_poll_controller(struct net_device *dev)
577} 657}
578#endif 658#endif
579 659
580static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
581 dma_addr_t desc_dma, int size)
582{
583 struct r6040_descriptor *desc = desc_ring;
584 dma_addr_t mapping = desc_dma;
585
586 while (size-- > 0) {
587 mapping += sizeof(sizeof(*desc));
588 desc->ndesc = cpu_to_le32(mapping);
589 desc->vndescp = desc + 1;
590 desc++;
591 }
592 desc--;
593 desc->ndesc = cpu_to_le32(desc_dma);
594 desc->vndescp = desc_ring;
595}
596
597/* Init RDC MAC */ 660/* Init RDC MAC */
598static void r6040_up(struct net_device *dev) 661static void r6040_up(struct net_device *dev)
599{ 662{
600 struct r6040_private *lp = netdev_priv(dev); 663 struct r6040_private *lp = netdev_priv(dev);
601 void __iomem *ioaddr = lp->base; 664 void __iomem *ioaddr = lp->base;
602 665
603 /* Initialize */ 666 /* Initialise and alloc RX/TX buffers */
604 lp->tx_free_desc = TX_DCNT; 667 r6040_alloc_txbufs(dev);
605 lp->rx_free_desc = 0; 668 r6040_alloc_rxbufs(dev);
606 /* Init descriptor */
607 lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
608 lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
609 /* Init TX descriptor */
610 r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
611
612 /* Init RX descriptor */
613 r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
614
615 /* Allocate buffer for RX descriptor */
616 rx_buf_alloc(lp, dev);
617
618 /*
619 * TX and RX descriptor start registers.
620 * Lower 16-bits to MxD_SA0. Higher 16-bits to MxD_SA1.
621 */
622 iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
623 iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
624
625 iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
626 iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
627 669
628 /* Buffer Size Register */ 670 /* Buffer Size Register */
629 iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR); 671 iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);
@@ -689,8 +731,7 @@ static void r6040_timer(unsigned long data)
689 } 731 }
690 732
691 /* Timer active again */ 733 /* Timer active again */
692 lp->timer.expires = TIMER_WUT; 734 mod_timer(&lp->timer, jiffies + round_jiffies(HZ));
693 add_timer(&lp->timer);
694} 735}
695 736
696/* Read/set MAC address routines */ 737/* Read/set MAC address routines */
@@ -746,14 +787,10 @@ static int r6040_open(struct net_device *dev)
746 napi_enable(&lp->napi); 787 napi_enable(&lp->napi);
747 netif_start_queue(dev); 788 netif_start_queue(dev);
748 789
749 if (lp->switch_sig != ICPLUS_PHY_ID) { 790 /* set and active a timer process */
750 /* set and active a timer process */ 791 setup_timer(&lp->timer, r6040_timer, (unsigned long) dev);
751 init_timer(&lp->timer); 792 if (lp->switch_sig != ICPLUS_PHY_ID)
752 lp->timer.expires = TIMER_WUT; 793 mod_timer(&lp->timer, jiffies + HZ);
753 lp->timer.data = (unsigned long)dev;
754 lp->timer.function = &r6040_timer;
755 add_timer(&lp->timer);
756 }
757 return 0; 794 return 0;
758} 795}
759 796
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 2e9e88be7b33..202fdf356621 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1630,7 +1630,8 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
1630 SIS_PCI_COMMIT(); 1630 SIS_PCI_COMMIT();
1631} 1631}
1632 1632
1633static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev) 1633static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1634 struct net_device *dev)
1634{ 1635{
1635 u8 from; 1636 u8 from;
1636 1637