/*****************************************************************************
 * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
 *
 * PPPoX    --- Generic PPP encapsulation socket family
 * PPPoL2TP --- PPP over L2TP (RFC 2661)
 *
 * Version:	1.0.0
 *
 * Authors:	Martijn van Oosterhout <kleptog@svana.org>
 *		James Chapman (jchapman@katalix.com)
 * Contributors:
 *		Michal Ostrowski <mostrows@speakeasy.net>
 *		Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
 *		David S. Miller (davem@redhat.com)
 *
 * License:
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

/* This driver handles only L2TP data frames; control frames are handled by a
 * userspace application.
 *
 * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
 * attaches it to a bound UDP socket with local tunnel_id / session_id and
 * peer tunnel_id / session_id set. Data can then be sent or received using
 * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
 * can be read or modified using ioctl() or [gs]etsockopt() calls.
 *
 * When a PPPoL2TP socket is connected with local and peer session_id values
 * zero, the socket is treated as a special tunnel management socket.
 *
 * Here's example userspace code to create a socket for sending/receiving data
 * over an L2TP session:-
 *
 *	struct sockaddr_pppol2tp sax;
 *	struct sockaddr_pppol2tp sax;
 *	int fd;
 *	int err;
 *	fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
 *
 *	sax.sa_family = AF_PPPOX;
 *	sax.sa_protocol = PX_PROTO_OL2TP;
 *	sax.pppol2tp.fd = tunnel_fd;	// bound UDP socket
 *	sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
 *	sax.pppol2tp.addr.sin_port = addr->sin_port;
 *	sax.pppol2tp.addr.sin_family = AF_INET;
 *	sax.pppol2tp.s_tunnel  = tunnel_id;
 *	sax.pppol2tp.s_session = session_id;
 *	sax.pppol2tp.d_tunnel  = peer_tunnel_id;
 *	sax.pppol2tp.d_session = peer_session_id;
 *
 *	err = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
 *	// on success, fd can now be used to send/recv session data
 *
 * A pppd plugin that allows PPP traffic to be carried over L2TP using
 * this driver is available from the OpenL2TP project at
 * http://openl2tp.sourceforge.net.
 */
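
/* A PPPoL2TP socket connected with both s_session and d_session set to 0
 * is treated as a tunnel management socket (see pppol2tp_connect() below):
 * it carries no data and exists only to give ioctl() and [gs]etsockopt()
 * access to the tunnel context. Reusing the example above:
 *
 *	sax.pppol2tp.s_session = 0;
 *	sax.pppol2tp.d_session = 0;
 *	err = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
 */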

#include <linux/module.h>
#include <linux/version.h>
#include <linux/string.h>
#include <linux/list.h>
#include <asm/uaccess.h>

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/if_pppox.h>
#include <linux/if_pppol2tp.h>
#include <net/sock.h>
#include <linux/ppp_channel.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/file.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/proc_fs.h>
#include <net/net_namespace.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/xfrm.h>

#include <asm/byteorder.h>
#include <asm/atomic.h>


#define PPPOL2TP_DRV_VERSION	"V1.0"

/* L2TP header constants */
#define L2TP_HDRFLAG_T	   0x8000
#define L2TP_HDRFLAG_L	   0x4000
#define L2TP_HDRFLAG_S	   0x0800
#define L2TP_HDRFLAG_O	   0x0200
#define L2TP_HDRFLAG_P	   0x0100

#define L2TP_HDR_VER_MASK  0x000F
#define L2TP_HDR_VER	   0x0002
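
/* L2TP data message header layout (RFC 2661, section 3.1). Optional fields
 * are present only when the corresponding flag bit above is set:
 *
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |         Length (opt)          |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|           Tunnel ID           |           Session ID          |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|            Ns (opt)           |            Nr (opt)           |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|       Offset Size (opt)       |     Offset pad... (opt)
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */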

/* Space for IP, UDP, L2TP and PPP headers:
 * 20 (IP) + 8 (UDP) + 10 (L2TP with sequence numbers) + 2 (PPP) = 40
 */
#define PPPOL2TP_HEADER_OVERHEAD	40

/* Just some random numbers */
#define L2TP_TUNNEL_MAGIC	0x42114DDA
#define L2TP_SESSION_MAGIC	0x0C04EB7D

#define PPPOL2TP_HASH_BITS	4
#define PPPOL2TP_HASH_SIZE	(1 << PPPOL2TP_HASH_BITS)

/* Default trace flags */
#define PPPOL2TP_DEFAULT_DEBUG_FLAGS	0
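
/* The PPPOL2TP_MSG_* category bits tested against these debug masks are
 * defined in <linux/if_pppol2tp.h>; a socket's mask can be changed at run
 * time with the PPPOL2TP_SO_DEBUG socket option.
 */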

#define PRINTK(_mask, _type, _lvl, _fmt, args...)			\
	do {								\
		if ((_mask) & (_type))					\
			printk(_lvl "PPPOL2TP: " _fmt, ##args);		\
	} while(0)

/* Number of bytes to build transmit L2TP headers.
 * Unfortunately the size is different depending on whether sequence numbers
 * are enabled.
 */
#define PPPOL2TP_L2TP_HDR_SIZE_SEQ		10
#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ		6
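
/* The 10-byte form is flags(2) + tunnel_id(2) + session_id(2) + Ns(2) +
 * Nr(2); without sequence numbers the trailing Ns/Nr words are omitted.
 * See pppol2tp_build_l2tp_header().
 */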

struct pppol2tp_tunnel;

/* Describes a session. It is stored in the sk_user_data field of the
 * PPPoL2TP session socket, and holds the state needed to demultiplex
 * incoming packets and to build headers for outgoing ones.
 */
struct pppol2tp_session
{
	int			magic;		/* should be
						 * L2TP_SESSION_MAGIC */
	int			owner;		/* pid that opened the socket */

	struct sock		*sock;		/* Pointer to the session
						 * PPPoX socket */
	struct sock		*tunnel_sock;	/* Pointer to the tunnel UDP
						 * socket */

	struct pppol2tp_addr	tunnel_addr;	/* Description of tunnel */

	struct pppol2tp_tunnel	*tunnel;	/* back pointer to tunnel
						 * context */

	char			name[20];	/* "sess xxxxx/yyyyy", where
						 * x=tunnel_id, y=session_id */
	int			mtu;
	int			mru;
	int			flags;		/* accessed by PPPIOCGFLAGS.
						 * Unused. */
	unsigned		recv_seq:1;	/* expect receive packets with
						 * sequence numbers? */
	unsigned		send_seq:1;	/* send packets with sequence
						 * numbers? */
	unsigned		lns_mode:1;	/* behave as LNS? LAC enables
						 * sequence numbers under
						 * control of LNS. */
	int			debug;		/* bitmask of debug message
						 * categories */
	int			reorder_timeout; /* configured reorder timeout
						  * (in jiffies) */
	u16			nr;		/* session NR state (receive) */
	u16			ns;		/* session NS state (send) */
	struct sk_buff_head	reorder_q;	/* receive reorder queue */
	struct pppol2tp_ioc_stats stats;
	struct hlist_node	hlist;		/* Hash list node */
};

/* Describes a tunnel. It is stored in the sk_user_data field of the tunnel's
 * UDP socket, and tracks all the associated sessions so that incoming
 * packets can be demultiplexed to the right session.
 */
struct pppol2tp_tunnel
{
	int			magic;		/* Should be L2TP_TUNNEL_MAGIC */
	rwlock_t		hlist_lock;	/* protect session_hlist */
	struct hlist_head	session_hlist[PPPOL2TP_HASH_SIZE];
						/* hashed list of sessions,
						 * hashed by id */
	int			debug;		/* bitmask of debug message
						 * categories */
	char			name[12];	/* "tunl xxxxx" */
	struct pppol2tp_ioc_stats stats;

	void (*old_sk_destruct)(struct sock *);

	struct sock		*sock;		/* Parent socket */
	struct list_head	list;		/* Keep a list of all open
						 * prepared sockets */

	atomic_t		ref_count;
};

/* Private data stored for received packets in the skb.
 */
struct pppol2tp_skb_cb {
	u16			ns;
	u16			nr;
	u16			has_seq;
	u16			length;
	unsigned long		expires;
};

#define PPPOL2TP_SKB_CB(skb)	((struct pppol2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
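
/* The offset above skips the struct inet_skb_parm that the IP layer keeps
 * at the start of skb->cb[] on the receive path, so the two users of the
 * control buffer don't trample each other.
 */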

static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel);

static atomic_t pppol2tp_tunnel_count;
static atomic_t pppol2tp_session_count;
static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit, NULL };
static struct proto_ops pppol2tp_ops;
static LIST_HEAD(pppol2tp_tunnel_list);
static DEFINE_RWLOCK(pppol2tp_tunnel_list_lock);

/* Helpers to obtain tunnel/session contexts from sockets. Session state
 * hangs off the PPPoX session socket's sk_user_data; tunnel state hangs off
 * the tunnel UDP socket's. The magic values catch a socket of the wrong
 * type being passed in.
 */
static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk)
{
	struct pppol2tp_session *session;

	if (sk == NULL)
		return NULL;

	session = (struct pppol2tp_session *)(sk->sk_user_data);
	if (session == NULL)
		return NULL;

	BUG_ON(session->magic != L2TP_SESSION_MAGIC);

	return session;
}

static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk)
{
	struct pppol2tp_tunnel *tunnel;

	if (sk == NULL)
		return NULL;

	tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data);
	if (tunnel == NULL)
		return NULL;

	BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

	return tunnel;
}

/* Tunnel reference counts. Incremented per session that is added to
 * the tunnel.
 */
static inline void pppol2tp_tunnel_inc_refcount(struct pppol2tp_tunnel *tunnel)
{
	atomic_inc(&tunnel->ref_count);
}

static inline void pppol2tp_tunnel_dec_refcount(struct pppol2tp_tunnel *tunnel)
{
	if (atomic_dec_and_test(&tunnel->ref_count))
		pppol2tp_tunnel_free(tunnel);
}

/* Session hash list.
 * The session_id SHOULD be random according to RFC2661, but several
 * L2TP implementations (Cisco and Microsoft) use incrementing
 * session_ids.  So we do a real hash on the session_id, rather than a
 * simple bitmask.
 */
static inline struct hlist_head *
pppol2tp_session_id_hash(struct pppol2tp_tunnel *tunnel, u16 session_id)
{
	unsigned long hash_val = (unsigned long) session_id;
	return &tunnel->session_hlist[hash_long(hash_val, PPPOL2TP_HASH_BITS)];
}

/* Lookup a session by id
 */
static struct pppol2tp_session *
pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
{
	struct hlist_head *session_list =
		pppol2tp_session_id_hash(tunnel, session_id);
	struct pppol2tp_session *session;
	struct hlist_node *walk;

	read_lock(&tunnel->hlist_lock);
	hlist_for_each_entry(session, walk, session_list, hlist) {
		if (session->tunnel_addr.s_session == session_id) {
			read_unlock(&tunnel->hlist_lock);
			return session;
		}
	}
	read_unlock(&tunnel->hlist_lock);

	return NULL;
}

/* Lookup a tunnel by id
 */
static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id)
{
	struct pppol2tp_tunnel *tunnel = NULL;

	read_lock(&pppol2tp_tunnel_list_lock);
	list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) {
		if (tunnel->stats.tunnel_id == tunnel_id) {
			read_unlock(&pppol2tp_tunnel_list_lock);
			return tunnel;
		}
	}
	read_unlock(&pppol2tp_tunnel_list_lock);

	return NULL;
}

/*****************************************************************************
 * Receive data handling
 *****************************************************************************/

/* Queue a skb in order. We come here only if the skb has an L2TP sequence
 * number.
 */
static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
{
	struct sk_buff *skbp;
	u16 ns = PPPOL2TP_SKB_CB(skb)->ns;

	spin_lock(&session->reorder_q.lock);
	skb_queue_walk(&session->reorder_q, skbp) {
		if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
			__skb_insert(skb, skbp->prev, skbp, &session->reorder_q);
			PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
			       "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
			       session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
			       skb_queue_len(&session->reorder_q));
			session->stats.rx_oos_packets++;
			goto out;
		}
	}

	__skb_queue_tail(&session->reorder_q, skb);

out:
	spin_unlock(&session->reorder_q.lock);
}

/* Dequeue a single skb.
 */
static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
{
	struct pppol2tp_tunnel *tunnel = session->tunnel;
	int length = PPPOL2TP_SKB_CB(skb)->length;
	struct sock *session_sock = NULL;

	/* We're about to requeue the skb, so unlink it and return resources
	 * to its current owner (a socket receive buffer).
	 */
	skb_unlink(skb, &session->reorder_q);
	skb_orphan(skb);

	tunnel->stats.rx_packets++;
	tunnel->stats.rx_bytes += length;
	session->stats.rx_packets++;
	session->stats.rx_bytes += length;

	if (PPPOL2TP_SKB_CB(skb)->has_seq) {
		/* Bump our Nr */
		session->nr++;
		PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
		       "%s: updated nr to %hu\n", session->name, session->nr);
	}

	/* If the socket is bound, send it in to PPP's input queue. Otherwise
	 * queue it on the session socket.
	 */
	session_sock = session->sock;
	if (session_sock->sk_state & PPPOX_BOUND) {
		struct pppox_sock *po;
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
		       "%s: recv %d byte data frame, passing to ppp\n",
		       session->name, length);

		/* We need to forget all info related to the L2TP packet
		 * gathered in the skb as we are going to reuse the same
		 * skb for the inner packet.
		 * Namely we need to:
		 * - reset xfrm (IPSec) information as it applies to
		 *   the outer L2TP packet and not to the inner one
		 * - release the dst to force a route lookup on the inner
		 *   IP packet since skb->dst currently points to the dst
		 *   of the UDP tunnel
		 * - reset netfilter information as it doesn't apply
		 *   to the inner packet either
		 */
		secpath_reset(skb);
		dst_release(skb->dst);
		skb->dst = NULL;
		nf_reset(skb);

		po = pppox_sk(session_sock);
		ppp_input(&po->chan, skb);
	} else {
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
		       "%s: socket not bound\n", session->name);

		/* Not bound. Nothing we can do, so discard. */
		session->stats.rx_errors++;
		kfree_skb(skb);
	}

	sock_put(session->sock);
}

/* Dequeue skbs from the session's reorder_q, subject to packet order.
 * Skbs that have been in the queue for too long are simply discarded.
 */
static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
{
	struct sk_buff *skb;
	struct sk_buff *tmp;

	/* If the pkt at the head of the queue has the nr that we
	 * expect to send up next, dequeue it and any other
	 * in-sequence packets behind it.
	 */
	spin_lock(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
		if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
			session->stats.rx_seq_discards++;
			session->stats.rx_errors++;
			PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
			       "%s: oos pkt %hu len %d discarded (too old), "
			       "waiting for %hu, reorder_q_len=%d\n",
			       session->name, PPPOL2TP_SKB_CB(skb)->ns,
			       PPPOL2TP_SKB_CB(skb)->length, session->nr,
			       skb_queue_len(&session->reorder_q));
			__skb_unlink(skb, &session->reorder_q);
			kfree_skb(skb);
			continue;
		}

		if (PPPOL2TP_SKB_CB(skb)->has_seq) {
			if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
				PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
				       "%s: holding oos pkt %hu len %d, "
				       "waiting for %hu, reorder_q_len=%d\n",
				       session->name, PPPOL2TP_SKB_CB(skb)->ns,
				       PPPOL2TP_SKB_CB(skb)->length, session->nr,
				       skb_queue_len(&session->reorder_q));
				goto out;
			}
		}
		spin_unlock(&session->reorder_q.lock);
		pppol2tp_recv_dequeue_skb(session, skb);
		spin_lock(&session->reorder_q.lock);
	}

out:
	spin_unlock(&session->reorder_q.lock);
}

/* Internal receive frame. Do the real work of receiving an L2TP data frame
 * here. The skb is not on a list when we get here.
 * Returns 0 if the packet was a data packet and was successfully passed on.
 * Returns 1 if the packet was not a good data packet and could not be
 * forwarded.  All such packets are passed up to userspace to deal with.
 */
static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
{
	struct pppol2tp_session *session = NULL;
	struct pppol2tp_tunnel *tunnel;
	unsigned char *ptr;
	u16 hdrflags;
	u16 tunnel_id, session_id;
	int length;
	int offset;

	tunnel = pppol2tp_sock_to_tunnel(sock);
	if (tunnel == NULL)
		goto error;

	/* UDP always verifies the packet length. */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Short packet? */
	if (!pskb_may_pull(skb, 12)) {
		PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
		       "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
		goto error;
	}

	/* Point to L2TP header */
	ptr = skb->data;

	/* Get L2TP header flags */
	hdrflags = ntohs(*(__be16*)ptr);

	/* Trace packet contents, if enabled */
	if (tunnel->debug & PPPOL2TP_MSG_DATA) {
		length = min(16u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		printk(KERN_DEBUG "%s: recv: ", tunnel->name);

		offset = 0;
		do {
			printk(" %02X", ptr[offset]);
		} while (++offset < length);

		printk("\n");
	}

	/* Get length of L2TP packet */
	length = skb->len;

	/* If this is a control packet, it is handled by userspace. */
	if (hdrflags & L2TP_HDRFLAG_T) {
		PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
		       "%s: recv control packet, len=%d\n", tunnel->name, length);
		goto error;
	}

	/* Skip flags */
	ptr += 2;

	/* If length is present, skip it */
	if (hdrflags & L2TP_HDRFLAG_L)
		ptr += 2;

	/* Extract tunnel and session ID */
	tunnel_id = ntohs(*(__be16 *) ptr);
	ptr += 2;
	session_id = ntohs(*(__be16 *) ptr);
	ptr += 2;

	/* Find the session context */
	session = pppol2tp_session_find(tunnel, session_id);
	if (!session) {
		/* Not found? Pass to userspace to deal with */
		PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
		       "%s: no socket found (%hu/%hu). Passing up.\n",
		       tunnel->name, tunnel_id, session_id);
		goto error;
	}
	sock_hold(session->sock);

	/* The ref count on the socket was increased by the above call since
	 * we now hold a pointer to the session. Take care to do sock_put()
	 * when exiting this function from now on...
	 */

	/* Handle the optional sequence numbers.  If we are the LAC,
	 * enable/disable sequence numbers under the control of the LNS.  If
	 * no sequence numbers present but we were expecting them, discard
	 * frame.
	 */
	if (hdrflags & L2TP_HDRFLAG_S) {
		u16 ns, nr;
		ns = ntohs(*(__be16 *) ptr);
		ptr += 2;
		nr = ntohs(*(__be16 *) ptr);
		ptr += 2;

		/* Received a packet with sequence numbers. If we're the LAC
		 * and aren't yet sending sequence numbers, the LNS has just
		 * requested that we start, so enable them.
		 */
		if ((!session->lns_mode) && (!session->send_seq)) {
			PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
			       "%s: requested to enable seq numbers by LNS\n",
			       session->name);
			session->send_seq = -1;
		}

		/* Store L2TP info in the skb */
		PPPOL2TP_SKB_CB(skb)->ns = ns;
		PPPOL2TP_SKB_CB(skb)->nr = nr;
		PPPOL2TP_SKB_CB(skb)->has_seq = 1;

		PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
		       "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n",
		       session->name, ns, nr, session->nr);
	} else {
		/* No sequence numbers.
		 * If user has configured mandatory sequence numbers, discard.
		 */
		if (session->recv_seq) {
			PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
			       "%s: recv data has no seq numbers when required. "
			       "Discarding\n", session->name);
			session->stats.rx_seq_discards++;
			goto discard;
		}

		/* If we're the LAC and we're sending sequence numbers, the
		 * LNS has requested that we no longer send sequence numbers.
		 * If we're the LNS and we're sending sequence numbers, the
		 * LAC is broken. Discard the frame.
		 */
		if ((!session->lns_mode) && (session->send_seq)) {
			PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
			       "%s: requested to disable seq numbers by LNS\n",
			       session->name);
			session->send_seq = 0;
		} else if (session->send_seq) {
			PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
			       "%s: recv data has no seq numbers when required. "
			       "Discarding\n", session->name);
			session->stats.rx_seq_discards++;
			goto discard;
		}

		/* Store L2TP info in the skb */
		PPPOL2TP_SKB_CB(skb)->has_seq = 0;
	}

	/* If offset bit set, skip it. */
	if (hdrflags & L2TP_HDRFLAG_O) {
		offset = ntohs(*(__be16 *)ptr);
		skb->transport_header += 2 + offset;
		if (!pskb_may_pull(skb, skb_transport_offset(skb) + 2))
			goto discard;
	}

	__skb_pull(skb, skb_transport_offset(skb));

	/* Skip PPP header, if present.	 In testing, Microsoft L2TP clients
	 * don't send the PPP header (PPP header compression enabled), but
	 * other clients can include the header. So we cope with both cases
	 * here. The PPP header is always FF03 when using L2TP.
	 *
	 * Note that skb->data[] isn't dereferenced from a u16 ptr here since
	 * the field may be unaligned.
	 */
	if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
		skb_pull(skb, 2);

	/* Prepare skb for adding to the session's reorder_q.  Hold
	 * packets for max reorder_timeout or 1 second if not
	 * reordering.
	 */
	PPPOL2TP_SKB_CB(skb)->length = length;
	PPPOL2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	/* Add packet to the session's receive queue. Reordering is done here, if
	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
	 */
	if (PPPOL2TP_SKB_CB(skb)->has_seq) {
		if (session->reorder_timeout != 0) {
			/* Packet reordering enabled. Add skb to session's
			 * reorder queue, in order of ns.
			 */
			pppol2tp_recv_queue_skb(session, skb);
		} else {
			/* Packet reordering disabled. Discard out-of-sequence
			 * packets
			 */
			if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
				session->stats.rx_seq_discards++;
				PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
				       "%s: oos pkt %hu len %d discarded, "
				       "waiting for %hu, reorder_q_len=%d\n",
				       session->name, PPPOL2TP_SKB_CB(skb)->ns,
				       PPPOL2TP_SKB_CB(skb)->length, session->nr,
				       skb_queue_len(&session->reorder_q));
				goto discard;
			}
			skb_queue_tail(&session->reorder_q, skb);
		}
	} else {
		/* No sequence numbers. Add the skb to the tail of the
		 * reorder queue. This ensures that it will be
		 * delivered after all previous sequenced skbs.
		 */
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Try to dequeue as many skbs from reorder_q as we can. */
	pppol2tp_recv_dequeue(session);

	return 0;

discard:
	session->stats.rx_errors++;
	kfree_skb(skb);
	sock_put(session->sock);

	return 0;

error:
	return 1;
}

/* UDP encapsulation receive handler, installed on the tunnel UDP socket as
 * encap_rcv by pppol2tp_prepare_tunnel_socket(). See net/ipv4/udp.c.
 * Return codes:
 * 0 : success.
 * <0: error
 * >0: skb should be passed up to userspace as UDP.
 */
static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct pppol2tp_tunnel *tunnel;

	tunnel = pppol2tp_sock_to_tunnel(sk);
	if (tunnel == NULL)
		goto pass_up;

	PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
	       "%s: received %d bytes\n", tunnel->name, skb->len);

	if (pppol2tp_recv_core(sk, skb))
		goto pass_up;

	return 0;

pass_up:
	return 1;
}

/* Receive message. This is the recvmsg for the PPPoL2TP socket.
 */
static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len,
			    int flags)
{
	int err;
	struct sk_buff *skb;
	struct sock *sk = sock->sk;

	err = -EIO;
	if (sk->sk_state & PPPOX_BOUND)
		goto end;

	msg->msg_namelen = 0;

	err = 0;
	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto end;

	/* Never copy more than the caller's buffer can hold */
	if (len > skb->len)
		len = skb->len;
	else if (len < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
	if (err == 0)
		err = len;

	kfree_skb(skb);
end:
	return err;
}

/************************************************************************
 * Transmit handling
 ***********************************************************************/

/* Tell how big L2TP headers are for a particular session. This
 * depends on whether sequence numbers are being used.
 */
static inline int pppol2tp_l2tp_header_len(struct pppol2tp_session *session)
{
	if (session->send_seq)
		return PPPOL2TP_L2TP_HDR_SIZE_SEQ;

	return PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
}

/* Build an L2TP header for the session into the buffer provided.
 */
static void pppol2tp_build_l2tp_header(struct pppol2tp_session *session,
				       void *buf)
{
	__be16 *bufp = buf;
	u16 flags = L2TP_HDR_VER;

	if (session->send_seq)
		flags |= L2TP_HDRFLAG_S;

	/* Setup L2TP header.
	 * FIXME: Can this ever be unaligned? Is direct dereferencing of
	 * 16-bit header fields safe here for all architectures?
	 */
	*bufp++ = htons(flags);
	*bufp++ = htons(session->tunnel_addr.d_tunnel);
	*bufp++ = htons(session->tunnel_addr.d_session);
	if (session->send_seq) {
		*bufp++ = htons(session->ns);
		*bufp++ = 0;
		session->ns++;
		PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
		       "%s: updated ns to %hu\n", session->name, session->ns);
	}
}
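
/* Worked example: with send_seq enabled, d_tunnel 1, d_session 2 and
 * ns 5, the header built above appears on the wire as
 *
 *	08 02 00 01 00 02 00 05 00 00
 *
 * i.e. flags 0x0802 (S bit | version 2), then tunnel id, session id, Ns
 * and Nr (this driver always transmits Nr as 0 in data frames).
 */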

/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket.  We come here
 * when a user application does a sendmsg() on the session socket. L2TP and
 * PPP headers must be inserted into the user's data.
 */
static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
			    size_t total_len)
{
	static const unsigned char ppph[2] = { 0xff, 0x03 };
	struct sock *sk = sock->sk;
	struct inet_sock *inet;
	__wsum csum = 0;
	struct sk_buff *skb;
	int error;
	int hdr_len;
	struct pppol2tp_session *session;
	struct pppol2tp_tunnel *tunnel;
	struct udphdr *uh;
	unsigned int len;

	error = -ENOTCONN;
	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
		goto error;

	/* Get session and tunnel contexts */
	error = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto error;

	tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
	if (tunnel == NULL)
		goto error;

	/* What header length is configured for this session? */
	hdr_len = pppol2tp_l2tp_header_len(session);

	/* Allocate a socket buffer */
	error = -ENOMEM;
	skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
			   sizeof(struct udphdr) + hdr_len +
			   sizeof(ppph) + total_len,
			   0, GFP_KERNEL);
	if (!skb)
		goto error;

	/* Reserve space for headers. */
	skb_reserve(skb, NET_SKB_PAD);
	skb_reset_network_header(skb);
	skb_reserve(skb, sizeof(struct iphdr));
	skb_reset_transport_header(skb);

	/* Build UDP header. skb_put() advances the tail pointer, so each
	 * header lands after the one before it instead of overwriting it.
	 */
	inet = inet_sk(session->tunnel_sock);
	uh = (struct udphdr *) skb_put(skb, sizeof(struct udphdr));
	uh->source = inet->sport;
	uh->dest = inet->dport;
	uh->len = htons(sizeof(struct udphdr) + hdr_len + sizeof(ppph) +
			total_len);
	uh->check = 0;

	/* Build L2TP header */
	pppol2tp_build_l2tp_header(session, skb_put(skb, hdr_len));

	/* Add PPP header */
	memcpy(skb_put(skb, sizeof(ppph)), ppph, sizeof(ppph));

	/* Copy user data into skb */
	error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov,
				 total_len);
	if (error < 0) {
		kfree_skb(skb);
		goto error;
	}

	/* Calculate UDP checksum if configured to do so */
	if (session->tunnel_sock->sk_no_check != UDP_CSUM_NOXMIT)
		csum = udp_csum_outgoing(sk, skb);

	/* Debug */
	if (session->send_seq)
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
		       "%s: send %Zd bytes, ns=%hu\n", session->name,
		       total_len, session->ns - 1);
	else
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
		       "%s: send %Zd bytes\n", session->name, total_len);

	if (session->debug & PPPOL2TP_MSG_DATA) {
		int i;
		unsigned char *datap = skb->data;

		printk(KERN_DEBUG "%s: xmit:", session->name);
		for (i = 0; i < total_len; i++) {
			printk(" %02X", *datap++);
			if (i == 15) {
				printk(" ...");
				break;
			}
		}
		printk("\n");
	}

	/* Queue the packet to IP for output */
	len = skb->len;
	error = ip_queue_xmit(skb, 1);

	/* Update stats */
	if (error >= 0) {
		tunnel->stats.tx_packets++;
		tunnel->stats.tx_bytes += len;
		session->stats.tx_packets++;
		session->stats.tx_bytes += len;
	} else {
		tunnel->stats.tx_errors++;
		session->stats.tx_errors++;
	}

error:
	return error;
}

/* Transmit function called by generic PPP driver.  Sends PPP frame
 * over PPPoL2TP socket.
 *
 * This is almost the same as pppol2tp_sendmsg(), but rather than
 * being called with a msghdr from userspace, it is called with a skb
 * from the kernel.
 *
 * The supplied skb from ppp doesn't have enough headroom for the
 * insertion of L2TP, UDP and IP headers so we need to allocate more
 * headroom in the skb. This will create a cloned skb. But we must be
 * careful in the error case because the caller will expect to free
 * the skb it supplied, not our cloned skb. So we take care to always
 * leave the original skb unfreed if we return an error.
 */
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	static const u8 ppph[2] = { 0xff, 0x03 };
	struct sock *sk = (struct sock *) chan->private;
	struct sock *sk_tun;
	int hdr_len;
	struct pppol2tp_session *session;
	struct pppol2tp_tunnel *tunnel;
	int rc;
	int headroom;
	int data_len = skb->len;
	struct inet_sock *inet;
	__wsum csum = 0;
	struct udphdr *uh;
	unsigned int len;

	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
		goto abort;

	/* Get session and tunnel contexts from the socket */
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto abort;

	sk_tun = session->tunnel_sock;
	if (sk_tun == NULL)
		goto abort;
	tunnel = pppol2tp_sock_to_tunnel(sk_tun);
	if (tunnel == NULL)
		goto abort;

	/* What header length is configured for this session? */
	hdr_len = pppol2tp_l2tp_header_len(session);

	/* Check that there's enough headroom in the skb to insert IP,
	 * UDP and L2TP and PPP headers. If not enough, expand it to
	 * make room. Note that a new skb (or a clone) is
	 * allocated. If we return an error from this point on, make
	 * sure we free the new skb but do not free the original skb
	 * since that is done by the caller for the error case.
	 */
	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
		sizeof(struct udphdr) + hdr_len + sizeof(ppph);
	if (skb_cow_head(skb, headroom))
		goto abort;

	/* Setup PPP header */
	__skb_push(skb, sizeof(ppph));
	skb->data[0] = ppph[0];
	skb->data[1] = ppph[1];

	/* Setup L2TP header */
	pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len));

	/* Setup UDP header */
	inet = inet_sk(sk_tun);
	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);
	uh->source = inet->sport;
	uh->dest = inet->dport;
	uh->len = htons(sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len);
	uh->check = 0;

	/* *BROKEN* Calculate UDP checksum if configured to do so */
	if (sk_tun->sk_no_check != UDP_CSUM_NOXMIT)
		csum = udp_csum_outgoing(sk_tun, skb);

	/* Debug */
	if (session->send_seq)
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
		       "%s: send %d bytes, ns=%hu\n", session->name,
		       data_len, session->ns - 1);
	else
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
		       "%s: send %d bytes\n", session->name, data_len);

	if (session->debug & PPPOL2TP_MSG_DATA) {
		int i;
		unsigned char *datap = skb->data;

		printk(KERN_DEBUG "%s: xmit:", session->name);
		for (i = 0; i < data_len; i++) {
			printk(" %02X", *datap++);
			if (i == 31) {
				printk(" ...");
				break;
			}
		}
		printk("\n");
	}

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	nf_reset(skb);

	/* Get routing info from the tunnel socket */
	dst_release(skb->dst);
	skb->dst = sk_dst_get(sk_tun);

	/* Queue the packet to IP for output */
	len = skb->len;
	rc = ip_queue_xmit(skb, 1);

	/* Update stats */
	if (rc >= 0) {
		tunnel->stats.tx_packets++;
		tunnel->stats.tx_bytes += len;
		session->stats.tx_packets++;
		session->stats.tx_bytes += len;
	} else {
		tunnel->stats.tx_errors++;
		session->stats.tx_errors++;
	}

	return 1;

abort:
	/* Free the original skb */
	kfree_skb(skb);
	return 1;
}

/*****************************************************************************
 * Session (and tunnel control) socket create/destroy.
 *****************************************************************************/

/* When the tunnel UDP socket is closed, all the attached sockets need to go
 * too.
 */
static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct pppol2tp_session *session;
	struct sock *sk;

	if (tunnel == NULL)
		BUG();

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing all sessions...\n", tunnel->name);

	write_lock(&tunnel->hlist_lock);
	for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct pppol2tp_session, hlist);

			sk = session->sock;

			PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
			       "%s: closing session\n", session->name);

			hlist_del_init(&session->hlist);

			/* Since we should hold the sock lock while
			 * doing any unbinding, we need to release the
			 * lock we're holding before taking that lock.
			 * Hold a reference to the sock so it doesn't
			 * disappear as we're jumping between locks.
			 */
			sock_hold(sk);
			write_unlock(&tunnel->hlist_lock);
			lock_sock(sk);

			if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
				pppox_unbind_sock(sk);
				sk->sk_state = PPPOX_DEAD;
				sk->sk_state_change(sk);
			}

			/* Purge any queued data */
			skb_queue_purge(&sk->sk_receive_queue);
			skb_queue_purge(&sk->sk_write_queue);
			skb_queue_purge(&session->reorder_q);

			release_sock(sk);
			sock_put(sk);

			/* Now restart from the beginning of this hash
			 * chain.  We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			write_lock(&tunnel->hlist_lock);
			goto again;
		}
	}
	write_unlock(&tunnel->hlist_lock);
}

/* Really kill the tunnel.
 * Come here only when all sessions have been cleared from the tunnel.
 */
static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
{
	/* Remove from socket list */
	write_lock(&pppol2tp_tunnel_list_lock);
	list_del_init(&tunnel->list);
	write_unlock(&pppol2tp_tunnel_list_lock);

	atomic_dec(&pppol2tp_tunnel_count);
	kfree(tunnel);
}

/* Tunnel UDP socket destruct hook.
 * The tunnel context is deleted only when all session sockets have been
 * closed.
 */
static void pppol2tp_tunnel_destruct(struct sock *sk)
{
	struct pppol2tp_tunnel *tunnel;

	tunnel = pppol2tp_sock_to_tunnel(sk);
	if (tunnel == NULL)
		goto end;

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing...\n", tunnel->name);

	/* Close all sessions */
	pppol2tp_tunnel_closeall(tunnel);

	/* No longer an encapsulation socket. See net/ipv4/udp.c */
	(udp_sk(sk))->encap_type = 0;
	(udp_sk(sk))->encap_rcv = NULL;

	/* Remove hooks into tunnel socket */
	tunnel->sock = NULL;
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;

	/* Call original (UDP) socket destructor */
	if (sk->sk_destruct != NULL)
		(*sk->sk_destruct)(sk);

	pppol2tp_tunnel_dec_refcount(tunnel);

end:
	return;
}

/* Really kill the session socket. (Called from sock_put() if
 * refcnt == 0.)
 */
static void pppol2tp_session_destruct(struct sock *sk)
{
	struct pppol2tp_session *session = NULL;

	if (sk->sk_user_data != NULL) {
		struct pppol2tp_tunnel *tunnel;

		session = pppol2tp_sock_to_session(sk);
		if (session == NULL)
			goto out;

		/* Don't use pppol2tp_sock_to_tunnel() here to
		 * get the tunnel context because the tunnel
		 * socket might have already been closed (its
		 * sk->sk_user_data will be NULL) so use the
		 * session's private tunnel ptr instead.
		 */
		tunnel = session->tunnel;
		if (tunnel != NULL) {
			BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

			/* If session_id is zero, this is a null
			 * session context, which was created for a
			 * socket that is being used only to manage
			 * tunnels.
			 */
			if (session->tunnel_addr.s_session != 0) {
				/* Delete the session socket from the
				 * hash
				 */
				write_lock(&tunnel->hlist_lock);
				hlist_del_init(&session->hlist);
				write_unlock(&tunnel->hlist_lock);

				atomic_dec(&pppol2tp_session_count);
			}

			/* This will delete the tunnel context if this
			 * is the last session on the tunnel.
			 */
			session->tunnel = NULL;
			session->tunnel_sock = NULL;
			pppol2tp_tunnel_dec_refcount(tunnel);
		}
	}

	kfree(session);
out:
	return;
}

/* Called when the PPPoX socket (session) is closed.
 */
static int pppol2tp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int error;

	if (!sk)
		return 0;

	error = -EBADF;
	lock_sock(sk);
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto error;

	pppox_unbind_sock(sk);

	/* Signal the death of the socket. */
	sk->sk_state = PPPOX_DEAD;
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge any queued data */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	release_sock(sk);

	/* This will delete the session context via
	 * pppol2tp_session_destruct() if the socket's refcnt drops to
	 * zero.
	 */
	sock_put(sk);

	return 0;

error:
	release_sock(sk);
	return error;
}

/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
 * sockets attached to it.
 */
static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id,
						   int *error)
{
	int err;
	struct socket *sock = NULL;
	struct sock *sk;
	struct pppol2tp_tunnel *tunnel;
	struct sock *ret = NULL;

	/* Get the tunnel UDP socket from the fd, which was opened by
	 * the userspace L2TP daemon.
	 */
	err = -EBADF;
	sock = sockfd_lookup(fd, &err);
	if (!sock) {
		PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
		       "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
		       tunnel_id, fd, err);
		goto err;
	}

	sk = sock->sk;

	/* Quick sanity checks */
	err = -EPROTONOSUPPORT;
	if (sk->sk_protocol != IPPROTO_UDP) {
		PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
		       "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
		       tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
		goto err;
	}
	err = -EAFNOSUPPORT;
	if (sock->ops->family != AF_INET) {
		PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
		       "tunl %hu: fd %d wrong family, got %d, expected %d\n",
		       tunnel_id, fd, sock->ops->family, AF_INET);
		goto err;
	}

	err = -ENOTCONN;

	/* Check if this socket has already been prepped */
	tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data;
	if (tunnel != NULL) {
		/* User-data field already set */
		err = -EBUSY;
		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

		/* This socket has already been prepped */
		ret = tunnel->sock;
		goto out;
	}

	/* This socket is available and needs prepping. Create a new tunnel
	 * context and init it.
	 */
	sk->sk_user_data = tunnel = kzalloc(sizeof(struct pppol2tp_tunnel), GFP_KERNEL);
	if (sk->sk_user_data == NULL) {
		err = -ENOMEM;
		goto err;
	}

	tunnel->magic = L2TP_TUNNEL_MAGIC;
	sprintf(&tunnel->name[0], "tunl %hu", tunnel_id);

	tunnel->stats.tunnel_id = tunnel_id;
	tunnel->debug = PPPOL2TP_DEFAULT_DEBUG_FLAGS;

	/* Hook on the tunnel socket destructor so that we can cleanup
	 * if the tunnel socket goes away.
	 */
	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &pppol2tp_tunnel_destruct;

	tunnel->sock = sk;
	sk->sk_allocation = GFP_ATOMIC;

	/* Misc init */
	rwlock_init(&tunnel->hlist_lock);

	/* Add tunnel to our list */
	INIT_LIST_HEAD(&tunnel->list);
	write_lock(&pppol2tp_tunnel_list_lock);
	list_add(&tunnel->list, &pppol2tp_tunnel_list);
	write_unlock(&pppol2tp_tunnel_list_lock);
	atomic_inc(&pppol2tp_tunnel_count);

	/* Bump the reference count. The tunnel context is deleted
	 * only when this drops to zero.
	 */
	pppol2tp_tunnel_inc_refcount(tunnel);

	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
	(udp_sk(sk))->encap_type = UDP_ENCAP_L2TPINUDP;
	(udp_sk(sk))->encap_rcv = pppol2tp_udp_encap_recv;

	ret = tunnel->sock;

	*error = 0;
out:
	if (sock)
		sockfd_put(sock);

	return ret;

err:
	*error = err;
	goto out;
}

static struct proto pppol2tp_sk_proto = {
	.name	  = "PPPOL2TP",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};

/* socket() handler. Initialize a new struct sock.
 */
static int pppol2tp_create(struct net *net, struct socket *sock)
{
	int error = -ENOMEM;
	struct sock *sk;

	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, 1);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);

	sock->state  = SS_UNCONNECTED;
	sock->ops    = &pppol2tp_ops;

	sk->sk_backlog_rcv = pppol2tp_recv_core;
	sk->sk_protocol	   = PX_PROTO_OL2TP;
	sk->sk_family	   = PF_PPPOX;
	sk->sk_state	   = PPPOX_NONE;
	sk->sk_type	   = SOCK_STREAM;
	sk->sk_destruct	   = pppol2tp_session_destruct;

	error = 0;

out:
	return error;
}

/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
 */
static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
			    int sockaddr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	struct sock *tunnel_sock = NULL;
	struct pppol2tp_session *session = NULL;
	struct pppol2tp_tunnel *tunnel;
	struct dst_entry *dst;
	int error = 0;

	lock_sock(sk);

	error = -EINVAL;
	if (sp->sa_protocol != PX_PROTO_OL2TP)
		goto end;

	/* Check for already bound sockets */
	error = -EBUSY;
	if (sk->sk_state & PPPOX_CONNECTED)
		goto end;

	/* We don't support rebinding anyway */
	error = -EALREADY;
	if (sk->sk_user_data)
		goto end; /* socket is already attached */

	/* Don't bind if s_tunnel is 0 */
	error = -EINVAL;
	if (sp->pppol2tp.s_tunnel == 0)
		goto end;

	/* Special case: prepare tunnel socket if s_session and
	 * d_session is 0. Otherwise look up tunnel using supplied
	 * tunnel id.
	 */
	if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
		tunnel_sock = pppol2tp_prepare_tunnel_socket(sp->pppol2tp.fd,
							     sp->pppol2tp.s_tunnel,
							     &error);
		if (tunnel_sock == NULL)
			goto end;

		tunnel = tunnel_sock->sk_user_data;
	} else {
		tunnel = pppol2tp_tunnel_find(sp->pppol2tp.s_tunnel);

		/* Error if we can't find the tunnel */
		error = -ENOENT;
		if (tunnel == NULL)
			goto end;

		tunnel_sock = tunnel->sock;
	}

	/* Check that this session doesn't already exist */
	error = -EEXIST;
	session = pppol2tp_session_find(tunnel, sp->pppol2tp.s_session);
	if (session != NULL)
		goto end;

	/* Allocate and initialize a new session context. */
	session = kzalloc(sizeof(struct pppol2tp_session), GFP_KERNEL);
	if (session == NULL) {
		error = -ENOMEM;
		goto end;
	}

	skb_queue_head_init(&session->reorder_q);

	session->magic	     = L2TP_SESSION_MAGIC;
	session->owner	     = current->pid;
	session->sock	     = sk;
	session->tunnel	     = tunnel;
	session->tunnel_sock = tunnel_sock;
	session->tunnel_addr = sp->pppol2tp;
	sprintf(&session->name[0], "sess %hu/%hu",
		session->tunnel_addr.s_tunnel,
		session->tunnel_addr.s_session);

	session->stats.tunnel_id  = session->tunnel_addr.s_tunnel;
	session->stats.session_id = session->tunnel_addr.s_session;

	INIT_HLIST_NODE(&session->hlist);

	/* Inherit debug options from tunnel */
	session->debug = tunnel->debug;

	/* Default MTU must allow space for UDP/L2TP/PPP
	 * headers.
	 */
	session->mtu = session->mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;

	/* If PMTU discovery was enabled, use the MTU that was discovered */
	dst = sk_dst_get(sk);
	if (dst != NULL) {
		u32 pmtu = dst_mtu(__sk_dst_get(sk));
		if (pmtu != 0)
			session->mtu = session->mru = pmtu -
				PPPOL2TP_HEADER_OVERHEAD;
		dst_release(dst);
	}

	/* Special case: if source & dest session_id == 0x0000, this socket is
	 * being created to manage the tunnel. Don't add the session to the
	 * session hash list, just set up the internal context for use by
	 * ioctl() and sockopt() handlers.
	 */
	if ((session->tunnel_addr.s_session == 0) &&
	    (session->tunnel_addr.d_session == 0)) {
		error = 0;
		sk->sk_user_data = session;
		goto out_no_ppp;
	}

	/* Get tunnel context from the tunnel socket */
	tunnel = pppol2tp_sock_to_tunnel(tunnel_sock);
	if (tunnel == NULL) {
		error = -EBADF;
		goto end;
	}

	/* Right now, because we don't have a way to push incoming skbs
	 * straight through the UDP layer, the only header we need to worry
	 * about is the L2TP header. This size is different depending on
	 * whether sequence numbers are enabled for the data channel.
	 */
	po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;

	po->chan.private = sk;
	po->chan.ops	 = &pppol2tp_chan_ops;
	po->chan.mtu	 = session->mtu;

	error = ppp_register_channel(&po->chan);
	if (error)
		goto end;

	/* This is how we get the session context from the socket. */
	sk->sk_user_data = session;

	/* Add session to the tunnel's hash list */
	write_lock(&tunnel->hlist_lock);
	hlist_add_head(&session->hlist,
		       pppol2tp_session_id_hash(tunnel,
						session->tunnel_addr.s_session));
	write_unlock(&tunnel->hlist_lock);

	atomic_inc(&pppol2tp_session_count);

out_no_ppp:
	pppol2tp_tunnel_inc_refcount(tunnel);
	sk->sk_state = PPPOX_CONNECTED;
	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: created\n", session->name);

end:
	release_sock(sk);

	if (error != 0) {
		if (session != NULL)
			PRINTK(session->debug, PPPOL2TP_MSG_CONTROL,
			       KERN_WARNING, "%s: connect failed: %d\n",
			       session->name, error);
		else
			PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
			       "connect failed: %d\n", error);
	}

	return error;
}
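
/* Illustrative userspace sketch (disabled with #if 0; not part of this
 * driver) of how a PPP daemon might attach a session socket via the
 * connect() handler above. The tunnel fd, peer address and tunnel/session
 * ids are hypothetical values learned from the L2TP control connection.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/if_pppox.h>

static int l2tp_attach_session(int tunnel_fd, const struct sockaddr_in *peer,
			       __u16 tid, __u16 sid,
			       __u16 peer_tid, __u16 peer_sid)
{
	struct sockaddr_pppol2tp sax;
	int fd;

	fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
	if (fd < 0)
		return -1;

	memset(&sax, 0, sizeof(sax));
	sax.sa_family		= AF_PPPOX;
	sax.sa_protocol		= PX_PROTO_OL2TP;
	sax.pppol2tp.fd		= tunnel_fd;	/* tunnel UDP socket */
	sax.pppol2tp.addr	= *peer;	/* peer's IP address/port */
	sax.pppol2tp.s_tunnel	= tid;
	sax.pppol2tp.s_session	= sid;
	sax.pppol2tp.d_tunnel	= peer_tid;
	sax.pppol2tp.d_session	= peer_sid;

	if (connect(fd, (struct sockaddr *) &sax, sizeof(sax)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif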

/* getname() support.
 */
static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
			    int *usockaddr_len, int peer)
{
	int len = sizeof(struct sockaddr_pppol2tp);
	struct sockaddr_pppol2tp sp;
	int error = 0;
	struct pppol2tp_session *session;

	error = -ENOTCONN;
	if (sock->sk->sk_state != PPPOX_CONNECTED)
		goto end;

	session = pppol2tp_sock_to_session(sock->sk);
	if (session == NULL) {
		error = -EBADF;
		goto end;
	}

	memset(&sp, 0, sizeof(sp));	/* don't copy stack padding to user */
	sp.sa_family	= AF_PPPOX;
	sp.sa_protocol	= PX_PROTO_OL2TP;
	memcpy(&sp.pppol2tp, &session->tunnel_addr,
	       sizeof(struct pppol2tp_addr));

	memcpy(uaddr, &sp, len);

	*usockaddr_len = len;

	error = 0;

end:
	return error;
}

/****************************************************************************
 * ioctl() handlers.
 *
 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
 * sockets. However, in order to control kernel tunnel features, we allow
 * userspace to create a special "tunnel" PPPoX socket which is used for
 * control only.  Tunnel PPPoX sockets have session_id == 0 and simply allow
 * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
 * calls.
 ****************************************************************************/
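
/* Hedged userspace sketch (disabled; includes as in the sketch following
 * pppol2tp_connect() above) of the tunnel-control pattern just described:
 * leaving both session ids at zero makes connect() set up only the
 * in-kernel tunnel context, yielding a socket used purely for ioctl(),
 * setsockopt() and getsockopt() calls. tunnel_fd and tunnel_id are
 * hypothetical.
 */
#if 0
static int l2tp_open_tunnel_mgmt(int tunnel_fd, __u16 tunnel_id)
{
	struct sockaddr_pppol2tp sax;
	int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);

	if (fd < 0)
		return -1;

	memset(&sax, 0, sizeof(sax));
	sax.sa_family		= AF_PPPOX;
	sax.sa_protocol		= PX_PROTO_OL2TP;
	sax.pppol2tp.fd		= tunnel_fd;	/* tunnel UDP socket */
	sax.pppol2tp.s_tunnel	= tunnel_id;
	sax.pppol2tp.s_session	= 0;	/* 0/0 marks a tunnel socket */
	sax.pppol2tp.d_session	= 0;

	if (connect(fd, (struct sockaddr *) &sax, sizeof(sax)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif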

/* Session ioctl helper.
 */
static int pppol2tp_session_ioctl(struct pppol2tp_session *session,
				  unsigned int cmd, unsigned long arg)
{
	struct ifreq ifr;
	int err = 0;
	struct sock *sk = session->sock;
	int val = (int) arg;

	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
	       "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
	       session->name, cmd, arg);

	sock_hold(sk);

	switch (cmd) {
	case SIOCGIFMTU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
			break;
		ifr.ifr_mtu = session->mtu;
		if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
			break;

		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get mtu=%d\n", session->name, session->mtu);
		err = 0;
		break;

	case SIOCSIFMTU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
			break;

		session->mtu = ifr.ifr_mtu;

		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set mtu=%d\n", session->name, session->mtu);
		err = 0;
		break;

	case PPPIOCGMRU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (put_user(session->mru, (int __user *) arg))
			break;

		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get mru=%d\n", session->name, session->mru);
		err = 0;
		break;

	case PPPIOCSMRU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (get_user(val, (int __user *) arg))
			break;

		session->mru = val;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set mru=%d\n", session->name, session->mru);
		err = 0;
		break;

	case PPPIOCGFLAGS:
		err = -EFAULT;
		if (put_user(session->flags, (int __user *) arg))
			break;

		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get flags=%d\n", session->name, session->flags);
		err = 0;
		break;

	case PPPIOCSFLAGS:
		err = -EFAULT;
		if (get_user(val, (int __user *) arg))
			break;
		session->flags = val;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set flags=%d\n", session->name, session->flags);
		err = 0;
		break;

	case PPPIOCGL2TPSTATS:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (copy_to_user((void __user *) arg, &session->stats,
				 sizeof(session->stats)))
			break;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get L2TP stats\n", session->name);
		err = 0;
		break;

	default:
		err = -ENOSYS;
		break;
	}

	sock_put(sk);

	return err;
}
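
/* Hedged userspace sketch (disabled) of the per-session ioctls handled
 * above: query the MTU with SIOCGIFMTU, then set the MRU with PPPIOCSMRU
 * on a connected session socket. session_fd and new_mru are hypothetical.
 */
#if 0
#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <linux/if_ppp.h>	/* PPPIOCSMRU */

static int l2tp_tune_session(int session_fd, int new_mru)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	if (ioctl(session_fd, SIOCGIFMTU, &ifr) < 0)
		return -1;

	/* keep the MRU no larger than the session MTU */
	if (new_mru > ifr.ifr_mtu)
		new_mru = ifr.ifr_mtu;

	return ioctl(session_fd, PPPIOCSMRU, &new_mru);
}
#endif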

/* Tunnel ioctl helper.
 *
 * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
 * specifies a session_id, the session ioctl handler is called. This allows an
 * application to retrieve session stats via a tunnel socket.
 */
static int pppol2tp_tunnel_ioctl(struct pppol2tp_tunnel *tunnel,
				 unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct sock *sk = tunnel->sock;
	struct pppol2tp_ioc_stats stats_req;

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
	       "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name,
	       cmd, arg);

	sock_hold(sk);

	switch (cmd) {
	case PPPIOCGL2TPSTATS:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		if (copy_from_user(&stats_req, (void __user *) arg,
				   sizeof(stats_req))) {
			err = -EFAULT;
			break;
		}
		if (stats_req.session_id != 0) {
			/* resend to session ioctl handler */
			struct pppol2tp_session *session =
				pppol2tp_session_find(tunnel, stats_req.session_id);
			if (session != NULL)
				err = pppol2tp_session_ioctl(session, cmd, arg);
			else
				err = -EBADR;
			break;
		}
#ifdef CONFIG_XFRM
		tunnel->stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
#endif
		if (copy_to_user((void __user *) arg, &tunnel->stats,
				 sizeof(tunnel->stats))) {
			err = -EFAULT;
			break;
		}
		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get L2TP stats\n", tunnel->name);
		err = 0;
		break;

	default:
		err = -ENOSYS;
		break;
	}

	sock_put(sk);

	return err;
}
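
/* Disabled userspace sketch of the stats redirect noted above: with
 * stats->session_id == 0 the tunnel's own counters come back; a non-zero
 * session_id makes the same ioctl on the tunnel socket return that
 * session's counters instead. mgmt_fd is hypothetical.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if_ppp.h>	/* PPPIOCGL2TPSTATS */
#include <linux/if_pppol2tp.h>	/* struct pppol2tp_ioc_stats */

static int l2tp_get_stats(int mgmt_fd, __u16 session_id,
			  struct pppol2tp_ioc_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->session_id = session_id;	/* 0 => whole-tunnel stats */
	return ioctl(mgmt_fd, PPPIOCGL2TPSTATS, stats);
}
#endif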

/* Main ioctl() handler.
 * Dispatch to tunnel or session helpers depending on the socket.
 */
static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct pppol2tp_session *session;
	struct pppol2tp_tunnel *tunnel;
	int err;

	if (!sk)
		return 0;

	err = -EBADF;
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto end;

	err = -ENOTCONN;
	if ((sk->sk_user_data == NULL) ||
	    (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
		goto end;

	/* Get session context from the socket */
	err = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto end;

	/* Special case: if session's session_id is zero, treat ioctl as a
	 * tunnel ioctl
	 */
	if ((session->tunnel_addr.s_session == 0) &&
	    (session->tunnel_addr.d_session == 0)) {
		err = -EBADF;
		tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
		if (tunnel == NULL)
			goto end;

		err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
		goto end;
	}

	err = pppol2tp_session_ioctl(session, cmd, arg);

end:
	return err;
}

/*****************************************************************************
 * setsockopt() / getsockopt() support.
 *
 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
 * sockets. In order to control kernel tunnel features, we allow userspace to
 * create a special "tunnel" PPPoX socket which is used for control only.
 * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
 * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
 *****************************************************************************/
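
/* Disabled userspace sketch of the SOL_PPPOL2TP options implemented
 * below: enable data-channel send sequence numbers, then read back the
 * session debug mask. session_fd is hypothetical; SOL_PPPOL2TP comes from
 * <linux/socket.h> and the option names from <linux/if_pppol2tp.h>.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_pppol2tp.h>

static int l2tp_enable_send_seq(int session_fd)
{
	int on = 1;
	int debug = 0;
	socklen_t len = sizeof(debug);

	if (setsockopt(session_fd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ,
		       &on, sizeof(on)) < 0)
		return -1;

	return getsockopt(session_fd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG,
			  &debug, &len);
}
#endif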

/* Tunnel setsockopt() helper.
 */
static int pppol2tp_tunnel_setsockopt(struct sock *sk,
				      struct pppol2tp_tunnel *tunnel,
				      int optname, int val)
{
	int err = 0;

	switch (optname) {
	case PPPOL2TP_SO_DEBUG:
		tunnel->debug = val;
		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set debug=%x\n", tunnel->name, tunnel->debug);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}

/* Session setsockopt helper.
 */
static int pppol2tp_session_setsockopt(struct sock *sk,
				       struct pppol2tp_session *session,
				       int optname, int val)
{
	int err = 0;

	switch (optname) {
	case PPPOL2TP_SO_RECVSEQ:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->recv_seq = val ? -1 : 0;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set recv_seq=%d\n", session->name,
		       session->recv_seq);
		break;

	case PPPOL2TP_SO_SENDSEQ:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->send_seq = val ? -1 : 0;
		{
			struct sock *ssk      = session->sock;
			struct pppox_sock *po = pppox_sk(ssk);
			po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
				PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
		}
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set send_seq=%d\n", session->name, session->send_seq);
		break;

	case PPPOL2TP_SO_LNSMODE:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->lns_mode = val ? -1 : 0;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set lns_mode=%d\n", session->name,
		       session->lns_mode);
		break;

	case PPPOL2TP_SO_DEBUG:
		session->debug = val;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set debug=%x\n", session->name, session->debug);
		break;

	case PPPOL2TP_SO_REORDERTO:
		session->reorder_timeout = msecs_to_jiffies(val);
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set reorder_timeout=%d\n", session->name,
		       session->reorder_timeout);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}

/* Main setsockopt() entry point.
 * Does API checks, then calls either the tunnel or session setsockopt
 * handler, according to whether the PPPoL2TP socket is a for a regular
 * session or the special tunnel type.
 */
static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct pppol2tp_session *session = sk->sk_user_data;
	struct pppol2tp_tunnel *tunnel;
	int val;
	int err;

	if (level != SOL_PPPOL2TP)
		return udp_prot.setsockopt(sk, level, optname, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	err = -ENOTCONN;
	if (sk->sk_user_data == NULL)
		goto end;

	/* Get session context from the socket */
	err = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto end;

	/* Special case: if session_id == 0x0000, treat as operation on tunnel
	 */
	if ((session->tunnel_addr.s_session == 0) &&
	    (session->tunnel_addr.d_session == 0)) {
		err = -EBADF;
		tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
		if (tunnel == NULL)
			goto end;

		err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
	} else
		err = pppol2tp_session_setsockopt(sk, session, optname, val);

end:
	return err;
}

/* Tunnel getsockopt helper. Called with sock locked.
 */
static int pppol2tp_tunnel_getsockopt(struct sock *sk,
				      struct pppol2tp_tunnel *tunnel,
				      int optname, int *val)
{
	int err = 0;

	switch (optname) {
	case PPPOL2TP_SO_DEBUG:
		*val = tunnel->debug;
		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get debug=%x\n", tunnel->name, tunnel->debug);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}

/* Session getsockopt helper. Called with sock locked.
 */
static int pppol2tp_session_getsockopt(struct sock *sk,
				       struct pppol2tp_session *session,
				       int optname, int *val)
{
	int err = 0;

	switch (optname) {
	case PPPOL2TP_SO_RECVSEQ:
		*val = session->recv_seq;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get recv_seq=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_SENDSEQ:
		*val = session->send_seq;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get send_seq=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_LNSMODE:
		*val = session->lns_mode;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get lns_mode=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_DEBUG:
		*val = session->debug;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get debug=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_REORDERTO:
		*val = (int) jiffies_to_msecs(session->reorder_timeout);
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get reorder_timeout=%d\n", session->name, *val);
		break;

	default:
		err = -ENOPROTOOPT;
	}

	return err;
}

/* Main getsockopt() entry point.
 * Does API checks, then calls either the tunnel or session getsockopt
 * handler, according to whether the PPPoX socket is for a regular session
 * or the special tunnel type.
 */
static int pppol2tp_getsockopt(struct socket *sock, int level,
			       int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct pppol2tp_session *session = sk->sk_user_data;
	struct pppol2tp_tunnel *tunnel;
	int val, len;
	int err;

	if (level != SOL_PPPOL2TP)
		return udp_prot.getsockopt(sk, level, optname, optval, optlen);

	if (get_user(len, (int __user *) optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	err = -ENOTCONN;
	if (sk->sk_user_data == NULL)
		goto end;

	/* Get the session context */
	err = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto end;

	/* Special case: if session_id == 0x0000, treat as operation on tunnel */
	if ((session->tunnel_addr.s_session == 0) &&
	    (session->tunnel_addr.d_session == 0)) {
		err = -EBADF;
		tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
		if (tunnel == NULL)
			goto end;

		err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
	} else
		err = pppol2tp_session_getsockopt(sk, session, optname, &val);

	if (err != 0)
		goto end;

	err = -EFAULT;
	if (put_user(len, (int __user *) optlen))
		goto end;

	if (copy_to_user((void __user *) optval, &val, len))
		goto end;

	err = 0;
end:
	return err;
}

/*****************************************************************************
 * /proc filesystem for debug
 *****************************************************************************/

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

struct pppol2tp_seq_data {
	struct pppol2tp_tunnel *tunnel; /* current tunnel */
	struct pppol2tp_session *session; /* NULL means get first session in tunnel */
};

static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
{
	struct pppol2tp_session *session = NULL;
	struct hlist_node *walk;
	int found = 0;
	int next = 0;
	int i;

	read_lock(&tunnel->hlist_lock);
	for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
		hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
			if (curr == NULL) {
				found = 1;
				goto out;
			}
			if (session == curr) {
				next = 1;
				continue;
			}
			if (next) {
				found = 1;
				goto out;
			}
		}
	}
out:
	read_unlock(&tunnel->hlist_lock);
	if (!found)
		session = NULL;

	return session;
}

static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_tunnel *curr)
{
	struct pppol2tp_tunnel *tunnel = NULL;

	read_lock(&pppol2tp_tunnel_list_lock);
	if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) {
		goto out;
	}
	tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
out:
	read_unlock(&pppol2tp_tunnel_list_lock);

	return tunnel;
}

static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
{
	struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
	loff_t pos = *offs;

	if (!pos)
		goto out;

	BUG_ON(m->private == NULL);
	pd = m->private;

	if (pd->tunnel == NULL) {
		if (!list_empty(&pppol2tp_tunnel_list))
			pd->tunnel = list_entry(pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
	} else {
		pd->session = next_session(pd->tunnel, pd->session);
		if (pd->session == NULL) {
			pd->tunnel = next_tunnel(pd->tunnel);
		}
	}

	/* NULL tunnel and session indicates end of list */
	if ((pd->tunnel == NULL) && (pd->session == NULL))
		pd = NULL;

out:
	return pd;
}

static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return NULL;
}

static void pppol2tp_seq_stop(struct seq_file *p, void *v)
{
	/* nothing to do */
}

static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
{
	struct pppol2tp_tunnel *tunnel = v;

	seq_printf(m, "\nTUNNEL '%s', %c %d\n",
		   tunnel->name,
		   (tunnel == tunnel->sock->sk_user_data) ? 'Y':'N',
		   atomic_read(&tunnel->ref_count) - 1);
	seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
		   tunnel->debug,
		   tunnel->stats.tx_packets, tunnel->stats.tx_bytes,
		   tunnel->stats.tx_errors,
		   tunnel->stats.rx_packets, tunnel->stats.rx_bytes,
		   tunnel->stats.rx_errors);
}

static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
{
	struct pppol2tp_session *session = v;

	seq_printf(m, "  SESSION '%s' %08X/%d %04X/%04X -> "
		   "%04X/%04X %d %c\n",
		   session->name,
		   ntohl(session->tunnel_addr.addr.sin_addr.s_addr),
		   ntohs(session->tunnel_addr.addr.sin_port),
		   session->tunnel_addr.s_tunnel,
		   session->tunnel_addr.s_session,
		   session->tunnel_addr.d_tunnel,
		   session->tunnel_addr.d_session,
		   session->sock->sk_state,
		   (session == session->sock->sk_user_data) ?
		   'Y' : 'N');
	seq_printf(m, "   %d/%d/%c/%c/%s %08x %u\n",
		   session->mtu, session->mru,
		   session->recv_seq ? 'R' : '-',
		   session->send_seq ? 'S' : '-',
		   session->lns_mode ? "LNS" : "LAC",
		   session->debug,
		   jiffies_to_msecs(session->reorder_timeout));
	seq_printf(m, "   %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
		   session->nr, session->ns,
		   session->stats.tx_packets,
		   session->stats.tx_bytes,
		   session->stats.tx_errors,
		   session->stats.rx_packets,
		   session->stats.rx_bytes,
		   session->stats.rx_errors);
}

static int pppol2tp_seq_show(struct seq_file *m, void *v)
{
	struct pppol2tp_seq_data *pd = v;

	/* display header on line 1 */
	if (v == SEQ_START_TOKEN) {
		seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
		seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
		seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		seq_puts(m, "  SESSION name, addr/port src-tid/sid "
			 "dest-tid/sid state user-data-ok\n");
		seq_puts(m, "   mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
		seq_puts(m, "   nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		goto out;
	}

	/* Show the tunnel or session context.
	 */
	if (pd->session == NULL)
		pppol2tp_seq_tunnel_show(m, pd->tunnel);
	else
		pppol2tp_seq_session_show(m, pd->session);

out:
	return 0;
}
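
/* Illustrative /proc/net/pppol2tp output from the show handlers above.
 * All ids and counter values are made up; the exact field layout follows
 * the seq_printf() formats used by the handlers:
 *
 *	TUNNEL 'tunl 4096', Y 1
 *	 00000000 42/3216/0 42/3086/0
 *	  SESSION 'sess 4096/1' 0A000001/1701 1000/0001 -> 2000/0002 1 Y
 *	   1460/1460/-/-/LAC 00000000 0
 *	   0/0 21/1608/0 21/1543/0
 */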

static struct seq_operations pppol2tp_seq_ops = {
	.start		= pppol2tp_seq_start,
	.next		= pppol2tp_seq_next,
	.stop		= pppol2tp_seq_stop,
	.show		= pppol2tp_seq_show,
};

/* Called when our /proc file is opened. We allocate data for use when
 * iterating our tunnel / session contexts and store it in the private
 * data of the seq_file.
 */
static int pppol2tp_proc_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret = 0;

	ret = seq_open(file, &pppol2tp_seq_ops);
	if (ret < 0)
		goto out;

	m = file->private_data;

	/* Allocate and fill our proc_data for access later */
	m->private = kzalloc(sizeof(struct pppol2tp_seq_data), GFP_KERNEL);
	if (m->private == NULL) {
		/* don't leak the state allocated by seq_open() */
		seq_release(inode, file);
		ret = -ENOMEM;
	}

out:
	return ret;
}

/* Called when /proc file access completes.
 */
static int pppol2tp_proc_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;

	kfree(m->private);
	m->private = NULL;

	return seq_release(inode, file);
}

static struct file_operations pppol2tp_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= pppol2tp_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= pppol2tp_proc_release,
};

static struct proc_dir_entry *pppol2tp_proc;

#endif /* CONFIG_PROC_FS */

/*****************************************************************************
 * Init and cleanup
 *****************************************************************************/

static struct proto_ops pppol2tp_ops = {
	.family		= AF_PPPOX,
	.owner		= THIS_MODULE,
	.release	= pppol2tp_release,
	.bind		= sock_no_bind,
	.connect	= pppol2tp_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pppol2tp_getname,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= pppol2tp_setsockopt,
	.getsockopt	= pppol2tp_getsockopt,
	.sendmsg	= pppol2tp_sendmsg,
	.recvmsg	= pppol2tp_recvmsg,
	.mmap		= sock_no_mmap,
	.ioctl		= pppox_ioctl,
};

static struct pppox_proto pppol2tp_proto = {
	.create		= pppol2tp_create,
	.ioctl		= pppol2tp_ioctl
};

static int __init pppol2tp_init(void)
{
	int err;

	err = proto_register(&pppol2tp_sk_proto, 0);
	if (err)
		goto out;
	err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
	if (err)
		goto out_unregister_pppol2tp_proto;

#ifdef CONFIG_PROC_FS
	pppol2tp_proc = create_proc_entry("pppol2tp", 0, init_net.proc_net);
	if (!pppol2tp_proc) {
		err = -ENOMEM;
		goto out_unregister_pppox_proto;
	}
	pppol2tp_proc->proc_fops = &pppol2tp_proc_fops;
#endif /* CONFIG_PROC_FS */
	printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
	       PPPOL2TP_DRV_VERSION);

out:
	return err;

out_unregister_pppox_proto:
	unregister_pppox_proto(PX_PROTO_OL2TP);
out_unregister_pppol2tp_proto:
	proto_unregister(&pppol2tp_sk_proto);
	goto out;
}

static void __exit pppol2tp_exit(void)
{
	unregister_pppox_proto(PX_PROTO_OL2TP);

#ifdef CONFIG_PROC_FS
	remove_proc_entry("pppol2tp", init_net.proc_net);
#endif
	proto_unregister(&pppol2tp_sk_proto);
}

module_init(pppol2tp_init);
module_exit(pppol2tp_exit);

MODULE_AUTHOR("Martijn van Oosterhout <kleptog@svana.org>,"
	      "James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("PPP over L2TP over UDP");
MODULE_LICENSE("GPL");
MODULE_VERSION(PPPOL2TP_DRV_VERSION);
n>(struct net_device *dev, struct nv_skb_map* tx_skb) { struct fe_priv *np = netdev_priv(dev); if (tx_skb->dma) { pci_unmap_page(np->pci_dev, tx_skb->dma, tx_skb->dma_len, PCI_DMA_TODEVICE); tx_skb->dma = 0; } if (tx_skb->skb) { dev_kfree_skb_any(tx_skb->skb); tx_skb->skb = NULL; return 1; } else { return 0; } } static void nv_drain_tx(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); unsigned int i; for (i = 0; i < np->tx_ring_size; i++) { if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { np->tx_ring.orig[i].flaglen = 0; np->tx_ring.orig[i].buf = 0; } else { np->tx_ring.ex[i].flaglen = 0; np->tx_ring.ex[i].txvlan = 0; np->tx_ring.ex[i].bufhigh = 0; np->tx_ring.ex[i].buflow = 0; } if (nv_release_txskb(dev, &np->tx_skb[i])) np->stats.tx_dropped++; } } static void nv_drain_rx(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); int i; for (i = 0; i < np->rx_ring_size; i++) { if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { np->rx_ring.orig[i].flaglen = 0; np->rx_ring.orig[i].buf = 0; } else { np->rx_ring.ex[i].flaglen = 0; np->rx_ring.ex[i].txvlan = 0; np->rx_ring.ex[i].bufhigh = 0; np->rx_ring.ex[i].buflow = 0; } wmb(); if (np->rx_skb[i].skb) { pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, (skb_end_pointer(np->rx_skb[i].skb) - np->rx_skb[i].skb->data), PCI_DMA_FROMDEVICE); dev_kfree_skb(np->rx_skb[i].skb); np->rx_skb[i].skb = NULL; } } } static void drain_ring(struct net_device *dev) { nv_drain_tx(dev); nv_drain_rx(dev); } static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) { return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); } /* * nv_start_xmit: dev->hard_start_xmit function * Called with netif_tx_lock held. */ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); u32 tx_flags = 0; u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); unsigned int fragments = skb_shinfo(skb)->nr_frags; unsigned int i; u32 offset = 0; u32 bcnt; u32 size = skb->len-skb->data_len; u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); u32 empty_slots; struct ring_desc* put_tx; struct ring_desc* start_tx; struct ring_desc* prev_tx; struct nv_skb_map* prev_tx_ctx; /* add fragments to entries count */ for (i = 0; i < fragments; i++) { entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); } empty_slots = nv_get_empty_tx_slots(np); if (unlikely(empty_slots <= entries)) { spin_lock_irq(&np->lock); netif_stop_queue(dev); np->tx_stop = 1; spin_unlock_irq(&np->lock); return NETDEV_TX_BUSY; } start_tx = put_tx = np->put_tx.orig; /* setup the header buffer */ do { prev_tx = put_tx; prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, PCI_DMA_TODEVICE); np->put_tx_ctx->dma_len = bcnt; put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); tx_flags = np->tx_flags; offset += bcnt; size -= bcnt; if (unlikely(put_tx++ == np->last_tx.orig)) put_tx = np->first_tx.orig; if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) np->put_tx_ctx = np->first_tx_ctx; } while (size); /* setup the fragments */ for (i = 0; i < fragments; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; u32 size = frag->size; offset = 0; do { prev_tx = put_tx; prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, PCI_DMA_TODEVICE); np->put_tx_ctx->dma_len = bcnt; put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); offset += bcnt; size -= bcnt; if (unlikely(put_tx++ == np->last_tx.orig)) put_tx = np->first_tx.orig; if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) np->put_tx_ctx = np->first_tx_ctx; } while (size); } /* set last fragment flag */ prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); /* save skb in this slot's context area */ prev_tx_ctx->skb = skb; if (skb_is_gso(skb)) tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); else tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; spin_lock_irq(&np->lock); /* set tx flags */ start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); np->put_tx.orig = put_tx; spin_unlock_irq(&np->lock); dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n", dev->name, entries, tx_flags_extra); { int j; for (j=0; j<64; j++) { if ((j%16) == 0) dprintk("\n%03x:", j); dprintk(" %02x", ((unsigned char*)skb->data)[j]); } dprintk("\n"); } dev->trans_start = jiffies; writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); return NETDEV_TX_OK; } static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); u32 tx_flags = 0; u32 tx_flags_extra; unsigned int fragments = skb_shinfo(skb)->nr_frags; unsigned int i; u32 offset = 0; u32 bcnt; u32 size = skb->len-skb->data_len; u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); u32 empty_slots; struct ring_desc_ex* put_tx; struct ring_desc_ex* start_tx; struct ring_desc_ex* prev_tx; struct nv_skb_map* prev_tx_ctx; /* add fragments to entries count */ for (i = 0; i < fragments; i++) { entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); } empty_slots = nv_get_empty_tx_slots(np); if (unlikely(empty_slots <= entries)) { spin_lock_irq(&np->lock); netif_stop_queue(dev); np->tx_stop = 1; spin_unlock_irq(&np->lock); return NETDEV_TX_BUSY; } start_tx = put_tx = np->put_tx.ex; /* setup the header buffer */ do { prev_tx = put_tx; prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, PCI_DMA_TODEVICE); np->put_tx_ctx->dma_len = bcnt; put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32; put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF; put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); tx_flags = NV_TX2_VALID; offset += bcnt; size -= bcnt; if (unlikely(put_tx++ == np->last_tx.ex)) put_tx = np->first_tx.ex; if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) np->put_tx_ctx = np->first_tx_ctx; } while (size); /* setup the fragments */ for (i = 0; i < fragments; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; u32 size = frag->size; offset = 0; do { prev_tx = put_tx; prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, PCI_DMA_TODEVICE); np->put_tx_ctx->dma_len = bcnt; put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32; put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF; put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); offset += bcnt; size -= bcnt; if (unlikely(put_tx++ == np->last_tx.ex)) put_tx = np->first_tx.ex; if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) np->put_tx_ctx = np->first_tx_ctx; } while (size); } /* set last fragment flag */ prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); /* save skb in this slot's context area */ prev_tx_ctx->skb = skb; if (skb_is_gso(skb)) tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); else tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; /* vlan tag */ if (likely(!np->vlangrp)) { start_tx->txvlan = 0; } else { if (vlan_tx_tag_present(skb)) start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb)); else start_tx->txvlan = 0; } spin_lock_irq(&np->lock); /* set tx flags */ start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); np->put_tx.ex = put_tx; spin_unlock_irq(&np->lock); dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n", dev->name, entries, tx_flags_extra); { int j; for (j=0; j<64; j++) { if ((j%16) == 0) dprintk("\n%03x:", j); dprintk(" %02x", ((unsigned char*)skb->data)[j]); } dprintk("\n"); } dev->trans_start = jiffies; writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); return NETDEV_TX_OK; } /* * nv_tx_done: check for completed packets, release the skbs. * * Caller must own np->lock. 
*/ static void nv_tx_done(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); u32 flags; struct ring_desc* orig_get_tx = np->get_tx.orig; while ((np->get_tx.orig != np->put_tx.orig) && !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) { dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n", dev->name, flags); pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, np->get_tx_ctx->dma_len, PCI_DMA_TODEVICE); np->get_tx_ctx->dma = 0; if (np->desc_ver == DESC_VER_1) { if (flags & NV_TX_LASTPACKET) { if (flags & NV_TX_ERROR) { if (flags & NV_TX_UNDERFLOW) np->stats.tx_fifo_errors++; if (flags & NV_TX_CARRIERLOST) np->stats.tx_carrier_errors++; np->stats.tx_errors++; } else { np->stats.tx_packets++; np->stats.tx_bytes += np->get_tx_ctx->skb->len; } dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; } } else { if (flags & NV_TX2_LASTPACKET) { if (flags & NV_TX2_ERROR) { if (flags & NV_TX2_UNDERFLOW) np->stats.tx_fifo_errors++; if (flags & NV_TX2_CARRIERLOST) np->stats.tx_carrier_errors++; np->stats.tx_errors++; } else { np->stats.tx_packets++; np->stats.tx_bytes += np->get_tx_ctx->skb->len; } dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; } } if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) np->get_tx.orig = np->first_tx.orig; if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) np->get_tx_ctx = np->first_tx_ctx; } if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { np->tx_stop = 0; netif_wake_queue(dev); } } static void nv_tx_done_optimized(struct net_device *dev, int limit) { struct fe_priv *np = netdev_priv(dev); u32 flags; struct ring_desc_ex* orig_get_tx = np->get_tx.ex; while ((np->get_tx.ex != np->put_tx.ex) && !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) && (limit-- > 0)) { dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", dev->name, flags); pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, np->get_tx_ctx->dma_len, PCI_DMA_TODEVICE); np->get_tx_ctx->dma = 0; if (flags & NV_TX2_LASTPACKET) { if (!(flags & NV_TX2_ERROR)) np->stats.tx_packets++; dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; } if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) np->get_tx.ex = np->first_tx.ex; if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) np->get_tx_ctx = np->first_tx_ctx; } if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { np->tx_stop = 0; netif_wake_queue(dev); } } /* * nv_tx_timeout: dev->tx_timeout function * Called with netif_tx_lock held. */ static void nv_tx_timeout(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); u32 status; if (np->msi_flags & NV_MSI_X_ENABLED) status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; else status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; printk(KERN_INFO "%s: Got tx_timeout. 
irq: %08x\n", dev->name, status); { int i; printk(KERN_INFO "%s: Ring at %lx\n", dev->name, (unsigned long)np->ring_addr); printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); for (i=0;i<=np->register_size;i+= 32) { printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", i, readl(base + i + 0), readl(base + i + 4), readl(base + i + 8), readl(base + i + 12), readl(base + i + 16), readl(base + i + 20), readl(base + i + 24), readl(base + i + 28)); } printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); for (i=0;i<np->tx_ring_size;i+= 4) { if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", i, le32_to_cpu(np->tx_ring.orig[i].buf), le32_to_cpu(np->tx_ring.orig[i].flaglen), le32_to_cpu(np->tx_ring.orig[i+1].buf), le32_to_cpu(np->tx_ring.orig[i+1].flaglen), le32_to_cpu(np->tx_ring.orig[i+2].buf), le32_to_cpu(np->tx_ring.orig[i+2].flaglen), le32_to_cpu(np->tx_ring.orig[i+3].buf), le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); } else { printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", i, le32_to_cpu(np->tx_ring.ex[i].bufhigh), le32_to_cpu(np->tx_ring.ex[i].buflow), le32_to_cpu(np->tx_ring.ex[i].flaglen), le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), le32_to_cpu(np->tx_ring.ex[i+1].buflow), le32_to_cpu(np->tx_ring.ex[i+1].flaglen), le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), le32_to_cpu(np->tx_ring.ex[i+2].buflow), le32_to_cpu(np->tx_ring.ex[i+2].flaglen), le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), le32_to_cpu(np->tx_ring.ex[i+3].buflow), le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); } } } spin_lock_irq(&np->lock); /* 1) stop tx engine */ nv_stop_tx(dev); /* 2) check that the packets were not sent already: */ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) nv_tx_done(dev); else nv_tx_done_optimized(dev, np->tx_ring_size); /* 3) if there are dead entries: clear everything */ if (np->get_tx_ctx != np->put_tx_ctx) { printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); nv_drain_tx(dev); nv_init_tx(dev); setup_hw_rings(dev, NV_SETUP_TX_RING); } netif_wake_queue(dev); /* 4) restart tx engine */ nv_start_tx(dev); spin_unlock_irq(&np->lock); } /* * Called when the nic notices a mismatch between the actual data len on the * wire and the len indicated in the 802 header */ static int nv_getlen(struct net_device *dev, void *packet, int datalen) { int hdrlen; /* length of the 802 header */ int protolen; /* length as stored in the proto field */ /* 1) calculate len according to header */ if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); hdrlen = VLAN_HLEN; } else { protolen = ntohs( ((struct ethhdr *)packet)->h_proto); hdrlen = ETH_HLEN; } dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n", dev->name, datalen, protolen, hdrlen); if (protolen > ETH_DATA_LEN) return datalen; /* Value in proto field not a len, no checks possible */ protolen += hdrlen; /* consistency checks: */ if (datalen > ETH_ZLEN) { if (datalen >= protolen) { /* more data on wire than in 802 header, trim of * additional data. */ dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", dev->name, protolen); return protolen; } else { /* less data on wire than mentioned in header. * Discard the packet. */ dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n", dev->name); return -1; } } else { /* short packet. 
Accept only if 802 values are also short */ if (protolen > ETH_ZLEN) { dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n", dev->name); return -1; } dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", dev->name, datalen); return datalen; } } static int nv_rx_process(struct net_device *dev, int limit) { struct fe_priv *np = netdev_priv(dev); u32 flags; u32 rx_processed_cnt = 0; struct sk_buff *skb; int len; while((np->get_rx.orig != np->put_rx.orig) && !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && (rx_processed_cnt++ < limit)) { dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n", dev->name, flags); /* * the packet is for us - immediately tear down the pci mapping. * TODO: check if a prefetch of the first cacheline improves * the performance. */ pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, np->get_rx_ctx->dma_len, PCI_DMA_FROMDEVICE); skb = np->get_rx_ctx->skb; np->get_rx_ctx->skb = NULL; { int j; dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); for (j=0; j<64; j++) { if ((j%16) == 0) dprintk("\n%03x:", j); dprintk(" %02x", ((unsigned char*)skb->data)[j]); } dprintk("\n"); } /* look at what we actually got: */ if (np->desc_ver == DESC_VER_1) { if (likely(flags & NV_RX_DESCRIPTORVALID)) { len = flags & LEN_MASK_V1; if (unlikely(flags & NV_RX_ERROR)) { if (flags & NV_RX_ERROR4) { len = nv_getlen(dev, skb->data, len); if (len < 0) { np->stats.rx_errors++; dev_kfree_skb(skb); goto next_pkt; } } /* framing errors are soft errors */ else if (flags & NV_RX_FRAMINGERR) { if (flags & NV_RX_SUBSTRACT1) { len--; } } /* the rest are hard errors */ else { if (flags & NV_RX_MISSEDFRAME) np->stats.rx_missed_errors++; if (flags & NV_RX_CRCERR) np->stats.rx_crc_errors++; if (flags & NV_RX_OVERFLOW) np->stats.rx_over_errors++; np->stats.rx_errors++; dev_kfree_skb(skb); goto next_pkt; } } } else { dev_kfree_skb(skb); goto next_pkt; } } else { if (likely(flags & NV_RX2_DESCRIPTORVALID)) { len = flags & LEN_MASK_V2; if (unlikely(flags & NV_RX2_ERROR)) { if (flags & NV_RX2_ERROR4) { len = nv_getlen(dev, skb->data, len); if (len < 0) { np->stats.rx_errors++; dev_kfree_skb(skb); goto next_pkt; } } /* framing errors are soft errors */ else if (flags & NV_RX2_FRAMINGERR) { if (flags & NV_RX2_SUBSTRACT1) { len--; } } /* the rest are hard errors */ else { if (flags & NV_RX2_CRCERR) np->stats.rx_crc_errors++; if (flags & NV_RX2_OVERFLOW) np->stats.rx_over_errors++; np->stats.rx_errors++; dev_kfree_skb(skb); goto next_pkt; } } if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { skb->ip_summed = CHECKSUM_UNNECESSARY; } else { if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { skb->ip_summed = CHECKSUM_UNNECESSARY; } } } else { dev_kfree_skb(skb); goto next_pkt; } } /* got a valid packet - forward it to the network core */ skb_put(skb, len); skb->protocol = eth_type_trans(skb, dev); dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n", dev->name, len, skb->protocol); #ifdef CONFIG_FORCEDETH_NAPI netif_receive_skb(skb); #else netif_rx(skb); #endif dev->last_rx = jiffies; np->stats.rx_packets++; np->stats.rx_bytes += len; next_pkt: if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) np->get_rx.orig = np->first_rx.orig; if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) np->get_rx_ctx = np->first_rx_ctx; } return rx_processed_cnt; } static int nv_rx_process_optimized(struct net_device *dev, int limit) { struct fe_priv *np = netdev_priv(dev); u32 flags; u32 vlanflags 
= 0; u32 rx_processed_cnt = 0; struct sk_buff *skb; int len; while((np->get_rx.ex != np->put_rx.ex) && !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && (rx_processed_cnt++ < limit)) { dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n", dev->name, flags); /* * the packet is for us - immediately tear down the pci mapping. * TODO: check if a prefetch of the first cacheline improves * the performance. */ pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, np->get_rx_ctx->dma_len, PCI_DMA_FROMDEVICE); skb = np->get_rx_ctx->skb; np->get_rx_ctx->skb = NULL; { int j; dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); for (j=0; j<64; j++) { if ((j%16) == 0) dprintk("\n%03x:", j); dprintk(" %02x", ((unsigned char*)skb->data)[j]); } dprintk("\n"); } /* look at what we actually got: */ if (likely(flags & NV_RX2_DESCRIPTORVALID)) { len = flags & LEN_MASK_V2; if (unlikely(flags & NV_RX2_ERROR)) { if (flags & NV_RX2_ERROR4) { len = nv_getlen(dev, skb->data, len); if (len < 0) { dev_kfree_skb(skb); goto next_pkt; } } /* framing errors are soft errors */ else if (flags & NV_RX2_FRAMINGERR) { if (flags & NV_RX2_SUBSTRACT1) { len--; } } /* the rest are hard errors */ else { dev_kfree_skb(skb); goto next_pkt; } } if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { skb->ip_summed = CHECKSUM_UNNECESSARY; } else { if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { skb->ip_summed = CHECKSUM_UNNECESSARY; } } /* got a valid packet - forward it to the network core */ skb_put(skb, len); skb->protocol = eth_type_trans(skb, dev); prefetch(skb->data); dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n", dev->name, len, skb->protocol); if (likely(!np->vlangrp)) { #ifdef CONFIG_FORCEDETH_NAPI netif_receive_skb(skb); #else netif_rx(skb); #endif } else { vlanflags = le32_to_cpu(np->get_rx.ex->buflow); if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { #ifdef CONFIG_FORCEDETH_NAPI vlan_hwaccel_receive_skb(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK); #else vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK); #endif } else { #ifdef CONFIG_FORCEDETH_NAPI netif_receive_skb(skb); #else netif_rx(skb); #endif } } dev->last_rx = jiffies; np->stats.rx_packets++; np->stats.rx_bytes += len; } else { dev_kfree_skb(skb); } next_pkt: if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) np->get_rx.ex = np->first_rx.ex; if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) np->get_rx_ctx = np->first_rx_ctx; } return rx_processed_cnt; } static void set_bufsize(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); if (dev->mtu <= ETH_DATA_LEN) np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; else np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; } /* * nv_change_mtu: dev->change_mtu function * Called with dev_base_lock held for read. */ static int nv_change_mtu(struct net_device *dev, int new_mtu) { struct fe_priv *np = netdev_priv(dev); int old_mtu; if (new_mtu < 64 || new_mtu > np->pkt_limit) return -EINVAL; old_mtu = dev->mtu; dev->mtu = new_mtu; /* return early if the buffer sizes will not change */ if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) return 0; if (old_mtu == new_mtu) return 0; /* synchronized against open : rtnl_lock() held by caller */ if (netif_running(dev)) { u8 __iomem *base = get_hwbase(dev); /* * It seems that the nic preloads valid ring entries into an * internal buffer. 
The procedure for flushing everything is * guessed, there is probably a simpler approach. * Changing the MTU is a rare event, it shouldn't matter. */ nv_disable_irq(dev); netif_tx_lock_bh(dev); spin_lock(&np->lock); /* stop engines */ nv_stop_rx(dev); nv_stop_tx(dev); nv_txrx_reset(dev); /* drain rx queue */ nv_drain_rx(dev); nv_drain_tx(dev); /* reinit driver view of the rx queue */ set_bufsize(dev); if (nv_init_ring(dev)) { if (!np->in_shutdown) mod_timer(&np->oom_kick, jiffies + OOM_REFILL); } /* reinit nic view of the rx queue */ writel(np->rx_buf_sz, base + NvRegOffloadConfig); setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), base + NvRegRingSizes); pci_push(base); writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); pci_push(base); /* restart rx engine */ nv_start_rx(dev); nv_start_tx(dev); spin_unlock(&np->lock); netif_tx_unlock_bh(dev); nv_enable_irq(dev); } return 0; } static void nv_copy_mac_to_hw(struct net_device *dev) { u8 __iomem *base = get_hwbase(dev); u32 mac[2]; mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); writel(mac[0], base + NvRegMacAddrA); writel(mac[1], base + NvRegMacAddrB); } /* * nv_set_mac_address: dev->set_mac_address function * Called with rtnl_lock() held. */ static int nv_set_mac_address(struct net_device *dev, void *addr) { struct fe_priv *np = netdev_priv(dev); struct sockaddr *macaddr = (struct sockaddr*)addr; if (!is_valid_ether_addr(macaddr->sa_data)) return -EADDRNOTAVAIL; /* synchronized against open : rtnl_lock() held by caller */ memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); if (netif_running(dev)) { netif_tx_lock_bh(dev); spin_lock_irq(&np->lock); /* stop rx engine */ nv_stop_rx(dev); /* set mac address */ nv_copy_mac_to_hw(dev); /* restart rx engine */ nv_start_rx(dev); spin_unlock_irq(&np->lock); netif_tx_unlock_bh(dev); } else { nv_copy_mac_to_hw(dev); } return 0; } /* * nv_set_multicast: dev->set_multicast function * Called with netif_tx_lock held. 
*/ static void nv_set_multicast(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); u32 addr[2]; u32 mask[2]; u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; memset(addr, 0, sizeof(addr)); memset(mask, 0, sizeof(mask)); if (dev->flags & IFF_PROMISC) { pff |= NVREG_PFF_PROMISC; } else { pff |= NVREG_PFF_MYADDR; if (dev->flags & IFF_ALLMULTI || dev->mc_list) { u32 alwaysOff[2]; u32 alwaysOn[2]; alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; if (dev->flags & IFF_ALLMULTI) { alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; } else { struct dev_mc_list *walk; walk = dev->mc_list; while (walk != NULL) { u32 a, b; a = le32_to_cpu(*(u32 *) walk->dmi_addr); b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4])); alwaysOn[0] &= a; alwaysOff[0] &= ~a; alwaysOn[1] &= b; alwaysOff[1] &= ~b; walk = walk->next; } } addr[0] = alwaysOn[0]; addr[1] = alwaysOn[1]; mask[0] = alwaysOn[0] | alwaysOff[0]; mask[1] = alwaysOn[1] | alwaysOff[1]; } } addr[0] |= NVREG_MCASTADDRA_FORCE; pff |= NVREG_PFF_ALWAYS; spin_lock_irq(&np->lock); nv_stop_rx(dev); writel(addr[0], base + NvRegMulticastAddrA); writel(addr[1], base + NvRegMulticastAddrB); writel(mask[0], base + NvRegMulticastMaskA); writel(mask[1], base + NvRegMulticastMaskB); writel(pff, base + NvRegPacketFilterFlags); dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", dev->name); nv_start_rx(dev); spin_unlock_irq(&np->lock); } static void nv_update_pause(struct net_device *dev, u32 pause_flags) { struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; } else { writel(pff, base + NvRegPacketFilterFlags); } } if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame); writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; } else { writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); writel(regmisc, base + NvRegMisc1); } } } /** * nv_update_linkspeed: Setup the MAC according to the link partner * @dev: Network device to be configured * * The function queries the PHY and checks if there is a link partner. * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is * set to 10 MBit HD. * * The function returns 0 if there is no link partner and 1 if there is * a good link partner. */ static int nv_update_linkspeed(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); int adv = 0; int lpa = 0; int adv_lpa, adv_pause, lpa_pause; int newls = np->linkspeed; int newdup = np->duplex; int mii_status; int retval = 0; u32 control_1000, status_1000, phyreg, pause_flags, txreg; /* BMSR_LSTATUS is latched, read it twice: * we want the current value. 
*/ mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); if (!(mii_status & BMSR_LSTATUS)) { dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", dev->name); newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; newdup = 0; retval = 0; goto set_speed; } if (np->autoneg == 0) { dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", dev->name, np->fixed_mode); if (np->fixed_mode & LPA_100FULL) { newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; newdup = 1; } else if (np->fixed_mode & LPA_100HALF) { newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; newdup = 0; } else if (np->fixed_mode & LPA_10FULL) { newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; newdup = 1; } else { newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; newdup = 0; } retval = 1; goto set_speed; } /* check auto negotiation is complete */ if (!(mii_status & BMSR_ANEGCOMPLETE)) { /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; newdup = 0; retval = 0; dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); goto set_speed; } adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", dev->name, adv, lpa); retval = 1; if (np->gigabit == PHY_GIGABIT) { control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); if ((control_1000 & ADVERTISE_1000FULL) && (status_1000 & LPA_1000FULL)) { dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", dev->name); newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; newdup = 1; goto set_speed; } } /* FIXME: handle parallel detection properly */ adv_lpa = lpa & adv; if (adv_lpa & LPA_100FULL) { newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; newdup = 1; } else if (adv_lpa & LPA_100HALF) { newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; newdup = 0; } else if (adv_lpa & LPA_10FULL) { newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; newdup = 1; } else if (adv_lpa & LPA_10HALF) { newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; newdup = 0; } else { dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; newdup = 0; } set_speed: if (np->duplex == newdup && np->linkspeed == newls) return retval; dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", dev->name, np->linkspeed, np->duplex, newls, newdup); np->duplex = newdup; np->linkspeed = newls; if (np->gigabit == PHY_GIGABIT) { phyreg = readl(base + NvRegRandomSeed); phyreg &= ~(0x3FF00); if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) phyreg |= NVREG_RNDSEED_FORCE3; else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) phyreg |= NVREG_RNDSEED_FORCE2; else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) phyreg |= NVREG_RNDSEED_FORCE; writel(phyreg, base + NvRegRandomSeed); } phyreg = readl(base + NvRegPhyInterface); phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); if (np->duplex == 0) phyreg |= PHY_HALF; if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) phyreg |= PHY_100; else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) phyreg |= PHY_1000; writel(phyreg, base + NvRegPhyInterface); if (phyreg & PHY_RGMII) { if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) txreg = 
NVREG_TX_DEFERRAL_RGMII_1000; else txreg = NVREG_TX_DEFERRAL_RGMII_10_100; } else { txreg = NVREG_TX_DEFERRAL_DEFAULT; } writel(txreg, base + NvRegTxDeferral); if (np->desc_ver == DESC_VER_1) { txreg = NVREG_TX_WM_DESC1_DEFAULT; } else { if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) txreg = NVREG_TX_WM_DESC2_3_1000; else txreg = NVREG_TX_WM_DESC2_3_DEFAULT; } writel(txreg, base + NvRegTxWatermark); writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), base + NvRegMisc1); pci_push(base); writel(np->linkspeed, base + NvRegLinkSpeed); pci_push(base); pause_flags = 0; /* setup pause frame */ if (np->duplex != 0) { if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); switch (adv_pause) { case ADVERTISE_PAUSE_CAP: if (lpa_pause & LPA_PAUSE_CAP) { pause_flags |= NV_PAUSEFRAME_RX_ENABLE; if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) pause_flags |= NV_PAUSEFRAME_TX_ENABLE; } break; case ADVERTISE_PAUSE_ASYM: if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) { pause_flags |= NV_PAUSEFRAME_TX_ENABLE; } break; case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: if (lpa_pause & LPA_PAUSE_CAP) { pause_flags |= NV_PAUSEFRAME_RX_ENABLE; if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) pause_flags |= NV_PAUSEFRAME_TX_ENABLE; } if (lpa_pause == LPA_PAUSE_ASYM) { pause_flags |= NV_PAUSEFRAME_RX_ENABLE; } break; } } else { pause_flags = np->pause_flags; } } nv_update_pause(dev, pause_flags); return retval; } static void nv_linkchange(struct net_device *dev) { if (nv_update_linkspeed(dev)) { if (!netif_carrier_ok(dev)) { netif_carrier_on(dev); printk(KERN_INFO "%s: link up.\n", dev->name); nv_start_rx(dev); } } else { if (netif_carrier_ok(dev)) { netif_carrier_off(dev); printk(KERN_INFO "%s: link down.\n", dev->name); nv_stop_rx(dev); } } } static void nv_link_irq(struct net_device *dev) { u8 __iomem *base = get_hwbase(dev); u32 miistat; miistat = readl(base + NvRegMIIStatus); writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); if (miistat & (NVREG_MIISTAT_LINKCHANGE)) nv_linkchange(dev); dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); } static irqreturn_t nv_nic_irq(int foo, void *data) { struct net_device *dev = (struct net_device *) data; struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); u32 events; int i; dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); for (i=0; ; i++) { if (!(np->msi_flags & NV_MSI_X_ENABLED)) { events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); } else { events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); } dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); if (!(events & np->irqmask)) break; spin_lock(&np->lock); nv_tx_done(dev); spin_unlock(&np->lock); #ifdef CONFIG_FORCEDETH_NAPI if (events & NVREG_IRQ_RX_ALL) { netif_rx_schedule(dev, &np->napi); /* Disable furthur receive irq's */ spin_lock(&np->lock); np->irqmask &= ~NVREG_IRQ_RX_ALL; if (np->msi_flags & NV_MSI_X_ENABLED) writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); else writel(np->irqmask, base + NvRegIrqMask); spin_unlock(&np->lock); } #else if (nv_rx_process(dev, RX_WORK_PER_LOOP)) { if (unlikely(nv_alloc_rx(dev))) { spin_lock(&np->lock); if (!np->in_shutdown) mod_timer(&np->oom_kick, jiffies + OOM_REFILL); spin_unlock(&np->lock); 
static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	for (i=0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done(dev);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev, &np->napi);

			/* Disable further receive irqs */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_RETVAL(i);
}

/**
 * All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);

	for (i=0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev, &np->napi);

			/* Disable further receive irqs */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_optimized.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);

	return IRQ_RETVAL(i);
}
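/*
 * Editor's note: both ISR flavours above implement the same NAPI hand-off:
 * ack the events, schedule the poll routine, then mask RX interrupts so
 * the poll loop can run without further IRQs. A condensed sketch of that
 * pattern (illustrative only, compiled out, hypothetical helper name):
 */
#if 0
static irqreturn_t isr_napi_handoff_sketch(struct net_device *dev,
					   struct fe_priv *np,
					   u8 __iomem *base, u32 events)
{
	if (events & NVREG_IRQ_RX_ALL) {
		netif_rx_schedule(dev, &np->napi);	  /* arm the softirq poll */
		np->irqmask &= ~NVREG_IRQ_RX_ALL;	  /* remember RX is masked */
		writel(np->irqmask, base + NvRegIrqMask); /* mask RX at the nic */
	}
	return IRQ_HANDLED;	/* nv_napi_poll() re-enables RX when done */
}
#endif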
static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

	return IRQ_RETVAL(i);
}

#ifdef CONFIG_FORCEDETH_NAPI
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
	struct net_device *dev = np->dev;
	u8 __iomem *base = get_hwbase(dev);
	unsigned long flags;
	int pkts, retcode;

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		pkts = nv_rx_process(dev, budget);
		retcode = nv_alloc_rx(dev);
	} else {
		pkts = nv_rx_process_optimized(dev, budget);
		retcode = nv_alloc_rx_optimized(dev);
	}

	if (retcode) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irqrestore(&np->lock, flags);
	}

	if (pkts < budget) {
		/* re-enable receive interrupts */
		spin_lock_irqsave(&np->lock, flags);

		__netif_rx_complete(dev, napi);

		np->irqmask |= NVREG_IRQ_RX_ALL;
		if (np->msi_flags & NV_MSI_X_ENABLED)
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
		else
			writel(np->irqmask, base + NvRegIrqMask);

		spin_unlock_irqrestore(&np->lock, flags);
	}
	return pkts;
}
#endif

#ifdef CONFIG_FORCEDETH_NAPI
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
	writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);

	if (events) {
		netif_rx_schedule(dev, &np->napi);
		/* disable receive interrupts on the nic */
		writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
		pci_push(base);
	}
	return IRQ_HANDLED;
}
#else
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock_irqsave(&np->lock, flags);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock_irqrestore(&np->lock, flags);
			}
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#endif
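/*
 * Editor's note: nv_napi_poll() above follows the standard NAPI contract:
 * consume at most `budget` packets, and only when fewer than `budget`
 * were processed complete the poll and unmask RX interrupts. The generic
 * shape, with hypothetical device_* helpers standing in for the driver's
 * rx-process and unmask code (illustrative only, compiled out):
 */
#if 0
static int napi_poll_shape_sketch(struct net_device *dev,
				  struct napi_struct *napi, int budget)
{
	int done = device_rx_process(dev, budget);	/* hypothetical */

	if (done < budget) {
		/* ring drained: leave the poll list, back to irq mode */
		__netif_rx_complete(dev, napi);
		device_unmask_rx_irq(dev);		/* hypothetical */
	}
	return done;	/* done == budget keeps us on the poll list */
}
#endif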
static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		/* check tx in case we reached max loop limit in tx isr */
		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irqsave(&np->lock, flags);
			nv_link_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irqsave(&np->lock, flags);
			nv_linkchange(dev);
			spin_unlock_irqrestore(&np->lock, flags);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & NVREG_IRQ_RECOVER_ERROR) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irq(&np->lock);
			break;
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}

static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);

	return IRQ_RETVAL(1);
}

static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to an MSI-X vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}

static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;
	irqreturn_t (*handler)(int foo, void *data);

	if (intr_test) {
		handler = nv_nic_irq_test;
	} else {
		if (np->desc_ver == DESC_VER_3)
			handler = nv_nic_irq_optimized;
		else
			handler = nv_nic_irq;
	}

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				goto out_err;
			}
			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
			goto out_err;
	}

	return 0;
out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}
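/*
 * Editor's note: worked example for set_msix_vector_map() above. Each of
 * the 16 interrupt sources gets a 4-bit field in NvRegMSIXMap0/1 naming
 * its MSI-X vector. Routing irq bits 4..7 (an assumed 0x000000f0-style
 * mask) to vector 1 builds msixmap as follows (illustrative only,
 * compiled out):
 */
#if 0
static void msix_map_example(void)
{
	u32 vector = 1, irqmask = 0x000000f0, msixmap = 0;
	int i;

	for (i = 0; i < 8; i++)
		if ((irqmask >> i) & 0x1)
			msixmap |= vector << (i << 2);
	/* msixmap == 0x11110000: nibbles 4..7 each hold vector id 1 */
}
#endif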
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}

static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable the irq(s), then re-enable interrupts on the nic;
	 * we have to do this before calling nv_nic_irq because that may
	 * decide to do otherwise.
	 */
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	np->nic_poll_irq = 0;

	if (np->recover_error) {
		np->recover_error = 0;
		printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rx(dev);
			nv_drain_tx(dev);
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rx(dev);
			nv_start_tx(dev);
			spin_unlock(&np->lock);
			netif_tx_unlock_bh(dev);
		}
	}

	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		if (np->desc_ver == DESC_VER_3)
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			nv_nic_irq_rx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			nv_nic_irq_tx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			nv_nic_irq_other(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif

static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	nv_get_hw_stats(dev);

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
}

static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, "forcedeth");
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}
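/*
 * Editor's note: the WoL hooks above only toggle magic-packet wake-up
 * (WAKE_MAGIC). From user space this path is typically driven via
 * ethtool, e.g. (illustrative shell usage, not driver code):
 *
 *	ethtool -s eth0 wol g	# enable wake-on-magic-packet
 *	ethtool -s eth0 wol d	# disable wake-up
 */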
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check.
		 */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}

static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;

	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - no one should need that. */

		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |=  ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick,
			 * and cause autoneg to start */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |=  ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}

	if (netif_running(dev)) {
		nv_start_rx(dev);
		nv_start_tx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}

#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	/* note: '<', not '<=' - the buffer holds exactly register_size bytes */
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}

static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			spin_unlock(&np->lock);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}

		if (netif_running(dev)) {
			nv_start_rx(dev);
			nv_start_tx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}

static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}

static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
					    &ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					    sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
					    &ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}
		if (rx_skbuff)
			kfree(rx_skbuff);
		if (tx_skbuff)
			kfree(tx_skbuff);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = (struct nv_skb_map*)rx_skbuff;
	np->tx_skb = (struct nv_skb_map*)tx_skbuff;
	np->ring_addr = ring_addr;

	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
exit:
	return -ENOMEM;
}
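/*
 * Editor's note: nv_set_ringparam() above follows the usual safe-resize
 * order: allocate the new rings first, and only if that succeeds stop the
 * engines, drain and free the old rings, swap in the new ones and restart.
 * An allocation failure therefore leaves the running configuration intact.
 * From user space this path is exercised with e.g. (illustrative):
 *
 *	ethtool -G eth0 rx 512 tx 512
 */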
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}

static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n",
		       dev->name);
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |=  ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rx(dev);
		nv_start_tx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}

static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return (np->rx_csum) != 0;
}

static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (data) {
			np->rx_csum = 1;
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else {
			np->rx_csum = 0;
			/* vlan is dependent on rx checksum offload */
			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		}
		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}

	return retcode;
}

static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_hw_csum(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_get_sset_count(struct net_device *dev, int sset)
{
	struct fe_priv *np = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_TEST:
		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
			return NV_TEST_COUNT_EXTENDED;
		else
			return NV_TEST_COUNT_BASE;
	case ETH_SS_STATS:
		if (np->driver_data & DEV_HAS_STATISTICS_V1)
			return NV_DEV_STATISTICS_V1_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
			return NV_DEV_STATISTICS_V2_COUNT;
		else
			return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* update stats */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
}

static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}

static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);

	} while (nv_registers_test[++i].reg != 0);

	return 1;
}

static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}
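/*
 * Editor's note: nv_register_test() above uses the classic writable-bits
 * self test: XOR the maskable bits, write, read back, compare under the
 * mask, then restore. One iteration of that pattern in isolation
 * (illustrative only, compiled out, hypothetical helper name):
 */
#if 0
static int reg_toggle_test_once_sketch(u8 __iomem *base, u32 reg, u32 mask)
{
	u32 orig = readl(base + reg);
	u32 toggled = orig ^ mask;	/* flip the bits under test */
	u32 reread;

	writel(toggled, base + reg);
	reread = readl(base + reg);
	writel(orig, base + reg);	/* restore the original value */

	return (reread & mask) == (toggled & mask);	/* 1 = pass */
}
#endif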
static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rx(dev);
	nv_start_tx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	if (!tx_skb) {
		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
			 " of %s\n", dev->name);
		ret = 0;
		goto out;
	}
	/* tx buffer: the device reads it, so map towards the device */
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_TODEVICE);
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
		np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);

	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR) {
			ret = 0;
		}
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
				dev->name, len, pkt_len);
		} else {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
						dev->name, i);
					break;
				}
			}
		}
	} else {
		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
	}

	pci_unmap_single(np->pci_dev, test_dma_addr,
		       (skb_end_pointer(tx_skb) - tx_skb->data),
		       PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);
 out:
	/* stop engines */
	nv_stop_rx(dev);
	nv_stop_tx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rx(dev);
	nv_drain_tx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}

static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;

	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
			napi_disable(&np->napi);
#endif
			netif_tx_lock_bh(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			} else {
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			}
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rx(dev);
			nv_drain_tx(dev);
			spin_unlock_irq(&np->lock);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rx(dev);
			nv_start_tx(dev);
			netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
			napi_enable(&np->napi);
#endif
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}

static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
		break;
	}
}

static const struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.get_sset_count = nv_get_sset_count,
	.self_test = nv_self_test,
};

static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}

/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 tx_ctrl, mgmt_sema;

	for (i = 0; i < 10; i++) {
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;

	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
			return 1;
		else
			udelay(50);
	}

	return 0;
}
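/*
 * Editor's note: nv_mgmt_acquire_sema() above is a poll-then-claim
 * handshake: wait for the management unit to drop its semaphore, claim
 * the host semaphore, then re-read to verify that the claim stuck and
 * the mgmt side is still free. The shape of that pattern (illustrative
 * only, compiled out, hypothetical helper and parameter names):
 */
#if 0
static int acquire_shared_sema_sketch(u8 __iomem *reg_base, u32 reg_off,
				      u32 mgmt_mask, u32 mgmt_free,
				      u32 host_bit, u32 host_mask)
{
	u32 v = readl(reg_base + reg_off);

	if ((v & mgmt_mask) != mgmt_free)
		return 0;				/* other side owns it */

	writel(v | host_bit, reg_base + reg_off);	/* claim */
	v = readl(reg_base + reg_off);			/* verify */
	return (v & host_mask) == host_bit &&
	       (v & mgmt_mask) == mgmt_free;
}
#endif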
writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); if (np->wolenabled) writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); i = readl(base + NvRegPowerState); if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); pci_push(base); udelay(10); writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); nv_disable_hw_interrupts(dev, np->irqmask); pci_push(base); writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); pci_push(base); if (nv_request_irq(dev, 0)) { goto out_drain; } /* ask for interrupts */ nv_enable_hw_interrupts(dev, np->irqmask); spin_lock_irq(&np->lock); writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); writel(0, base + NvRegMulticastAddrB); writel(0, base + NvRegMulticastMaskA); writel(0, base + NvRegMulticastMaskB); writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); /* One manual link speed update: Interrupts are enabled, future link * speed changes cause interrupts and are handled by nv_link_irq(). */ { u32 miistat; miistat = readl(base + NvRegMIIStatus); writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); } /* set linkspeed to invalid value, thus force nv_update_linkspeed * to init hw */ np->linkspeed = 0; ret = nv_update_linkspeed(dev); nv_start_rx(dev); nv_start_tx(dev); netif_start_queue(dev); #ifdef CONFIG_FORCEDETH_NAPI napi_enable(&np->napi); #endif if (ret) { netif_carrier_on(dev); } else { printk(KERN_INFO "%s: no link during initialization.\n", dev->name); netif_carrier_off(dev); } if (oom) mod_timer(&np->oom_kick, jiffies + OOM_REFILL); /* start statistics timer */ if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); spin_unlock_irq(&np->lock); return 0; out_drain: drain_ring(dev); return ret; } static int nv_close(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); u8 __iomem *base; spin_lock_irq(&np->lock); np->in_shutdown = 1; spin_unlock_irq(&np->lock); #ifdef CONFIG_FORCEDETH_NAPI napi_disable(&np->napi); #endif synchronize_irq(dev->irq); del_timer_sync(&np->oom_kick); del_timer_sync(&np->nic_poll); del_timer_sync(&np->stats_poll); netif_stop_queue(dev); spin_lock_irq(&np->lock); nv_stop_tx(dev); nv_stop_rx(dev); nv_txrx_reset(dev); /* disable interrupts on the nic or we will lock up */ base = get_hwbase(dev); nv_disable_hw_interrupts(dev, np->irqmask); pci_push(base); dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); spin_unlock_irq(&np->lock); nv_free_irq(dev); drain_ring(dev); if (np->wolenabled) { writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); nv_start_rx(dev); } /* FIXME: power down nic */ return 0; } static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { struct net_device *dev; struct fe_priv *np; unsigned long addr; u8 __iomem *base; int err, i; u32 powerstate, txreg; u32 phystate_orig = 0, phystate; int phyinitialized = 0; DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof(struct fe_priv)); err = -ENOMEM; if (!dev) goto out; np = netdev_priv(dev); np->dev = dev; np->pci_dev = pci_dev; spin_lock_init(&np->lock); SET_NETDEV_DEV(dev, &pci_dev->dev); init_timer(&np->oom_kick); np->oom_kick.data = (unsigned long) dev; np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ init_timer(&np->nic_poll); np->nic_poll.data 
= (unsigned long) dev; np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ init_timer(&np->stats_poll); np->stats_poll.data = (unsigned long) dev; np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ err = pci_enable_device(pci_dev); if (err) { printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n", err, pci_name(pci_dev)); goto out_free; } pci_set_master(pci_dev); err = pci_request_regions(pci_dev, DRV_NAME); if (err < 0) goto out_disable; if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) np->register_size = NV_PCI_REGSZ_VER3; else if (id->driver_data & DEV_HAS_STATISTICS_V1) np->register_size = NV_PCI_REGSZ_VER2; else np->register_size = NV_PCI_REGSZ_VER1; err = -EINVAL; addr = 0; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), pci_resource_len(pci_dev, i), pci_resource_flags(pci_dev, i)); if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && pci_resource_len(pci_dev, i) >= np->register_size) { addr = pci_resource_start(pci_dev, i); break; } } if (i == DEVICE_COUNT_RESOURCE) { printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n", pci_name(pci_dev)); goto out_relreg; } /* copy of driver data */ np->driver_data = id->driver_data; /* handle different descriptor versions */ if (id->driver_data & DEV_HAS_HIGH_DMA) { /* packet format 3: supports 40-bit addressing */ np->desc_ver = DESC_VER_3; np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; if (dma_64bit) { if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", pci_name(pci_dev)); } else { dev->features |= NETIF_F_HIGHDMA; printk(KERN_INFO "forcedeth: using HIGHDMA\n"); } if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) { printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n", pci_name(pci_dev)); } } } else if (id->driver_data & DEV_HAS_LARGEDESC) { /* packet format 2: supports jumbo frames */ np->desc_ver = DESC_VER_2; np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; } else { /* original packet format */ np->desc_ver = DESC_VER_1; np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; } np->pkt_limit = NV_PKTLIMIT_1; if (id->driver_data & DEV_HAS_LARGEDESC) np->pkt_limit = NV_PKTLIMIT_2; if (id->driver_data & DEV_HAS_CHECKSUM) { np->rx_csum = 1; np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; dev->features |= NETIF_F_TSO; } np->vlanctl_bits = 0; if (id->driver_data & DEV_HAS_VLAN) { np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; dev->vlan_rx_register = nv_vlan_rx_register; } np->msi_flags = 0; if ((id->driver_data & DEV_HAS_MSI) && msi) { np->msi_flags |= NV_MSI_CAPABLE; } if ((id->driver_data & DEV_HAS_MSI_X) && msix) { np->msi_flags |= NV_MSI_X_CAPABLE; } np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) { np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; } err = -ENOMEM; np->base = ioremap(addr, np->register_size); if (!np->base) goto out_relreg; dev->base_addr = (unsigned long)np->base; dev->irq = pci_dev->irq; np->rx_ring_size = RX_RING_DEFAULT; np->tx_ring_size = TX_RING_DEFAULT; if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { np->rx_ring.orig = pci_alloc_consistent(pci_dev, 
sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), &np->ring_addr); if (!np->rx_ring.orig) goto out_unmap; np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; } else { np->rx_ring.ex = pci_alloc_consistent(pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), &np->ring_addr); if (!np->rx_ring.ex) goto out_unmap; np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; } np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); if (!np->rx_skb || !np->tx_skb) goto out_freering; dev->open = nv_open; dev->stop = nv_close; if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) dev->hard_start_xmit = nv_start_xmit; else dev->hard_start_xmit = nv_start_xmit_optimized; dev->get_stats = nv_get_stats; dev->change_mtu = nv_change_mtu; dev->set_mac_address = nv_set_mac_address; dev->set_multicast_list = nv_set_multicast; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = nv_poll_controller; #endif #ifdef CONFIG_FORCEDETH_NAPI netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); #endif SET_ETHTOOL_OPS(dev, &ops); dev->tx_timeout = nv_tx_timeout; dev->watchdog_timeo = NV_WATCHDOG_TIMEO; pci_set_drvdata(pci_dev, dev); /* read the mac address */ base = get_hwbase(dev); np->orig_mac[0] = readl(base + NvRegMacAddrA); np->orig_mac[1] = readl(base + NvRegMacAddrB); /* check the workaround bit for correct mac address order */ txreg = readl(base + NvRegTransmitPoll); if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) || (id->driver_data & DEV_HAS_CORRECT_MACADDR)) { /* mac address is already in correct order */ dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; } else { /* need to reverse mac address to correct order */ dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; /* set permanent address to be correct aswell */ np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); } memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); if (!is_valid_ether_addr(dev->perm_addr)) { /* * Bad mac address. At least one bios sets the mac address * to 01:23:45:67:89:ab */ printk(KERN_ERR "%s: Invalid Mac address detected: %s\n", pci_name(pci_dev), print_mac(mac, dev->dev_addr)); printk(KERN_ERR "Please complain to your hardware vendor. 
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %s\n",
		pci_name(pci_dev), print_mac(mac, dev->dev_addr));

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
			np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
				pci_name(pci_dev), np->mac_in_use);
			for (i = 0; i < 5000; i++) {
				msleep(1);
				if (nv_mgmt_acquire_sema(dev)) {
					/* management unit setup the phy already? */
					if ((readl(base + NvRegTransmitterControl) &
					     NVREG_XMITCTL_SYNC_MASK) ==
					    NVREG_XMITCTL_SYNC_PHY_INIT) {
						/* phy is inited by mgmt unit */
						phyinitialized = 1;
						dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
							pci_name(pci_dev));
					} else {
						/* we need to init the phy */
					}
					break;
				}
			}
		}
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 33) {
		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
		       pci_name(pci_dev));
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
		goto out_error;
	}
	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
	       dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
	       pci_name(pci_dev));

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}

static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}

#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);
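	/*
	 * Detach before closing so the network stack stops using the
	 * device; PCI config space is saved and wake-up armed only after
	 * the hardware has been shut down.
	 */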
	// Gross.
	nv_close(dev);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
	return 0;
}

static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int rc = 0;

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	rc = nv_open(dev);
out:
	return rc;
}
#else
#define nv_suspend NULL
#define nv_resume NULL
#endif /* CONFIG_PM */

static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{0,},
};

static struct pci_driver driver = {
	.name = "forcedeth",
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
	.suspend = nv_suspend,
	.resume = nv_resume,
};

static int __init init_nic(void)
{
	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);
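/*
 * Example module load (hypothetical command line; parameter semantics are
 * taken from the MODULE_PARM_DESC strings above):
 *
 *   modprobe forcedeth optimization_mode=1 msi=0 msix=0
 *
 * selects the timer-driven (CPU) interrupt mode with MSI and MSI-X
 * disabled; in that mode poll_interval controls how often the timer
 * interrupt fires.
 */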