Diffstat (limited to 'drivers/net/pasemi_mac.c')
-rw-r--r--  drivers/net/pasemi_mac.c  1019
1 file changed, 1019 insertions, 0 deletions
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
new file mode 100644
index 000000000000..d670ac74824f
--- /dev/null
+++ b/drivers/net/pasemi_mac.c
@@ -0,0 +1,1019 @@
1 | /* | ||
2 | * Copyright (C) 2006-2007 PA Semi, Inc | ||
3 | * | ||
4 | * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #include <linux/init.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/pci.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/dmaengine.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/netdevice.h> | ||
27 | #include <linux/etherdevice.h> | ||
28 | #include <asm/dma-mapping.h> | ||
29 | #include <linux/in.h> | ||
30 | #include <linux/skbuff.h> | ||
31 | |||
32 | #include <linux/ip.h> | ||
33 | #include <linux/tcp.h> | ||
34 | #include <net/checksum.h> | ||
35 | |||
36 | #include "pasemi_mac.h" | ||
37 | |||
38 | |||
39 | /* TODO list | ||
40 | * | ||
41 | * - Get rid of pci_{read,write}_config(), map registers with ioremap | ||
42 | * for performance | ||
43 | * - PHY support | ||
44 | * - Multicast support | ||
45 | * - Large MTU support | ||
46 | * - Other performance improvements | ||
47 | */ | ||
48 | |||
49 | |||
50 | /* Must be a power of two */ | ||
51 | #define RX_RING_SIZE 512 | ||
52 | #define TX_RING_SIZE 512 | ||
53 | |||
54 | #define TX_DESC(mac, num) ((mac)->tx->desc[(num) & (TX_RING_SIZE-1)]) | ||
55 | #define TX_DESC_INFO(mac, num) ((mac)->tx->desc_info[(num) & (TX_RING_SIZE-1)]) | ||
56 | #define RX_DESC(mac, num) ((mac)->rx->desc[(num) & (RX_RING_SIZE-1)]) | ||
57 | #define RX_DESC_INFO(mac, num) ((mac)->rx->desc_info[(num) & (RX_RING_SIZE-1)]) | ||
58 | #define RX_BUFF(mac, num) ((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)]) | ||
59 | |||
60 | #define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ | ||
61 | |||
62 | /* XXXOJN these should come out of the device tree some day */ | ||
63 | #define PAS_DMA_CAP_BASE 0xe00d0040 | ||
64 | #define PAS_DMA_CAP_SIZE 0x100 | ||
65 | #define PAS_DMA_COM_BASE 0xe00d0100 | ||
66 | #define PAS_DMA_COM_SIZE 0x100 | ||
67 | |||
68 | static struct pasdma_status *dma_status; | ||
69 | |||
70 | static int pasemi_get_mac_addr(struct pasemi_mac *mac) | ||
71 | { | ||
72 | struct pci_dev *pdev = mac->pdev; | ||
73 | struct device_node *dn = pci_device_to_OF_node(pdev); | ||
74 | const u8 *maddr; | ||
75 | u8 addr[6]; | ||
76 | |||
77 | if (!dn) { | ||
78 | dev_dbg(&pdev->dev, | ||
79 | "No device node for mac, not configuring\n"); | ||
80 | return -ENOENT; | ||
81 | } | ||
82 | |||
83 | maddr = get_property(dn, "mac-address", NULL); | ||
84 | if (maddr == NULL) { | ||
85 | dev_warn(&pdev->dev, | ||
86 | "no mac address in device tree, not configuring\n"); | ||
87 | return -ENOENT; | ||
88 | } | ||
89 | |||
90 | if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0], | ||
91 | &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) { | ||
92 | dev_warn(&pdev->dev, | ||
93 | "can't parse mac address, not configuring\n"); | ||
94 | return -EINVAL; | ||
95 | } | ||
96 | |||
97 | memcpy(mac->mac_addr, addr, sizeof(addr)); | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | static int pasemi_mac_setup_rx_resources(struct net_device *dev) | ||
102 | { | ||
103 | struct pasemi_mac_rxring *ring; | ||
104 | struct pasemi_mac *mac = netdev_priv(dev); | ||
105 | int chan_id = mac->dma_rxch; | ||
106 | |||
107 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); | ||
108 | |||
109 | if (!ring) | ||
110 | goto out_ring; | ||
111 | |||
112 | spin_lock_init(&ring->lock); | ||
113 | |||
114 | ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) * | ||
115 | RX_RING_SIZE, GFP_KERNEL); | ||
116 | |||
117 | if (!ring->desc_info) | ||
118 | goto out_desc_info; | ||
119 | |||
120 | /* Allocate descriptors */ | ||
121 | ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev, | ||
122 | RX_RING_SIZE * | ||
123 | sizeof(struct pas_dma_xct_descr), | ||
124 | &ring->dma, GFP_KERNEL); | ||
125 | |||
126 | if (!ring->desc) | ||
127 | goto out_desc; | ||
128 | |||
129 | memset(ring->desc, 0, RX_RING_SIZE * sizeof(struct pas_dma_xct_descr)); | ||
130 | |||
131 | ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, | ||
132 | RX_RING_SIZE * sizeof(u64), | ||
133 | &ring->buf_dma, GFP_KERNEL); | ||
134 | if (!ring->buffers) | ||
135 | goto out_buffers; | ||
136 | |||
137 | memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64)); | ||
138 | |||
139 | pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id), | ||
140 | PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma)); | ||
141 | |||
142 | pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id), | ||
143 | PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) | | ||
144 | PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 2)); | ||
145 | |||
146 | pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id), | ||
147 | PAS_DMA_RXCHAN_CFG_HBU(1)); | ||
148 | |||
149 | pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if), | ||
150 | PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers))); | ||
151 | |||
152 | pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if), | ||
153 | PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) | | ||
154 | PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3)); | ||
155 | |||
156 | ring->next_to_fill = 0; | ||
157 | ring->next_to_clean = 0; | ||
158 | |||
159 | snprintf(ring->irq_name, sizeof(ring->irq_name), | ||
160 | "%s rx", dev->name); | ||
161 | mac->rx = ring; | ||
162 | |||
163 | return 0; | ||
164 | |||
165 | out_buffers: | ||
166 | dma_free_coherent(&mac->dma_pdev->dev, | ||
167 | RX_RING_SIZE * sizeof(struct pas_dma_xct_descr), | ||
168 | mac->rx->desc, mac->rx->dma); | ||
169 | out_desc: | ||
170 | kfree(ring->desc_info); | ||
171 | out_desc_info: | ||
172 | kfree(ring); | ||
173 | out_ring: | ||
174 | return -ENOMEM; | ||
175 | } | ||
176 | |||
177 | |||
178 | static int pasemi_mac_setup_tx_resources(struct net_device *dev) | ||
179 | { | ||
180 | struct pasemi_mac *mac = netdev_priv(dev); | ||
181 | u32 val; | ||
182 | int chan_id = mac->dma_txch; | ||
183 | struct pasemi_mac_txring *ring; | ||
184 | |||
185 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); | ||
186 | if (!ring) | ||
187 | goto out_ring; | ||
188 | |||
189 | spin_lock_init(&ring->lock); | ||
190 | |||
191 | ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) * | ||
192 | TX_RING_SIZE, GFP_KERNEL); | ||
193 | if (!ring->desc_info) | ||
194 | goto out_desc_info; | ||
195 | |||
196 | /* Allocate descriptors */ | ||
197 | ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev, | ||
198 | TX_RING_SIZE * | ||
199 | sizeof(struct pas_dma_xct_descr), | ||
200 | &ring->dma, GFP_KERNEL); | ||
201 | if (!ring->desc) | ||
202 | goto out_desc; | ||
203 | |||
204 | memset(ring->desc, 0, TX_RING_SIZE * sizeof(struct pas_dma_xct_descr)); | ||
205 | |||
206 | pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id), | ||
207 | PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma)); | ||
208 | val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32); | ||
209 | val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2); | ||
210 | |||
211 | pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val); | ||
212 | |||
213 | pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id), | ||
214 | PAS_DMA_TXCHAN_CFG_TY_IFACE | | ||
215 | PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) | | ||
216 | PAS_DMA_TXCHAN_CFG_UP | | ||
217 | PAS_DMA_TXCHAN_CFG_WT(2)); | ||
218 | |||
219 | ring->next_to_use = 0; | ||
220 | ring->next_to_clean = 0; | ||
221 | |||
222 | snprintf(ring->irq_name, sizeof(ring->irq_name), | ||
223 | "%s tx", dev->name); | ||
224 | mac->tx = ring; | ||
225 | |||
226 | return 0; | ||
227 | |||
228 | out_desc: | ||
229 | kfree(ring->desc_info); | ||
230 | out_desc_info: | ||
231 | kfree(ring); | ||
232 | out_ring: | ||
233 | return -ENOMEM; | ||
234 | } | ||
235 | |||
236 | static void pasemi_mac_free_tx_resources(struct net_device *dev) | ||
237 | { | ||
238 | struct pasemi_mac *mac = netdev_priv(dev); | ||
239 | unsigned int i; | ||
240 | struct pasemi_mac_buffer *info; | ||
241 | struct pas_dma_xct_descr *dp; | ||
242 | |||
243 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
244 | info = &TX_DESC_INFO(mac, i); | ||
245 | dp = &TX_DESC(mac, i); | ||
246 | if (info->dma) { | ||
247 | if (info->skb) { | ||
248 | pci_unmap_single(mac->dma_pdev, | ||
249 | info->dma, | ||
250 | info->skb->len, | ||
251 | PCI_DMA_TODEVICE); | ||
252 | dev_kfree_skb_any(info->skb); | ||
253 | } | ||
254 | info->dma = 0; | ||
255 | info->skb = NULL; | ||
256 | dp->mactx = 0; | ||
257 | dp->ptr = 0; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | dma_free_coherent(&mac->dma_pdev->dev, | ||
262 | TX_RING_SIZE * sizeof(struct pas_dma_xct_descr), | ||
263 | mac->tx->desc, mac->tx->dma); | ||
264 | |||
265 | kfree(mac->tx->desc_info); | ||
266 | kfree(mac->tx); | ||
267 | mac->tx = NULL; | ||
268 | } | ||
269 | |||
270 | static void pasemi_mac_free_rx_resources(struct net_device *dev) | ||
271 | { | ||
272 | struct pasemi_mac *mac = netdev_priv(dev); | ||
273 | unsigned int i; | ||
274 | struct pasemi_mac_buffer *info; | ||
275 | struct pas_dma_xct_descr *dp; | ||
276 | |||
277 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
278 | info = &RX_DESC_INFO(mac, i); | ||
279 | dp = &RX_DESC(mac, i); | ||
280 | if (info->dma) { | ||
281 | if (info->skb) { | ||
282 | pci_unmap_single(mac->dma_pdev, | ||
283 | info->dma, | ||
284 | info->skb->len, | ||
285 | PCI_DMA_FROMDEVICE); | ||
286 | dev_kfree_skb_any(info->skb); | ||
287 | } | ||
288 | info->dma = 0; | ||
289 | info->skb = NULL; | ||
290 | dp->macrx = 0; | ||
291 | dp->ptr = 0; | ||
292 | } | ||
293 | } | ||
294 | |||
295 | dma_free_coherent(&mac->dma_pdev->dev, | ||
296 | RX_RING_SIZE * sizeof(struct pas_dma_xct_descr), | ||
297 | mac->rx->desc, mac->rx->dma); | ||
298 | |||
299 | dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), | ||
300 | mac->rx->buffers, mac->rx->buf_dma); | ||
301 | |||
302 | kfree(mac->rx->desc_info); | ||
303 | kfree(mac->rx); | ||
304 | mac->rx = NULL; | ||
305 | } | ||
306 | |||
307 | static void pasemi_mac_replenish_rx_ring(struct net_device *dev) | ||
308 | { | ||
309 | struct pasemi_mac *mac = netdev_priv(dev); | ||
310 | unsigned int i; | ||
311 | int start = mac->rx->next_to_fill; | ||
312 | unsigned int count; | ||
313 | |||
314 | count = (mac->rx->next_to_clean + RX_RING_SIZE - | ||
315 | mac->rx->next_to_fill) & (RX_RING_SIZE - 1); | ||
316 | |||
317 | /* Check to see if we're doing first-time setup */ | ||
318 | if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0)) | ||
319 | count = RX_RING_SIZE; | ||
320 | |||
321 | if (count <= 0) | ||
322 | return; | ||
323 | |||
324 | for (i = start; i < start + count; i++) { | ||
325 | struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i); | ||
326 | u64 *buff = &RX_BUFF(mac, i); | ||
327 | struct sk_buff *skb; | ||
328 | dma_addr_t dma; | ||
329 | |||
330 | skb = dev_alloc_skb(BUF_SIZE); | ||
331 | |||
332 | if (!skb) { | ||
333 | count = i - start; | ||
334 | break; | ||
335 | } | ||
336 | |||
337 | skb->dev = dev; | ||
338 | |||
339 | dma = pci_map_single(mac->dma_pdev, skb->data, skb->len, | ||
340 | PCI_DMA_FROMDEVICE); | ||
341 | |||
342 | if (dma_mapping_error(dma)) { | ||
343 | dev_kfree_skb_irq(info->skb); | ||
344 | count = i - start; | ||
345 | break; | ||
346 | } | ||
347 | |||
348 | info->skb = skb; | ||
349 | info->dma = dma; | ||
350 | *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma); | ||
351 | } | ||
352 | |||
353 | wmb(); | ||
354 | |||
355 | pci_write_config_dword(mac->dma_pdev, | ||
356 | PAS_DMA_RXCHAN_INCR(mac->dma_rxch), | ||
357 | count); | ||
358 | pci_write_config_dword(mac->dma_pdev, | ||
359 | PAS_DMA_RXINT_INCR(mac->dma_if), | ||
360 | count); | ||
361 | |||
362 | mac->rx->next_to_fill += count; | ||
363 | } | ||
364 | |||
365 | static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit) | ||
366 | { | ||
367 | unsigned int i; | ||
368 | int start, count; | ||
369 | |||
370 | spin_lock(&mac->rx->lock); | ||
371 | |||
372 | start = mac->rx->next_to_clean; | ||
373 | count = 0; | ||
374 | |||
375 | for (i = start; i < (start + RX_RING_SIZE) && count < limit; i++) { | ||
376 | struct pas_dma_xct_descr *dp; | ||
377 | struct pasemi_mac_buffer *info; | ||
378 | struct sk_buff *skb; | ||
379 | unsigned int j, len; | ||
380 | dma_addr_t dma; | ||
381 | |||
382 | rmb(); | ||
383 | |||
384 | dp = &RX_DESC(mac, i); | ||
385 | |||
386 | if (!(dp->macrx & XCT_MACRX_O)) | ||
387 | break; | ||
388 | |||
389 | count++; | ||
390 | |||
391 | info = NULL; | ||
392 | |||
393 | /* We have to scan for our skb since there's no way | ||
394 | * to back-map them from the descriptor, and if we | ||
395 | * have several receive channels then they might not | ||
396 | * show up in the same order as they were put on the | ||
397 | * interface ring. | ||
398 | */ | ||
399 | |||
400 | dma = (dp->ptr & XCT_PTR_ADDR_M); | ||
401 | for (j = start; j < (start + RX_RING_SIZE); j++) { | ||
402 | info = &RX_DESC_INFO(mac, j); | ||
403 | if (info->dma == dma) | ||
404 | break; | ||
405 | } | ||
406 | |||
407 | BUG_ON(!info); | ||
408 | BUG_ON(info->dma != dma); | ||
409 | |||
410 | pci_unmap_single(mac->dma_pdev, info->dma, info->skb->len, | ||
411 | PCI_DMA_FROMDEVICE); | ||
412 | |||
413 | skb = info->skb; | ||
414 | |||
415 | len = (dp->macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; | ||
416 | |||
417 | skb_put(skb, len); | ||
418 | |||
419 | skb->protocol = eth_type_trans(skb, mac->netdev); | ||
420 | |||
421 | if ((dp->macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) { | ||
422 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
423 | skb->csum = (dp->macrx & XCT_MACRX_CSUM_M) >> | ||
424 | XCT_MACRX_CSUM_S; | ||
425 | } else | ||
426 | skb->ip_summed = CHECKSUM_NONE; | ||
427 | |||
428 | mac->stats.rx_bytes += len; | ||
429 | mac->stats.rx_packets++; | ||
430 | |||
431 | netif_receive_skb(skb); | ||
432 | |||
433 | info->dma = 0; | ||
434 | info->skb = NULL; | ||
435 | dp->ptr = 0; | ||
436 | dp->macrx = 0; | ||
437 | } | ||
438 | |||
439 | mac->rx->next_to_clean += count; | ||
440 | pasemi_mac_replenish_rx_ring(mac->netdev); | ||
441 | |||
442 | spin_unlock(&mac->rx->lock); | ||
443 | |||
444 | return count; | ||
445 | } | ||
446 | |||
447 | static int pasemi_mac_clean_tx(struct pasemi_mac *mac) | ||
448 | { | ||
449 | int i; | ||
450 | struct pasemi_mac_buffer *info; | ||
451 | struct pas_dma_xct_descr *dp; | ||
452 | int start, count; | ||
453 | int flags; | ||
454 | |||
455 | spin_lock_irqsave(&mac->tx->lock, flags); | ||
456 | |||
457 | start = mac->tx->next_to_clean; | ||
458 | count = 0; | ||
459 | |||
460 | for (i = start; i < mac->tx->next_to_use; i++) { | ||
461 | dp = &TX_DESC(mac, i); | ||
462 | if (!dp || (dp->mactx & XCT_MACTX_O)) | ||
463 | break; | ||
464 | |||
465 | count++; | ||
466 | |||
467 | info = &TX_DESC_INFO(mac, i); | ||
468 | |||
469 | pci_unmap_single(mac->dma_pdev, info->dma, | ||
470 | info->skb->len, PCI_DMA_TODEVICE); | ||
471 | dev_kfree_skb_irq(info->skb); | ||
472 | |||
473 | info->skb = NULL; | ||
474 | info->dma = 0; | ||
475 | dp->mactx = 0; | ||
476 | dp->ptr = 0; | ||
477 | } | ||
478 | mac->tx->next_to_clean += count; | ||
479 | spin_unlock_irqrestore(&mac->tx->lock, flags); | ||
480 | |||
481 | return count; | ||
482 | } | ||
483 | |||
484 | |||
485 | static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) | ||
486 | { | ||
487 | struct net_device *dev = data; | ||
488 | struct pasemi_mac *mac = netdev_priv(dev); | ||
489 | unsigned int reg; | ||
490 | |||
491 | if (!(*mac->rx_status & PAS_STATUS_INT)) | ||
492 | return IRQ_NONE; | ||
493 | |||
494 | netif_rx_schedule(dev); | ||
495 | pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG, | ||
496 | PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0)); | ||
497 | |||
498 | reg = PAS_IOB_DMA_RXCH_RESET_PINTC | PAS_IOB_DMA_RXCH_RESET_SINTC | | ||
499 | PAS_IOB_DMA_RXCH_RESET_DINTC; | ||
500 | if (*mac->rx_status & PAS_STATUS_TIMER) | ||
501 | reg |= PAS_IOB_DMA_RXCH_RESET_TINTC; | ||
502 | |||
503 | pci_write_config_dword(mac->iob_pdev, | ||
504 | PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg); | ||
505 | |||
506 | |||
507 | return IRQ_HANDLED; | ||
508 | } | ||
509 | |||
510 | static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) | ||
511 | { | ||
512 | struct net_device *dev = data; | ||
513 | struct pasemi_mac *mac = netdev_priv(dev); | ||
514 | unsigned int reg; | ||
515 | int was_full; | ||
516 | |||
517 | was_full = mac->tx->next_to_clean - mac->tx->next_to_use == TX_RING_SIZE; | ||
518 | |||
519 | if (!(*mac->tx_status & PAS_STATUS_INT)) | ||
520 | return IRQ_NONE; | ||
521 | |||
522 | pasemi_mac_clean_tx(mac); | ||
523 | |||
524 | reg = PAS_IOB_DMA_TXCH_RESET_PINTC | PAS_IOB_DMA_TXCH_RESET_SINTC; | ||
525 | if (*mac->tx_status & PAS_STATUS_TIMER) | ||
526 | reg |= PAS_IOB_DMA_TXCH_RESET_TINTC; | ||
527 | |||
528 | pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), | ||
529 | reg); | ||
530 | |||
531 | if (was_full) | ||
532 | netif_wake_queue(dev); | ||
533 | |||
534 | return IRQ_HANDLED; | ||
535 | } | ||
536 | |||
537 | static int pasemi_mac_open(struct net_device *dev) | ||
538 | { | ||
539 | struct pasemi_mac *mac = netdev_priv(dev); | ||
540 | unsigned int flags; | ||
541 | int ret; | ||
542 | |||
543 | /* enable rx section */ | ||
544 | pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD, | ||
545 | PAS_DMA_COM_RXCMD_EN); | ||
546 | |||
547 | /* enable tx section */ | ||
548 | pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD, | ||
549 | PAS_DMA_COM_TXCMD_EN); | ||
550 | |||
551 | flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) | | ||
552 | PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) | | ||
553 | PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12); | ||
554 | |||
555 | pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags); | ||
556 | |||
557 | flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE | | ||
558 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE; | ||
559 | |||
560 | flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G; | ||
561 | |||
562 | pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch), | ||
563 | PAS_IOB_DMA_RXCH_CFG_CNTTH(30)); | ||
564 | |||
565 | pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG, | ||
566 | PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000)); | ||
567 | |||
568 | pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags); | ||
569 | |||
570 | ret = pasemi_mac_setup_rx_resources(dev); | ||
571 | if (ret) | ||
572 | goto out_rx_resources; | ||
573 | |||
574 | ret = pasemi_mac_setup_tx_resources(dev); | ||
575 | if (ret) | ||
576 | goto out_tx_resources; | ||
577 | |||
578 | pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL, | ||
579 | PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) | | ||
580 | PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch)); | ||
581 | |||
582 | /* enable rx if */ | ||
583 | pci_write_config_dword(mac->dma_pdev, | ||
584 | PAS_DMA_RXINT_RCMDSTA(mac->dma_if), | ||
585 | PAS_DMA_RXINT_RCMDSTA_EN); | ||
586 | |||
587 | /* enable rx channel */ | ||
588 | pci_write_config_dword(mac->dma_pdev, | ||
589 | PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), | ||
590 | PAS_DMA_RXCHAN_CCMDSTA_EN | | ||
591 | PAS_DMA_RXCHAN_CCMDSTA_DU); | ||
592 | |||
593 | /* enable tx channel */ | ||
594 | pci_write_config_dword(mac->dma_pdev, | ||
595 | PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), | ||
596 | PAS_DMA_TXCHAN_TCMDSTA_EN); | ||
597 | |||
598 | pasemi_mac_replenish_rx_ring(dev); | ||
599 | |||
600 | netif_start_queue(dev); | ||
601 | netif_poll_enable(dev); | ||
602 | |||
603 | ret = request_irq(mac->dma_pdev->irq + mac->dma_txch, | ||
604 | &pasemi_mac_tx_intr, IRQF_DISABLED, | ||
605 | mac->tx->irq_name, dev); | ||
606 | if (ret) { | ||
607 | dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", | ||
608 | mac->dma_pdev->irq + mac->dma_txch, ret); | ||
609 | goto out_tx_int; | ||
610 | } | ||
611 | |||
612 | ret = request_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch, | ||
613 | &pasemi_mac_rx_intr, IRQF_DISABLED, | ||
614 | mac->rx->irq_name, dev); | ||
615 | if (ret) { | ||
616 | dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", | ||
617 | mac->dma_pdev->irq + 20 + mac->dma_rxch, ret); | ||
618 | goto out_rx_int; | ||
619 | } | ||
620 | |||
621 | return 0; | ||
622 | |||
623 | out_rx_int: | ||
624 | free_irq(mac->dma_pdev->irq + mac->dma_txch, dev); | ||
625 | out_tx_int: | ||
626 | netif_poll_disable(dev); | ||
627 | netif_stop_queue(dev); | ||
628 | pasemi_mac_free_tx_resources(dev); | ||
629 | out_tx_resources: | ||
630 | pasemi_mac_free_rx_resources(dev); | ||
631 | out_rx_resources: | ||
632 | |||
633 | return ret; | ||
634 | } | ||
635 | |||
636 | #define MAX_RETRIES 5000 | ||
637 | |||
638 | static int pasemi_mac_close(struct net_device *dev) | ||
639 | { | ||
640 | struct pasemi_mac *mac = netdev_priv(dev); | ||
641 | unsigned int stat; | ||
642 | int retries; | ||
643 | |||
644 | netif_stop_queue(dev); | ||
645 | |||
646 | /* Clean out any pending buffers */ | ||
647 | pasemi_mac_clean_tx(mac); | ||
648 | pasemi_mac_clean_rx(mac, RX_RING_SIZE); | ||
649 | |||
650 | /* Disable interface */ | ||
651 | pci_write_config_dword(mac->dma_pdev, | ||
652 | PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), | ||
653 | PAS_DMA_TXCHAN_TCMDSTA_ST); | ||
654 | pci_write_config_dword(mac->dma_pdev, | ||
655 | PAS_DMA_RXINT_RCMDSTA(mac->dma_if), | ||
656 | PAS_DMA_RXINT_RCMDSTA_ST); | ||
657 | pci_write_config_dword(mac->dma_pdev, | ||
658 | PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), | ||
659 | PAS_DMA_RXCHAN_CCMDSTA_ST); | ||
660 | |||
661 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
662 | pci_read_config_dword(mac->dma_pdev, | ||
663 | PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), | ||
664 | &stat); | ||
665 | if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT) | ||
666 | break; | ||
667 | cond_resched(); | ||
668 | } | ||
669 | |||
670 | if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)) { | ||
671 | dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n"); | ||
672 | } | ||
673 | |||
674 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
675 | pci_read_config_dword(mac->dma_pdev, | ||
676 | PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), | ||
677 | &stat); | ||
678 | if (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT) | ||
679 | break; | ||
680 | cond_resched(); | ||
681 | } | ||
682 | |||
683 | if (!(stat & PAS_DMA_RXCHAN_CCMDSTA_ACT)) { | ||
684 | dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n"); | ||
685 | } | ||
686 | |||
687 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
688 | pci_read_config_dword(mac->dma_pdev, | ||
689 | PAS_DMA_RXINT_RCMDSTA(mac->dma_if), | ||
690 | &stat); | ||
691 | if (stat & PAS_DMA_RXINT_RCMDSTA_ACT) | ||
692 | break; | ||
693 | cond_resched(); | ||
694 | } | ||
695 | |||
696 | if (!(stat & PAS_DMA_RXINT_RCMDSTA_ACT)) { | ||
697 | dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n"); | ||
698 | } | ||
699 | |||
700 | /* Then, disable the channel. This must be done separately from | ||
701 | * stopping, since you can't disable when active. | ||
702 | */ | ||
703 | |||
704 | pci_write_config_dword(mac->dma_pdev, | ||
705 | PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0); | ||
706 | pci_write_config_dword(mac->dma_pdev, | ||
707 | PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0); | ||
708 | pci_write_config_dword(mac->dma_pdev, | ||
709 | PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); | ||
710 | |||
711 | free_irq(mac->dma_pdev->irq + mac->dma_txch, dev); | ||
712 | free_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch, dev); | ||
713 | |||
714 | /* Free resources */ | ||
715 | pasemi_mac_free_rx_resources(dev); | ||
716 | pasemi_mac_free_tx_resources(dev); | ||
717 | |||
718 | return 0; | ||
719 | } | ||
720 | |||
721 | static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) | ||
722 | { | ||
723 | struct pasemi_mac *mac = netdev_priv(dev); | ||
724 | struct pasemi_mac_txring *txring; | ||
725 | struct pasemi_mac_buffer *info; | ||
726 | struct pas_dma_xct_descr *dp; | ||
727 | u64 dflags; | ||
728 | dma_addr_t map; | ||
729 | int flags; | ||
730 | |||
731 | dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD; | ||
732 | |||
733 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
734 | switch (skb->nh.iph->protocol) { | ||
735 | case IPPROTO_TCP: | ||
736 | dflags |= XCT_MACTX_CSUM_TCP; | ||
737 | dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); | ||
738 | dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data); | ||
739 | break; | ||
740 | case IPPROTO_UDP: | ||
741 | dflags |= XCT_MACTX_CSUM_UDP; | ||
742 | dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); | ||
743 | dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data); | ||
744 | break; | ||
745 | } | ||
746 | } | ||
747 | |||
748 | map = pci_map_single(mac->dma_pdev, skb->data, skb->len, PCI_DMA_TODEVICE); | ||
749 | |||
750 | if (dma_mapping_error(map)) | ||
751 | return NETDEV_TX_BUSY; | ||
752 | |||
753 | txring = mac->tx; | ||
754 | |||
755 | spin_lock_irqsave(&txring->lock, flags); | ||
756 | |||
757 | if (txring->next_to_clean - txring->next_to_use == TX_RING_SIZE) { | ||
758 | spin_unlock_irqrestore(&txring->lock, flags); | ||
759 | pasemi_mac_clean_tx(mac); | ||
760 | spin_lock_irqsave(&txring->lock, flags); | ||
761 | |||
762 | if (txring->next_to_clean - txring->next_to_use == | ||
763 | TX_RING_SIZE) { | ||
764 | /* Still no room -- stop the queue and wait for tx | ||
765 | * intr when there's room. | ||
766 | */ | ||
767 | netif_stop_queue(dev); | ||
768 | goto out_err; | ||
769 | } | ||
770 | } | ||
771 | |||
772 | |||
773 | dp = &TX_DESC(mac, txring->next_to_use); | ||
774 | info = &TX_DESC_INFO(mac, txring->next_to_use); | ||
775 | |||
776 | dp->mactx = dflags | XCT_MACTX_LLEN(skb->len); | ||
777 | dp->ptr = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map); | ||
778 | info->dma = map; | ||
779 | info->skb = skb; | ||
780 | |||
781 | txring->next_to_use++; | ||
782 | mac->stats.tx_packets++; | ||
783 | mac->stats.tx_bytes += skb->len; | ||
784 | |||
785 | spin_unlock_irqrestore(&txring->lock, flags); | ||
786 | |||
787 | pci_write_config_dword(mac->dma_pdev, | ||
788 | PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1); | ||
789 | |||
790 | return NETDEV_TX_OK; | ||
791 | |||
792 | out_err: | ||
793 | spin_unlock_irqrestore(&txring->lock, flags); | ||
794 | pci_unmap_single(mac->dma_pdev, map, skb->len, PCI_DMA_TODEVICE); | ||
795 | return NETDEV_TX_BUSY; | ||
796 | } | ||
797 | |||
798 | static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev) | ||
799 | { | ||
800 | struct pasemi_mac *mac = netdev_priv(dev); | ||
801 | |||
802 | return &mac->stats; | ||
803 | } | ||
804 | |||
805 | static void pasemi_mac_set_rx_mode(struct net_device *dev) | ||
806 | { | ||
807 | struct pasemi_mac *mac = netdev_priv(dev); | ||
808 | unsigned int flags; | ||
809 | |||
810 | pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags); | ||
811 | |||
812 | /* Set promiscuous */ | ||
813 | if (dev->flags & IFF_PROMISC) | ||
814 | flags |= PAS_MAC_CFG_PCFG_PR; | ||
815 | else | ||
816 | flags &= ~PAS_MAC_CFG_PCFG_PR; | ||
817 | |||
818 | pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags); | ||
819 | } | ||
820 | |||
821 | |||
822 | static int pasemi_mac_poll(struct net_device *dev, int *budget) | ||
823 | { | ||
824 | int pkts, limit = min(*budget, dev->quota); | ||
825 | struct pasemi_mac *mac = netdev_priv(dev); | ||
826 | |||
827 | pkts = pasemi_mac_clean_rx(mac, limit); | ||
828 | |||
829 | if (pkts < limit) { | ||
830 | /* all done, no more packets present */ | ||
831 | netif_rx_complete(dev); | ||
832 | |||
833 | /* re-enable receive interrupts */ | ||
834 | pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG, | ||
835 | PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000)); | ||
836 | return 0; | ||
837 | } else { | ||
838 | /* used up our quantum, so reschedule */ | ||
839 | dev->quota -= pkts; | ||
840 | *budget -= pkts; | ||
841 | return 1; | ||
842 | } | ||
843 | } | ||
844 | |||
845 | static int __devinit | ||
846 | pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
847 | { | ||
848 | static int index = 0; | ||
849 | struct net_device *dev; | ||
850 | struct pasemi_mac *mac; | ||
851 | int err; | ||
852 | |||
853 | err = pci_enable_device(pdev); | ||
854 | if (err) | ||
855 | return err; | ||
856 | |||
857 | dev = alloc_etherdev(sizeof(struct pasemi_mac)); | ||
858 | if (dev == NULL) { | ||
859 | dev_err(&pdev->dev, | ||
860 | "pasemi_mac: Could not allocate ethernet device.\n"); | ||
861 | err = -ENOMEM; | ||
862 | goto out_disable_device; | ||
863 | } | ||
864 | |||
865 | SET_MODULE_OWNER(dev); | ||
866 | pci_set_drvdata(pdev, dev); | ||
867 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
868 | |||
869 | mac = netdev_priv(dev); | ||
870 | |||
871 | mac->pdev = pdev; | ||
872 | mac->netdev = dev; | ||
873 | mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); | ||
874 | |||
875 | if (!mac->dma_pdev) { | ||
876 | dev_err(&pdev->dev, "Can't find DMA Controller\n"); | ||
877 | err = -ENODEV; | ||
878 | goto out_free_netdev; | ||
879 | } | ||
880 | |||
881 | mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); | ||
882 | |||
883 | if (!mac->iob_pdev) { | ||
884 | dev_err(&pdev->dev, "Can't find I/O Bridge\n"); | ||
885 | err = -ENODEV; | ||
886 | goto out_put_dma_pdev; | ||
887 | } | ||
888 | |||
889 | /* These should come out of the device tree eventually */ | ||
890 | mac->dma_txch = index; | ||
891 | mac->dma_rxch = index; | ||
892 | |||
893 | /* We probe GMAC before XAUI, but the DMA interfaces are | ||
894 | * in XAUI, GMAC order. | ||
895 | */ | ||
896 | if (index < 4) | ||
897 | mac->dma_if = index + 2; | ||
898 | else | ||
899 | mac->dma_if = index - 4; | ||
900 | index++; | ||
901 | |||
902 | switch (pdev->device) { | ||
903 | case 0xa005: | ||
904 | mac->type = MAC_TYPE_GMAC; | ||
905 | break; | ||
906 | case 0xa006: | ||
907 | mac->type = MAC_TYPE_XAUI; | ||
908 | break; | ||
909 | default: | ||
910 | err = -ENODEV; | ||
911 | goto out; | ||
912 | } | ||
913 | |||
914 | /* get mac addr from device tree */ | ||
915 | if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) { | ||
916 | err = -ENODEV; | ||
917 | goto out; | ||
918 | } | ||
919 | memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr)); | ||
920 | |||
921 | dev->open = pasemi_mac_open; | ||
922 | dev->stop = pasemi_mac_close; | ||
923 | dev->hard_start_xmit = pasemi_mac_start_tx; | ||
924 | dev->get_stats = pasemi_mac_get_stats; | ||
925 | dev->set_multicast_list = pasemi_mac_set_rx_mode; | ||
926 | dev->weight = 64; | ||
927 | dev->poll = pasemi_mac_poll; | ||
928 | dev->features = NETIF_F_HW_CSUM; | ||
929 | |||
930 | /* The dma status structure is located in the I/O bridge, and | ||
931 | * is cache coherent. | ||
932 | */ | ||
933 | if (!dma_status) | ||
934 | /* XXXOJN This should come from the device tree */ | ||
935 | dma_status = __ioremap(0xfd800000, 0x1000, 0); | ||
936 | |||
937 | mac->rx_status = &dma_status->rx_sta[mac->dma_rxch]; | ||
938 | mac->tx_status = &dma_status->tx_sta[mac->dma_txch]; | ||
939 | |||
940 | err = register_netdev(dev); | ||
941 | |||
942 | if (err) { | ||
943 | dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n", | ||
944 | err); | ||
945 | goto out; | ||
946 | } else | ||
947 | printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, " | ||
948 | "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n", | ||
949 | dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI", | ||
950 | mac->dma_if, mac->dma_txch, mac->dma_rxch, | ||
951 | dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], | ||
952 | dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); | ||
953 | |||
954 | return err; | ||
955 | |||
956 | out: | ||
957 | pci_dev_put(mac->iob_pdev); | ||
958 | out_put_dma_pdev: | ||
959 | pci_dev_put(mac->dma_pdev); | ||
960 | out_free_netdev: | ||
961 | free_netdev(dev); | ||
962 | out_disable_device: | ||
963 | pci_disable_device(pdev); | ||
964 | return err; | ||
965 | |||
966 | } | ||
967 | |||
968 | static void __devexit pasemi_mac_remove(struct pci_dev *pdev) | ||
969 | { | ||
970 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
971 | struct pasemi_mac *mac; | ||
972 | |||
973 | if (!netdev) | ||
974 | return; | ||
975 | |||
976 | mac = netdev_priv(netdev); | ||
977 | |||
978 | unregister_netdev(netdev); | ||
979 | |||
980 | pci_disable_device(pdev); | ||
981 | pci_dev_put(mac->dma_pdev); | ||
982 | pci_dev_put(mac->iob_pdev); | ||
983 | |||
984 | pci_set_drvdata(pdev, NULL); | ||
985 | free_netdev(netdev); | ||
986 | } | ||
987 | |||
988 | static struct pci_device_id pasemi_mac_pci_tbl[] = { | ||
989 | { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) }, | ||
990 | { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) }, | ||
991 | }; | ||
992 | |||
993 | MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl); | ||
994 | |||
995 | static struct pci_driver pasemi_mac_driver = { | ||
996 | .name = "pasemi_mac", | ||
997 | .id_table = pasemi_mac_pci_tbl, | ||
998 | .probe = pasemi_mac_probe, | ||
999 | .remove = __devexit_p(pasemi_mac_remove), | ||
1000 | }; | ||
1001 | |||
1002 | static void __exit pasemi_mac_cleanup_module(void) | ||
1003 | { | ||
1004 | pci_unregister_driver(&pasemi_mac_driver); | ||
1005 | __iounmap(dma_status); | ||
1006 | dma_status = NULL; | ||
1007 | } | ||
1008 | |||
1009 | int pasemi_mac_init_module(void) | ||
1010 | { | ||
1011 | return pci_register_driver(&pasemi_mac_driver); | ||
1012 | } | ||
1013 | |||
1014 | MODULE_LICENSE("GPL"); | ||
1015 | MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>"); | ||
1016 | MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver"); | ||
1017 | |||
1018 | module_init(pasemi_mac_init_module); | ||
1019 | module_exit(pasemi_mac_cleanup_module); | ||