author     Olof Johansson <olof@lixom.net>          2007-11-28 21:56:32 -0500
committer  David S. Miller <davem@davemloft.net>    2008-01-28 18:04:21 -0500
commit     34c20624ce541f8a7ff937f474af51f9044cedd7 (patch)
tree       6d1bcecb9c43d70f938c61c40fd23c695ccb2547 /drivers
parent     8ee9d85779356c1dc2ba87aca27fbf9414f2d82b (diff)
pasemi_mac: Convert to new dma library
Convert the pasemi_mac driver to the new platform-global DMA management
library. This also does a couple of other minor cleanups w.r.t. channel
management.
Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
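
The bulk of the change is mechanical: the driver's private channel bookkeeping (ring/dma pointers, status pointers, hand-rolled register iomaps and irq math) is replaced by the pasemi_dma_* helpers. A rough sketch of the lifecycle the driver follows after this patch is below; the prototypes are inferred from the call sites in the diff rather than quoted from the library header, so treat the signatures and the include path as assumptions.

#include <linux/kernel.h>
#include <asm/pasemi_dma.h>	/* assumed home of the pasemi_dma_* helpers */
#include "pasemi_mac.h"

/* Module init: the library maps the DMA/IOB register space once, globally. */
static int init_sketch(void)
{
	return pasemi_dma_init();
}

/* Per-interface setup, mirroring pasemi_mac_setup_rx_resources() below. */
static struct pasemi_mac_rxring *rx_chan_sketch(void)
{
	struct pasemi_mac_rxring *ring;

	/* The library allocates the enclosing ring structure and places the
	 * pasemi_dmachan at the offset the caller names (here, the first member).
	 */
	ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring),
				     offsetof(struct pasemi_mac_rxring, chan));
	if (!ring)
		return NULL;

	/* Descriptor memory is owned by the channel, not the driver. */
	if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) {
		pasemi_dma_free_chan(&ring->chan);
		return NULL;
	}

	/* Register accessors are global now; no mac->dma_regs iomap needed. */
	pasemi_write_dma_reg(PAS_DMA_RXCHAN_BASEL(ring->chan.chno),
			     PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));

	return ring;
}

Teardown is the mirror image: pasemi_dma_free_chan(&ring->chan) releases both the descriptor ring and the channel, which is why the explicit dma_free_coherent() calls for the descriptor rings disappear from the free paths.
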
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/pasemi_mac.c | 480
-rw-r--r-- | drivers/net/pasemi_mac.h |  16
2 files changed, 210 insertions, 286 deletions
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 8319bc18bfa2..a50eb34ece5d 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -69,9 +69,9 @@ | |||
69 | NETIF_MSG_RX_ERR | \ | 69 | NETIF_MSG_RX_ERR | \ |
70 | NETIF_MSG_TX_ERR) | 70 | NETIF_MSG_TX_ERR) |
71 | 71 | ||
72 | #define TX_DESC(tx, num) ((tx)->ring[(num) & (TX_RING_SIZE-1)]) | 72 | #define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)]) |
73 | #define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)]) | 73 | #define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)]) |
74 | #define RX_DESC(rx, num) ((rx)->ring[(num) & (RX_RING_SIZE-1)]) | 74 | #define RX_DESC(rx, num) ((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)]) |
75 | #define RX_DESC_INFO(rx, num) ((rx)->ring_info[(num) & (RX_RING_SIZE-1)]) | 75 | #define RX_DESC_INFO(rx, num) ((rx)->ring_info[(num) & (RX_RING_SIZE-1)]) |
76 | #define RX_BUFF(rx, num) ((rx)->buffers[(num) & (RX_RING_SIZE-1)]) | 76 | #define RX_BUFF(rx, num) ((rx)->buffers[(num) & (RX_RING_SIZE-1)]) |
77 | 77 | ||
@@ -89,8 +89,6 @@ static int debug = -1; /* -1 == use DEFAULT_MSG_ENABLE as value */ | |||
89 | module_param(debug, int, 0); | 89 | module_param(debug, int, 0); |
90 | MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value"); | 90 | MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value"); |
91 | 91 | ||
92 | static struct pasdma_status *dma_status; | ||
93 | |||
94 | static int translation_enabled(void) | 92 | static int translation_enabled(void) |
95 | { | 93 | { |
96 | #if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE) | 94 | #if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE) |
@@ -100,32 +98,30 @@ static int translation_enabled(void) | |||
100 | #endif | 98 | #endif |
101 | } | 99 | } |
102 | 100 | ||
103 | static void write_iob_reg(struct pasemi_mac *mac, unsigned int reg, | 101 | static void write_iob_reg(unsigned int reg, unsigned int val) |
104 | unsigned int val) | ||
105 | { | 102 | { |
106 | out_le32(mac->iob_regs+reg, val); | 103 | pasemi_write_iob_reg(reg, val); |
107 | } | 104 | } |
108 | 105 | ||
109 | static unsigned int read_mac_reg(struct pasemi_mac *mac, unsigned int reg) | 106 | static unsigned int read_mac_reg(struct pasemi_mac *mac, unsigned int reg) |
110 | { | 107 | { |
111 | return in_le32(mac->regs+reg); | 108 | return pasemi_read_mac_reg(mac->dma_if, reg); |
112 | } | 109 | } |
113 | 110 | ||
114 | static void write_mac_reg(struct pasemi_mac *mac, unsigned int reg, | 111 | static void write_mac_reg(struct pasemi_mac *mac, unsigned int reg, |
115 | unsigned int val) | 112 | unsigned int val) |
116 | { | 113 | { |
117 | out_le32(mac->regs+reg, val); | 114 | pasemi_write_mac_reg(mac->dma_if, reg, val); |
118 | } | 115 | } |
119 | 116 | ||
120 | static unsigned int read_dma_reg(struct pasemi_mac *mac, unsigned int reg) | 117 | static unsigned int read_dma_reg(unsigned int reg) |
121 | { | 118 | { |
122 | return in_le32(mac->dma_regs+reg); | 119 | return pasemi_read_dma_reg(reg); |
123 | } | 120 | } |
124 | 121 | ||
125 | static void write_dma_reg(struct pasemi_mac *mac, unsigned int reg, | 122 | static void write_dma_reg(unsigned int reg, unsigned int val) |
126 | unsigned int val) | ||
127 | { | 123 | { |
128 | out_le32(mac->dma_regs+reg, val); | 124 | pasemi_write_dma_reg(reg, val); |
129 | } | 125 | } |
130 | 126 | ||
131 | static struct pasemi_mac_rxring *rx_ring(struct pasemi_mac *mac) | 127 | static struct pasemi_mac_rxring *rx_ring(struct pasemi_mac *mac) |
@@ -138,6 +134,34 @@ static struct pasemi_mac_txring *tx_ring(struct pasemi_mac *mac) | |||
138 | return mac->tx; | 134 | return mac->tx; |
139 | } | 135 | } |
140 | 136 | ||
137 | static int mac_to_intf(struct pasemi_mac *mac) | ||
138 | { | ||
139 | struct pci_dev *pdev = mac->pdev; | ||
140 | u32 tmp; | ||
141 | int nintf, off, i, j; | ||
142 | int devfn = pdev->devfn; | ||
143 | |||
144 | tmp = read_dma_reg(PAS_DMA_CAP_IFI); | ||
145 | nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S; | ||
146 | off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S; | ||
147 | |||
148 | /* IOFF contains the offset to the registers containing the | ||
149 | * DMA interface-to-MAC-pci-id mappings, and NIN contains number | ||
150 | * of total interfaces. Each register contains 4 devfns. | ||
151 | * Just do a linear search until we find the devfn of the MAC | ||
152 | * we're trying to look up. | ||
153 | */ | ||
154 | |||
155 | for (i = 0; i < (nintf+3)/4; i++) { | ||
156 | tmp = read_dma_reg(off+4*i); | ||
157 | for (j = 0; j < 4; j++) { | ||
158 | if (((tmp >> (8*j)) & 0xff) == devfn) | ||
159 | return i*4 + j; | ||
160 | } | ||
161 | } | ||
162 | return -1; | ||
163 | } | ||
164 | |||
141 | static int pasemi_get_mac_addr(struct pasemi_mac *mac) | 165 | static int pasemi_get_mac_addr(struct pasemi_mac *mac) |
142 | { | 166 | { |
143 | struct pci_dev *pdev = mac->pdev; | 167 | struct pci_dev *pdev = mac->pdev; |
@@ -213,13 +237,17 @@ static int pasemi_mac_setup_rx_resources(struct net_device *dev) | |||
213 | { | 237 | { |
214 | struct pasemi_mac_rxring *ring; | 238 | struct pasemi_mac_rxring *ring; |
215 | struct pasemi_mac *mac = netdev_priv(dev); | 239 | struct pasemi_mac *mac = netdev_priv(dev); |
216 | int chan_id = mac->dma_rxch; | 240 | int chno; |
217 | unsigned int cfg; | 241 | unsigned int cfg; |
218 | 242 | ||
219 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); | 243 | ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring), |
244 | offsetof(struct pasemi_mac_rxring, chan)); | ||
220 | 245 | ||
221 | if (!ring) | 246 | if (!ring) { |
222 | goto out_ring; | 247 | dev_err(&mac->pdev->dev, "Can't allocate RX channel\n"); |
248 | goto out_chan; | ||
249 | } | ||
250 | chno = ring->chan.chno; | ||
223 | 251 | ||
224 | spin_lock_init(&ring->lock); | 252 | spin_lock_init(&ring->lock); |
225 | 253 | ||
@@ -231,84 +259,80 @@ static int pasemi_mac_setup_rx_resources(struct net_device *dev) | |||
231 | goto out_ring_info; | 259 | goto out_ring_info; |
232 | 260 | ||
233 | /* Allocate descriptors */ | 261 | /* Allocate descriptors */ |
234 | ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev, | 262 | if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) |
235 | RX_RING_SIZE * sizeof(u64), | ||
236 | &ring->dma, GFP_KERNEL); | ||
237 | |||
238 | if (!ring->ring) | ||
239 | goto out_ring_desc; | 263 | goto out_ring_desc; |
240 | 264 | ||
241 | memset(ring->ring, 0, RX_RING_SIZE * sizeof(u64)); | ||
242 | |||
243 | ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, | 265 | ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, |
244 | RX_RING_SIZE * sizeof(u64), | 266 | RX_RING_SIZE * sizeof(u64), |
245 | &ring->buf_dma, GFP_KERNEL); | 267 | &ring->buf_dma, GFP_KERNEL); |
246 | if (!ring->buffers) | 268 | if (!ring->buffers) |
247 | goto out_buffers; | 269 | goto out_ring_desc; |
248 | 270 | ||
249 | memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64)); | 271 | memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64)); |
250 | 272 | ||
251 | write_dma_reg(mac, PAS_DMA_RXCHAN_BASEL(chan_id), PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma)); | 273 | write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno), |
274 | PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma)); | ||
252 | 275 | ||
253 | write_dma_reg(mac, PAS_DMA_RXCHAN_BASEU(chan_id), | 276 | write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno), |
254 | PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) | | 277 | PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) | |
255 | PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3)); | 278 | PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3)); |
256 | 279 | ||
257 | cfg = PAS_DMA_RXCHAN_CFG_HBU(2); | 280 | cfg = PAS_DMA_RXCHAN_CFG_HBU(1); |
258 | 281 | ||
259 | if (translation_enabled()) | 282 | if (translation_enabled()) |
260 | cfg |= PAS_DMA_RXCHAN_CFG_CTR; | 283 | cfg |= PAS_DMA_RXCHAN_CFG_CTR; |
261 | 284 | ||
262 | write_dma_reg(mac, PAS_DMA_RXCHAN_CFG(chan_id), cfg); | 285 | write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg); |
263 | 286 | ||
264 | write_dma_reg(mac, PAS_DMA_RXINT_BASEL(mac->dma_if), | 287 | write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if), |
265 | PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma)); | 288 | PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma)); |
266 | 289 | ||
267 | write_dma_reg(mac, PAS_DMA_RXINT_BASEU(mac->dma_if), | 290 | write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if), |
268 | PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) | | 291 | PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) | |
269 | PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3)); | 292 | PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3)); |
270 | 293 | ||
271 | cfg = PAS_DMA_RXINT_CFG_DHL(3) | PAS_DMA_RXINT_CFG_L2 | | 294 | cfg = PAS_DMA_RXINT_CFG_DHL(1) | PAS_DMA_RXINT_CFG_L2 | |
272 | PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP | | 295 | PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP | |
273 | PAS_DMA_RXINT_CFG_HEN; | 296 | PAS_DMA_RXINT_CFG_HEN; |
274 | 297 | ||
275 | if (translation_enabled()) | 298 | if (translation_enabled()) |
276 | cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR; | 299 | cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR; |
277 | 300 | ||
278 | write_dma_reg(mac, PAS_DMA_RXINT_CFG(mac->dma_if), cfg); | 301 | write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg); |
279 | 302 | ||
280 | ring->next_to_fill = 0; | 303 | ring->next_to_fill = 0; |
281 | ring->next_to_clean = 0; | 304 | ring->next_to_clean = 0; |
282 | |||
283 | ring->status = &dma_status->rx_sta[mac->dma_rxch]; | ||
284 | ring->mac = mac; | 305 | ring->mac = mac; |
285 | mac->rx = ring; | 306 | mac->rx = ring; |
286 | 307 | ||
287 | return 0; | 308 | return 0; |
288 | 309 | ||
289 | out_buffers: | ||
290 | dma_free_coherent(&mac->dma_pdev->dev, | ||
291 | RX_RING_SIZE * sizeof(u64), | ||
292 | rx_ring(mac)->ring, rx_ring(mac)->dma); | ||
293 | out_ring_desc: | 310 | out_ring_desc: |
294 | kfree(ring->ring_info); | 311 | kfree(ring->ring_info); |
295 | out_ring_info: | 312 | out_ring_info: |
296 | kfree(ring); | 313 | pasemi_dma_free_chan(&ring->chan); |
297 | out_ring: | 314 | out_chan: |
298 | return -ENOMEM; | 315 | return -ENOMEM; |
299 | } | 316 | } |
300 | 317 | ||
301 | static struct pasemi_mac_txring * | 318 | static struct pasemi_mac_txring * |
302 | pasemi_mac_setup_tx_resources(struct net_device *dev, int txch) | 319 | pasemi_mac_setup_tx_resources(struct net_device *dev) |
303 | { | 320 | { |
304 | struct pasemi_mac *mac = netdev_priv(dev); | 321 | struct pasemi_mac *mac = netdev_priv(dev); |
305 | u32 val; | 322 | u32 val; |
306 | struct pasemi_mac_txring *ring; | 323 | struct pasemi_mac_txring *ring; |
307 | unsigned int cfg; | 324 | unsigned int cfg; |
325 | int chno; | ||
308 | 326 | ||
309 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); | 327 | ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring), |
310 | if (!ring) | 328 | offsetof(struct pasemi_mac_txring, chan)); |
311 | goto out_ring; | 329 | |
330 | if (!ring) { | ||
331 | dev_err(&mac->pdev->dev, "Can't allocate TX channel\n"); | ||
332 | goto out_chan; | ||
333 | } | ||
334 | |||
335 | chno = ring->chan.chno; | ||
312 | 336 | ||
313 | spin_lock_init(&ring->lock); | 337 | spin_lock_init(&ring->lock); |
314 | 338 | ||
@@ -319,20 +343,15 @@ pasemi_mac_setup_tx_resources(struct net_device *dev, int txch) | |||
319 | goto out_ring_info; | 343 | goto out_ring_info; |
320 | 344 | ||
321 | /* Allocate descriptors */ | 345 | /* Allocate descriptors */ |
322 | ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev, | 346 | if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE)) |
323 | TX_RING_SIZE * sizeof(u64), | ||
324 | &ring->dma, GFP_KERNEL); | ||
325 | if (!ring->ring) | ||
326 | goto out_ring_desc; | 347 | goto out_ring_desc; |
327 | 348 | ||
328 | memset(ring->ring, 0, TX_RING_SIZE * sizeof(u64)); | 349 | write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno), |
329 | 350 | PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma)); | |
330 | write_dma_reg(mac, PAS_DMA_TXCHAN_BASEL(txch), | 351 | val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32); |
331 | PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma)); | ||
332 | val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32); | ||
333 | val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3); | 352 | val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3); |
334 | 353 | ||
335 | write_dma_reg(mac, PAS_DMA_TXCHAN_BASEU(txch), val); | 354 | write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val); |
336 | 355 | ||
337 | cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE | | 356 | cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE | |
338 | PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) | | 357 | PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) | |
@@ -342,12 +361,10 @@ pasemi_mac_setup_tx_resources(struct net_device *dev, int txch) | |||
342 | if (translation_enabled()) | 361 | if (translation_enabled()) |
343 | cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; | 362 | cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; |
344 | 363 | ||
345 | write_dma_reg(mac, PAS_DMA_TXCHAN_CFG(txch), cfg); | 364 | write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg); |
346 | 365 | ||
347 | ring->next_to_fill = 0; | 366 | ring->next_to_fill = 0; |
348 | ring->next_to_clean = 0; | 367 | ring->next_to_clean = 0; |
349 | ring->status = &dma_status->tx_sta[txch]; | ||
350 | ring->chan = txch; | ||
351 | ring->mac = mac; | 368 | ring->mac = mac; |
352 | 369 | ||
353 | return ring; | 370 | return ring; |
@@ -355,8 +372,8 @@ pasemi_mac_setup_tx_resources(struct net_device *dev, int txch) | |||
355 | out_ring_desc: | 372 | out_ring_desc: |
356 | kfree(ring->ring_info); | 373 | kfree(ring->ring_info); |
357 | out_ring_info: | 374 | out_ring_info: |
358 | kfree(ring); | 375 | pasemi_dma_free_chan(&ring->chan); |
359 | out_ring: | 376 | out_chan: |
360 | return NULL; | 377 | return NULL; |
361 | } | 378 | } |
362 | 379 | ||
@@ -387,15 +404,9 @@ static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac) | |||
387 | freed = 2; | 404 | freed = 2; |
388 | } | 405 | } |
389 | 406 | ||
390 | for (i = 0; i < TX_RING_SIZE; i++) | ||
391 | txring->ring[i] = 0; | ||
392 | |||
393 | dma_free_coherent(&mac->dma_pdev->dev, | ||
394 | TX_RING_SIZE * sizeof(u64), | ||
395 | txring->ring, txring->dma); | ||
396 | |||
397 | kfree(txring->ring_info); | 407 | kfree(txring->ring_info); |
398 | kfree(txring); | 408 | pasemi_dma_free_chan(&txring->chan); |
409 | |||
399 | } | 410 | } |
400 | 411 | ||
401 | static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) | 412 | static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) |
@@ -420,15 +431,11 @@ static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) | |||
420 | for (i = 0; i < RX_RING_SIZE; i++) | 431 | for (i = 0; i < RX_RING_SIZE; i++) |
421 | RX_DESC(rx, i) = 0; | 432 | RX_DESC(rx, i) = 0; |
422 | 433 | ||
423 | dma_free_coherent(&mac->dma_pdev->dev, | ||
424 | RX_RING_SIZE * sizeof(u64), | ||
425 | rx_ring(mac)->ring, rx_ring(mac)->dma); | ||
426 | |||
427 | dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), | 434 | dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), |
428 | rx_ring(mac)->buffers, rx_ring(mac)->buf_dma); | 435 | rx_ring(mac)->buffers, rx_ring(mac)->buf_dma); |
429 | 436 | ||
430 | kfree(rx_ring(mac)->ring_info); | 437 | kfree(rx_ring(mac)->ring_info); |
431 | kfree(rx_ring(mac)); | 438 | pasemi_dma_free_chan(&rx_ring(mac)->chan); |
432 | mac->rx = NULL; | 439 | mac->rx = NULL; |
433 | } | 440 | } |
434 | 441 | ||
@@ -479,7 +486,7 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit) | |||
479 | 486 | ||
480 | wmb(); | 487 | wmb(); |
481 | 488 | ||
482 | write_dma_reg(mac, PAS_DMA_RXINT_INCR(mac->dma_if), count); | 489 | write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count); |
483 | 490 | ||
484 | rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) & | 491 | rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) & |
485 | (RX_RING_SIZE - 1); | 492 | (RX_RING_SIZE - 1); |
@@ -492,11 +499,11 @@ static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac) | |||
492 | * ack the packet count interrupt we got in rx_intr. | 499 | * ack the packet count interrupt we got in rx_intr. |
493 | */ | 500 | */ |
494 | 501 | ||
495 | pcnt = *rx_ring(mac)->status & PAS_STATUS_PCNT_M; | 502 | pcnt = *rx_ring(mac)->chan.status & PAS_STATUS_PCNT_M; |
496 | 503 | ||
497 | reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC; | 504 | reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC; |
498 | 505 | ||
499 | write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg); | 506 | write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg); |
500 | } | 507 | } |
501 | 508 | ||
502 | static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac) | 509 | static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac) |
@@ -504,26 +511,27 @@ static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac) | |||
504 | unsigned int reg, pcnt; | 511 | unsigned int reg, pcnt; |
505 | 512 | ||
506 | /* Re-enable packet count interrupts */ | 513 | /* Re-enable packet count interrupts */ |
507 | pcnt = *tx_ring(mac)->status & PAS_STATUS_PCNT_M; | 514 | pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M; |
508 | 515 | ||
509 | reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; | 516 | reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; |
510 | 517 | ||
511 | write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan), reg); | 518 | write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg); |
512 | } | 519 | } |
513 | 520 | ||
514 | 521 | ||
515 | static inline void pasemi_mac_rx_error(struct pasemi_mac *mac, u64 macrx) | 522 | static inline void pasemi_mac_rx_error(struct pasemi_mac *mac, u64 macrx) |
516 | { | 523 | { |
517 | unsigned int rcmdsta, ccmdsta; | 524 | unsigned int rcmdsta, ccmdsta; |
525 | struct pasemi_dmachan *chan = &rx_ring(mac)->chan; | ||
518 | 526 | ||
519 | if (!netif_msg_rx_err(mac)) | 527 | if (!netif_msg_rx_err(mac)) |
520 | return; | 528 | return; |
521 | 529 | ||
522 | rcmdsta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); | 530 | rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); |
523 | ccmdsta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch)); | 531 | ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno)); |
524 | 532 | ||
525 | printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n", | 533 | printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n", |
526 | macrx, *rx_ring(mac)->status); | 534 | macrx, *chan->status); |
527 | 535 | ||
528 | printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n", | 536 | printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n", |
529 | rcmdsta, ccmdsta); | 537 | rcmdsta, ccmdsta); |
@@ -532,20 +540,22 @@ static inline void pasemi_mac_rx_error(struct pasemi_mac *mac, u64 macrx) | |||
532 | static inline void pasemi_mac_tx_error(struct pasemi_mac *mac, u64 mactx) | 540 | static inline void pasemi_mac_tx_error(struct pasemi_mac *mac, u64 mactx) |
533 | { | 541 | { |
534 | unsigned int cmdsta; | 542 | unsigned int cmdsta; |
543 | struct pasemi_dmachan *chan = &tx_ring(mac)->chan; | ||
535 | 544 | ||
536 | if (!netif_msg_tx_err(mac)) | 545 | if (!netif_msg_tx_err(mac)) |
537 | return; | 546 | return; |
538 | 547 | ||
539 | cmdsta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch)); | 548 | cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno)); |
540 | 549 | ||
541 | printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "\ | 550 | printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "\ |
542 | "tx status 0x%016lx\n", mactx, *tx_ring(mac)->status); | 551 | "tx status 0x%016lx\n", mactx, *chan->status); |
543 | 552 | ||
544 | printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta); | 553 | printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta); |
545 | } | 554 | } |
546 | 555 | ||
547 | static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, int limit) | 556 | static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, int limit) |
548 | { | 557 | { |
558 | struct pasemi_dmachan *chan = &rx->chan; | ||
549 | struct pasemi_mac *mac = rx->mac; | 559 | struct pasemi_mac *mac = rx->mac; |
550 | unsigned int n; | 560 | unsigned int n; |
551 | int count; | 561 | int count; |
@@ -567,7 +577,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, int limit) | |||
567 | macrx = RX_DESC(rx, n); | 577 | macrx = RX_DESC(rx, n); |
568 | 578 | ||
569 | if ((macrx & XCT_MACRX_E) || | 579 | if ((macrx & XCT_MACRX_E) || |
570 | (*rx_ring(mac)->status & PAS_STATUS_ERROR)) | 580 | (*chan->status & PAS_STATUS_ERROR)) |
571 | pasemi_mac_rx_error(mac, macrx); | 581 | pasemi_mac_rx_error(mac, macrx); |
572 | 582 | ||
573 | if (!(macrx & XCT_MACRX_O)) | 583 | if (!(macrx & XCT_MACRX_O)) |
@@ -648,7 +658,7 @@ next: | |||
648 | 658 | ||
649 | if (n > RX_RING_SIZE) { | 659 | if (n > RX_RING_SIZE) { |
650 | /* Errata 5971 workaround: L2 target of headers */ | 660 | /* Errata 5971 workaround: L2 target of headers */ |
651 | write_iob_reg(mac, PAS_IOB_COM_PKTHDRCNT, 0); | 661 | write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0); |
652 | n &= (RX_RING_SIZE-1); | 662 | n &= (RX_RING_SIZE-1); |
653 | } | 663 | } |
654 | 664 | ||
@@ -658,7 +668,7 @@ next: | |||
658 | * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with | 668 | * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with |
659 | * count*2. | 669 | * count*2. |
660 | */ | 670 | */ |
661 | write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), count << 1); | 671 | write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1); |
662 | 672 | ||
663 | pasemi_mac_replenish_rx_ring(mac->netdev, count); | 673 | pasemi_mac_replenish_rx_ring(mac->netdev, count); |
664 | 674 | ||
@@ -672,6 +682,7 @@ next: | |||
672 | 682 | ||
673 | static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring) | 683 | static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring) |
674 | { | 684 | { |
685 | struct pasemi_dmachan *chan = &txring->chan; | ||
675 | struct pasemi_mac *mac = txring->mac; | 686 | struct pasemi_mac *mac = txring->mac; |
676 | int i, j; | 687 | int i, j; |
677 | unsigned int start, descr_count, buf_count, batch_limit; | 688 | unsigned int start, descr_count, buf_count, batch_limit; |
@@ -703,7 +714,7 @@ restart: | |||
703 | struct sk_buff *skb; | 714 | struct sk_buff *skb; |
704 | 715 | ||
705 | if ((mactx & XCT_MACTX_E) || | 716 | if ((mactx & XCT_MACTX_E) || |
706 | (*tx_ring(mac)->status & PAS_STATUS_ERROR)) | 717 | (*chan->status & PAS_STATUS_ERROR)) |
707 | pasemi_mac_tx_error(mac, mactx); | 718 | pasemi_mac_tx_error(mac, mactx); |
708 | 719 | ||
709 | if (unlikely(mactx & XCT_MACTX_O)) | 720 | if (unlikely(mactx & XCT_MACTX_O)) |
@@ -747,11 +758,13 @@ restart: | |||
747 | 758 | ||
748 | static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) | 759 | static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) |
749 | { | 760 | { |
750 | struct net_device *dev = data; | 761 | struct pasemi_mac_rxring *rxring = data; |
751 | struct pasemi_mac *mac = netdev_priv(dev); | 762 | struct pasemi_mac *mac = rxring->mac; |
763 | struct net_device *dev = mac->netdev; | ||
764 | struct pasemi_dmachan *chan = &rxring->chan; | ||
752 | unsigned int reg; | 765 | unsigned int reg; |
753 | 766 | ||
754 | if (!(*rx_ring(mac)->status & PAS_STATUS_CAUSE_M)) | 767 | if (!(*chan->status & PAS_STATUS_CAUSE_M)) |
755 | return IRQ_NONE; | 768 | return IRQ_NONE; |
756 | 769 | ||
757 | /* Don't reset packet count so it won't fire again but clear | 770 | /* Don't reset packet count so it won't fire again but clear |
@@ -759,16 +772,16 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) | |||
759 | */ | 772 | */ |
760 | 773 | ||
761 | reg = 0; | 774 | reg = 0; |
762 | if (*rx_ring(mac)->status & PAS_STATUS_SOFT) | 775 | if (*chan->status & PAS_STATUS_SOFT) |
763 | reg |= PAS_IOB_DMA_RXCH_RESET_SINTC; | 776 | reg |= PAS_IOB_DMA_RXCH_RESET_SINTC; |
764 | if (*rx_ring(mac)->status & PAS_STATUS_ERROR) | 777 | if (*chan->status & PAS_STATUS_ERROR) |
765 | reg |= PAS_IOB_DMA_RXCH_RESET_DINTC; | 778 | reg |= PAS_IOB_DMA_RXCH_RESET_DINTC; |
766 | if (*rx_ring(mac)->status & PAS_STATUS_TIMER) | 779 | if (*chan->status & PAS_STATUS_TIMER) |
767 | reg |= PAS_IOB_DMA_RXCH_RESET_TINTC; | 780 | reg |= PAS_IOB_DMA_RXCH_RESET_TINTC; |
768 | 781 | ||
769 | netif_rx_schedule(dev, &mac->napi); | 782 | netif_rx_schedule(dev, &mac->napi); |
770 | 783 | ||
771 | write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg); | 784 | write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg); |
772 | 785 | ||
773 | return IRQ_HANDLED; | 786 | return IRQ_HANDLED; |
774 | } | 787 | } |
@@ -776,24 +789,24 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) | |||
776 | static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) | 789 | static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) |
777 | { | 790 | { |
778 | struct pasemi_mac_txring *txring = data; | 791 | struct pasemi_mac_txring *txring = data; |
779 | struct pasemi_mac *mac = txring->mac; | 792 | struct pasemi_dmachan *chan = &txring->chan; |
780 | unsigned int reg, pcnt; | 793 | unsigned int reg, pcnt; |
781 | 794 | ||
782 | if (!(*txring->status & PAS_STATUS_CAUSE_M)) | 795 | if (!(*chan->status & PAS_STATUS_CAUSE_M)) |
783 | return IRQ_NONE; | 796 | return IRQ_NONE; |
784 | 797 | ||
785 | pasemi_mac_clean_tx(txring); | 798 | pasemi_mac_clean_tx(txring); |
786 | 799 | ||
787 | pcnt = *txring->status & PAS_STATUS_PCNT_M; | 800 | pcnt = *chan->status & PAS_STATUS_PCNT_M; |
788 | 801 | ||
789 | reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; | 802 | reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; |
790 | 803 | ||
791 | if (*txring->status & PAS_STATUS_SOFT) | 804 | if (*chan->status & PAS_STATUS_SOFT) |
792 | reg |= PAS_IOB_DMA_TXCH_RESET_SINTC; | 805 | reg |= PAS_IOB_DMA_TXCH_RESET_SINTC; |
793 | if (*txring->status & PAS_STATUS_ERROR) | 806 | if (*chan->status & PAS_STATUS_ERROR) |
794 | reg |= PAS_IOB_DMA_TXCH_RESET_DINTC; | 807 | reg |= PAS_IOB_DMA_TXCH_RESET_DINTC; |
795 | 808 | ||
796 | write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(txring->chan), reg); | 809 | write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg); |
797 | 810 | ||
798 | return IRQ_HANDLED; | 811 | return IRQ_HANDLED; |
799 | } | 812 | } |
@@ -909,15 +922,14 @@ err: | |||
909 | static int pasemi_mac_open(struct net_device *dev) | 922 | static int pasemi_mac_open(struct net_device *dev) |
910 | { | 923 | { |
911 | struct pasemi_mac *mac = netdev_priv(dev); | 924 | struct pasemi_mac *mac = netdev_priv(dev); |
912 | int base_irq; | ||
913 | unsigned int flags; | 925 | unsigned int flags; |
914 | int ret; | 926 | int ret; |
915 | 927 | ||
916 | /* enable rx section */ | 928 | /* enable rx section */ |
917 | write_dma_reg(mac, PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN); | 929 | write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN); |
918 | 930 | ||
919 | /* enable tx section */ | 931 | /* enable tx section */ |
920 | write_dma_reg(mac, PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN); | 932 | write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN); |
921 | 933 | ||
922 | flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) | | 934 | flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) | |
923 | PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) | | 935 | PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) | |
@@ -925,56 +937,53 @@ static int pasemi_mac_open(struct net_device *dev) | |||
925 | 937 | ||
926 | write_mac_reg(mac, PAS_MAC_CFG_TXP, flags); | 938 | write_mac_reg(mac, PAS_MAC_CFG_TXP, flags); |
927 | 939 | ||
928 | write_iob_reg(mac, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch), | ||
929 | PAS_IOB_DMA_RXCH_CFG_CNTTH(0)); | ||
930 | |||
931 | write_iob_reg(mac, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch), | ||
932 | PAS_IOB_DMA_TXCH_CFG_CNTTH(128)); | ||
933 | |||
934 | /* 0xffffff is max value, about 16ms */ | 940 | /* 0xffffff is max value, about 16ms */ |
935 | write_iob_reg(mac, PAS_IOB_DMA_COM_TIMEOUTCFG, | 941 | write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG, |
936 | PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff)); | 942 | PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff)); |
937 | 943 | ||
938 | ret = pasemi_mac_setup_rx_resources(dev); | 944 | ret = pasemi_mac_setup_rx_resources(dev); |
939 | if (ret) | 945 | if (ret) |
940 | goto out_rx_resources; | 946 | goto out_rx_resources; |
941 | 947 | ||
942 | mac->tx = pasemi_mac_setup_tx_resources(dev, mac->dma_txch); | 948 | mac->tx = pasemi_mac_setup_tx_resources(dev); |
943 | 949 | ||
944 | if (!mac->tx) | 950 | if (!mac->tx) |
945 | goto out_tx_ring; | 951 | goto out_tx_ring; |
946 | 952 | ||
953 | write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno), | ||
954 | PAS_IOB_DMA_RXCH_CFG_CNTTH(0)); | ||
955 | |||
956 | write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno), | ||
957 | PAS_IOB_DMA_TXCH_CFG_CNTTH(128)); | ||
958 | |||
947 | write_mac_reg(mac, PAS_MAC_IPC_CHNL, | 959 | write_mac_reg(mac, PAS_MAC_IPC_CHNL, |
948 | PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) | | 960 | PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) | |
949 | PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch)); | 961 | PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno)); |
950 | 962 | ||
951 | /* enable rx if */ | 963 | /* enable rx if */ |
952 | write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), | 964 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), |
953 | PAS_DMA_RXINT_RCMDSTA_EN | | 965 | PAS_DMA_RXINT_RCMDSTA_EN | |
954 | PAS_DMA_RXINT_RCMDSTA_DROPS_M | | 966 | PAS_DMA_RXINT_RCMDSTA_DROPS_M | |
955 | PAS_DMA_RXINT_RCMDSTA_BP | | 967 | PAS_DMA_RXINT_RCMDSTA_BP | |
956 | PAS_DMA_RXINT_RCMDSTA_OO | | 968 | PAS_DMA_RXINT_RCMDSTA_OO | |
957 | PAS_DMA_RXINT_RCMDSTA_BT); | 969 | PAS_DMA_RXINT_RCMDSTA_BT); |
958 | 970 | ||
959 | /* enable rx channel */ | 971 | /* enable rx channel */ |
960 | write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), | 972 | pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU | |
961 | PAS_DMA_RXCHAN_CCMDSTA_EN | | 973 | PAS_DMA_RXCHAN_CCMDSTA_OD | |
962 | PAS_DMA_RXCHAN_CCMDSTA_DU | | 974 | PAS_DMA_RXCHAN_CCMDSTA_FD | |
963 | PAS_DMA_RXCHAN_CCMDSTA_OD | | 975 | PAS_DMA_RXCHAN_CCMDSTA_DT); |
964 | PAS_DMA_RXCHAN_CCMDSTA_FD | | ||
965 | PAS_DMA_RXCHAN_CCMDSTA_DT); | ||
966 | 976 | ||
967 | /* enable tx channel */ | 977 | /* enable tx channel */ |
968 | write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), | 978 | pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ | |
969 | PAS_DMA_TXCHAN_TCMDSTA_EN | | 979 | PAS_DMA_TXCHAN_TCMDSTA_DB | |
970 | PAS_DMA_TXCHAN_TCMDSTA_SZ | | 980 | PAS_DMA_TXCHAN_TCMDSTA_DE | |
971 | PAS_DMA_TXCHAN_TCMDSTA_DB | | 981 | PAS_DMA_TXCHAN_TCMDSTA_DA); |
972 | PAS_DMA_TXCHAN_TCMDSTA_DE | | ||
973 | PAS_DMA_TXCHAN_TCMDSTA_DA); | ||
974 | 982 | ||
975 | pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE); | 983 | pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE); |
976 | 984 | ||
977 | write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), RX_RING_SIZE>>1); | 985 | write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno), |
986 | RX_RING_SIZE>>1); | ||
978 | 987 | ||
979 | /* Clear out any residual packet count state from firmware */ | 988 | /* Clear out any residual packet count state from firmware */ |
980 | pasemi_mac_restart_rx_intr(mac); | 989 | pasemi_mac_restart_rx_intr(mac); |
@@ -1001,37 +1010,25 @@ static int pasemi_mac_open(struct net_device *dev) | |||
1001 | netif_start_queue(dev); | 1010 | netif_start_queue(dev); |
1002 | napi_enable(&mac->napi); | 1011 | napi_enable(&mac->napi); |
1003 | 1012 | ||
1004 | /* Interrupts are a bit different for our DMA controller: While | ||
1005 | * it's got one a regular PCI device header, the interrupt there | ||
1006 | * is really the base of the range it's using. Each tx and rx | ||
1007 | * channel has it's own interrupt source. | ||
1008 | */ | ||
1009 | |||
1010 | base_irq = virq_to_hw(mac->dma_pdev->irq); | ||
1011 | |||
1012 | mac->tx_irq = irq_create_mapping(NULL, base_irq + mac->dma_txch); | ||
1013 | |||
1014 | snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx", | 1013 | snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx", |
1015 | dev->name); | 1014 | dev->name); |
1016 | 1015 | ||
1017 | ret = request_irq(mac->tx_irq, &pasemi_mac_tx_intr, IRQF_DISABLED, | 1016 | ret = request_irq(mac->tx->chan.irq, &pasemi_mac_tx_intr, IRQF_DISABLED, |
1018 | mac->tx_irq_name, mac->tx); | 1017 | mac->tx_irq_name, mac->tx); |
1019 | if (ret) { | 1018 | if (ret) { |
1020 | dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", | 1019 | dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", |
1021 | base_irq + mac->dma_txch, ret); | 1020 | mac->tx->chan.irq, ret); |
1022 | goto out_tx_int; | 1021 | goto out_tx_int; |
1023 | } | 1022 | } |
1024 | 1023 | ||
1025 | mac->rx_irq = irq_create_mapping(NULL, base_irq + 20 + mac->dma_rxch); | ||
1026 | |||
1027 | snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx", | 1024 | snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx", |
1028 | dev->name); | 1025 | dev->name); |
1029 | 1026 | ||
1030 | ret = request_irq(mac->rx_irq, &pasemi_mac_rx_intr, IRQF_DISABLED, | 1027 | ret = request_irq(mac->rx->chan.irq, &pasemi_mac_rx_intr, IRQF_DISABLED, |
1031 | mac->rx_irq_name, dev); | 1028 | mac->rx_irq_name, mac->rx); |
1032 | if (ret) { | 1029 | if (ret) { |
1033 | dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", | 1030 | dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", |
1034 | base_irq + 20 + mac->dma_rxch, ret); | 1031 | mac->rx->chan.irq, ret); |
1035 | goto out_rx_int; | 1032 | goto out_rx_int; |
1036 | } | 1033 | } |
1037 | 1034 | ||
@@ -1041,7 +1038,7 @@ static int pasemi_mac_open(struct net_device *dev) | |||
1041 | return 0; | 1038 | return 0; |
1042 | 1039 | ||
1043 | out_rx_int: | 1040 | out_rx_int: |
1044 | free_irq(mac->tx_irq, mac->tx); | 1041 | free_irq(mac->tx->chan.irq, mac->tx); |
1045 | out_tx_int: | 1042 | out_tx_int: |
1046 | napi_disable(&mac->napi); | 1043 | napi_disable(&mac->napi); |
1047 | netif_stop_queue(dev); | 1044 | netif_stop_queue(dev); |
@@ -1061,6 +1058,10 @@ static int pasemi_mac_close(struct net_device *dev) | |||
1061 | struct pasemi_mac *mac = netdev_priv(dev); | 1058 | struct pasemi_mac *mac = netdev_priv(dev); |
1062 | unsigned int sta; | 1059 | unsigned int sta; |
1063 | int retries; | 1060 | int retries; |
1061 | int rxch, txch; | ||
1062 | |||
1063 | rxch = rx_ring(mac)->chan.chno; | ||
1064 | txch = tx_ring(mac)->chan.chno; | ||
1064 | 1065 | ||
1065 | if (mac->phydev) { | 1066 | if (mac->phydev) { |
1066 | phy_stop(mac->phydev); | 1067 | phy_stop(mac->phydev); |
@@ -1070,20 +1071,20 @@ static int pasemi_mac_close(struct net_device *dev) | |||
1070 | netif_stop_queue(dev); | 1071 | netif_stop_queue(dev); |
1071 | napi_disable(&mac->napi); | 1072 | napi_disable(&mac->napi); |
1072 | 1073 | ||
1073 | sta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); | 1074 | sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); |
1074 | if (sta & (PAS_DMA_RXINT_RCMDSTA_BP | | 1075 | if (sta & (PAS_DMA_RXINT_RCMDSTA_BP | |
1075 | PAS_DMA_RXINT_RCMDSTA_OO | | 1076 | PAS_DMA_RXINT_RCMDSTA_OO | |
1076 | PAS_DMA_RXINT_RCMDSTA_BT)) | 1077 | PAS_DMA_RXINT_RCMDSTA_BT)) |
1077 | printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta); | 1078 | printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta); |
1078 | 1079 | ||
1079 | sta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch)); | 1080 | sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); |
1080 | if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU | | 1081 | if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU | |
1081 | PAS_DMA_RXCHAN_CCMDSTA_OD | | 1082 | PAS_DMA_RXCHAN_CCMDSTA_OD | |
1082 | PAS_DMA_RXCHAN_CCMDSTA_FD | | 1083 | PAS_DMA_RXCHAN_CCMDSTA_FD | |
1083 | PAS_DMA_RXCHAN_CCMDSTA_DT)) | 1084 | PAS_DMA_RXCHAN_CCMDSTA_DT)) |
1084 | printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta); | 1085 | printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta); |
1085 | 1086 | ||
1086 | sta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch)); | 1087 | sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); |
1087 | if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB | | 1088 | if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB | |
1088 | PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA)) | 1089 | PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA)) |
1089 | printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta); | 1090 | printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta); |
@@ -1093,26 +1094,25 @@ static int pasemi_mac_close(struct net_device *dev) | |||
1093 | pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); | 1094 | pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); |
1094 | 1095 | ||
1095 | /* Disable interface */ | 1096 | /* Disable interface */ |
1096 | write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), | 1097 | write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), |
1097 | PAS_DMA_TXCHAN_TCMDSTA_ST); | 1098 | PAS_DMA_TXCHAN_TCMDSTA_ST); |
1098 | write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), | 1099 | write_dma_reg( PAS_DMA_RXINT_RCMDSTA(mac->dma_if), |
1099 | PAS_DMA_RXINT_RCMDSTA_ST); | 1100 | PAS_DMA_RXINT_RCMDSTA_ST); |
1100 | write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), | 1101 | write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), |
1101 | PAS_DMA_RXCHAN_CCMDSTA_ST); | 1102 | PAS_DMA_RXCHAN_CCMDSTA_ST); |
1102 | 1103 | ||
1103 | for (retries = 0; retries < MAX_RETRIES; retries++) { | 1104 | for (retries = 0; retries < MAX_RETRIES; retries++) { |
1104 | sta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch)); | 1105 | sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(rxch)); |
1105 | if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) | 1106 | if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) |
1106 | break; | 1107 | break; |
1107 | cond_resched(); | 1108 | cond_resched(); |
1108 | } | 1109 | } |
1109 | 1110 | ||
1110 | if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) | 1111 | if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) |
1111 | dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel %d\n", | 1112 | dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n"); |
1112 | mac->dma_txch); | ||
1113 | 1113 | ||
1114 | for (retries = 0; retries < MAX_RETRIES; retries++) { | 1114 | for (retries = 0; retries < MAX_RETRIES; retries++) { |
1115 | sta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch)); | 1115 | sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); |
1116 | if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) | 1116 | if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) |
1117 | break; | 1117 | break; |
1118 | cond_resched(); | 1118 | cond_resched(); |
@@ -1122,7 +1122,7 @@ static int pasemi_mac_close(struct net_device *dev) | |||
1122 | dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n"); | 1122 | dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n"); |
1123 | 1123 | ||
1124 | for (retries = 0; retries < MAX_RETRIES; retries++) { | 1124 | for (retries = 0; retries < MAX_RETRIES; retries++) { |
1125 | sta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); | 1125 | sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); |
1126 | if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) | 1126 | if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) |
1127 | break; | 1127 | break; |
1128 | cond_resched(); | 1128 | cond_resched(); |
@@ -1135,12 +1135,12 @@ static int pasemi_mac_close(struct net_device *dev) | |||
1135 | * stopping, since you can't disable when active. | 1135 | * stopping, since you can't disable when active. |
1136 | */ | 1136 | */ |
1137 | 1137 | ||
1138 | write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0); | 1138 | write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0); |
1139 | write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0); | 1139 | write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0); |
1140 | write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); | 1140 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); |
1141 | 1141 | ||
1142 | free_irq(mac->tx_irq, mac->tx); | 1142 | free_irq(mac->tx->chan.irq, mac->tx); |
1143 | free_irq(mac->rx_irq, mac->rx); | 1143 | free_irq(mac->rx->chan.irq, mac->rx); |
1144 | 1144 | ||
1145 | /* Free resources */ | 1145 | /* Free resources */ |
1146 | pasemi_mac_free_rx_resources(mac); | 1146 | pasemi_mac_free_rx_resources(mac); |
@@ -1239,7 +1239,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1239 | 1239 | ||
1240 | spin_unlock_irqrestore(&txring->lock, flags); | 1240 | spin_unlock_irqrestore(&txring->lock, flags); |
1241 | 1241 | ||
1242 | write_dma_reg(mac, PAS_DMA_TXCHAN_INCR(txring->chan), (nfrags+2) >> 1); | 1242 | write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1); |
1243 | 1243 | ||
1244 | return NETDEV_TX_OK; | 1244 | return NETDEV_TX_OK; |
1245 | 1245 | ||
@@ -1287,77 +1287,9 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget) | |||
1287 | return pkts; | 1287 | return pkts; |
1288 | } | 1288 | } |
1289 | 1289 | ||
1290 | static void __iomem * __devinit map_onedev(struct pci_dev *p, int index) | ||
1291 | { | ||
1292 | struct device_node *dn; | ||
1293 | void __iomem *ret; | ||
1294 | |||
1295 | dn = pci_device_to_OF_node(p); | ||
1296 | if (!dn) | ||
1297 | goto fallback; | ||
1298 | |||
1299 | ret = of_iomap(dn, index); | ||
1300 | if (!ret) | ||
1301 | goto fallback; | ||
1302 | |||
1303 | return ret; | ||
1304 | fallback: | ||
1305 | /* This is hardcoded and ugly, but we have some firmware versions | ||
1306 | * that don't provide the register space in the device tree. Luckily | ||
1307 | * they are at well-known locations so we can just do the math here. | ||
1308 | */ | ||
1309 | return ioremap(0xe0000000 + (p->devfn << 12), 0x2000); | ||
1310 | } | ||
1311 | |||
1312 | static int __devinit pasemi_mac_map_regs(struct pasemi_mac *mac) | ||
1313 | { | ||
1314 | struct resource res; | ||
1315 | struct device_node *dn; | ||
1316 | int err; | ||
1317 | |||
1318 | mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); | ||
1319 | if (!mac->dma_pdev) { | ||
1320 | dev_err(&mac->pdev->dev, "Can't find DMA Controller\n"); | ||
1321 | return -ENODEV; | ||
1322 | } | ||
1323 | |||
1324 | mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); | ||
1325 | if (!mac->iob_pdev) { | ||
1326 | dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n"); | ||
1327 | return -ENODEV; | ||
1328 | } | ||
1329 | |||
1330 | mac->regs = map_onedev(mac->pdev, 0); | ||
1331 | mac->dma_regs = map_onedev(mac->dma_pdev, 0); | ||
1332 | mac->iob_regs = map_onedev(mac->iob_pdev, 0); | ||
1333 | |||
1334 | if (!mac->regs || !mac->dma_regs || !mac->iob_regs) { | ||
1335 | dev_err(&mac->pdev->dev, "Can't map registers\n"); | ||
1336 | return -ENODEV; | ||
1337 | } | ||
1338 | |||
1339 | /* The dma status structure is located in the I/O bridge, and | ||
1340 | * is cache coherent. | ||
1341 | */ | ||
1342 | if (!dma_status) { | ||
1343 | dn = pci_device_to_OF_node(mac->iob_pdev); | ||
1344 | if (dn) | ||
1345 | err = of_address_to_resource(dn, 1, &res); | ||
1346 | if (!dn || err) { | ||
1347 | /* Fallback for old firmware */ | ||
1348 | res.start = 0xfd800000; | ||
1349 | res.end = res.start + 0x1000; | ||
1350 | } | ||
1351 | dma_status = __ioremap(res.start, res.end-res.start, 0); | ||
1352 | } | ||
1353 | |||
1354 | return 0; | ||
1355 | } | ||
1356 | |||
1357 | static int __devinit | 1290 | static int __devinit |
1358 | pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 1291 | pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1359 | { | 1292 | { |
1360 | static int index = 0; | ||
1361 | struct net_device *dev; | 1293 | struct net_device *dev; |
1362 | struct pasemi_mac *mac; | 1294 | struct pasemi_mac *mac; |
1363 | int err; | 1295 | int err; |
@@ -1387,18 +1319,33 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1387 | 1319 | ||
1388 | dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG; | 1320 | dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG; |
1389 | 1321 | ||
1390 | /* These should come out of the device tree eventually */ | 1322 | mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); |
1391 | mac->dma_txch = index; | 1323 | if (!mac->dma_pdev) { |
1392 | mac->dma_rxch = index; | 1324 | dev_err(&mac->pdev->dev, "Can't find DMA Controller\n"); |
1325 | err = -ENODEV; | ||
1326 | goto out; | ||
1327 | } | ||
1393 | 1328 | ||
1394 | /* We probe GMAC before XAUI, but the DMA interfaces are | 1329 | mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); |
1395 | * in XAUI, GMAC order. | 1330 | if (!mac->iob_pdev) { |
1396 | */ | 1331 | dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n"); |
1397 | if (index < 4) | 1332 | err = -ENODEV; |
1398 | mac->dma_if = index + 2; | 1333 | goto out; |
1399 | else | 1334 | } |
1400 | mac->dma_if = index - 4; | 1335 | |
1401 | index++; | 1336 | /* get mac addr from device tree */ |
1337 | if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) { | ||
1338 | err = -ENODEV; | ||
1339 | goto out; | ||
1340 | } | ||
1341 | memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr)); | ||
1342 | |||
1343 | mac->dma_if = mac_to_intf(mac); | ||
1344 | if (mac->dma_if < 0) { | ||
1345 | dev_err(&mac->pdev->dev, "Can't map DMA interface\n"); | ||
1346 | err = -ENODEV; | ||
1347 | goto out; | ||
1348 | } | ||
1402 | 1349 | ||
1403 | switch (pdev->device) { | 1350 | switch (pdev->device) { |
1404 | case 0xa005: | 1351 | case 0xa005: |
@@ -1412,19 +1359,11 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1412 | goto out; | 1359 | goto out; |
1413 | } | 1360 | } |
1414 | 1361 | ||
1415 | /* get mac addr from device tree */ | ||
1416 | if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) { | ||
1417 | err = -ENODEV; | ||
1418 | goto out; | ||
1419 | } | ||
1420 | memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr)); | ||
1421 | |||
1422 | dev->open = pasemi_mac_open; | 1362 | dev->open = pasemi_mac_open; |
1423 | dev->stop = pasemi_mac_close; | 1363 | dev->stop = pasemi_mac_close; |
1424 | dev->hard_start_xmit = pasemi_mac_start_tx; | 1364 | dev->hard_start_xmit = pasemi_mac_start_tx; |
1425 | dev->set_multicast_list = pasemi_mac_set_rx_mode; | 1365 | dev->set_multicast_list = pasemi_mac_set_rx_mode; |
1426 | 1366 | ||
1427 | err = pasemi_mac_map_regs(mac); | ||
1428 | if (err) | 1367 | if (err) |
1429 | goto out; | 1368 | goto out; |
1430 | 1369 | ||
@@ -1451,12 +1390,6 @@ out: | |||
1451 | pci_dev_put(mac->iob_pdev); | 1390 | pci_dev_put(mac->iob_pdev); |
1452 | if (mac->dma_pdev) | 1391 | if (mac->dma_pdev) |
1453 | pci_dev_put(mac->dma_pdev); | 1392 | pci_dev_put(mac->dma_pdev); |
1454 | if (mac->dma_regs) | ||
1455 | iounmap(mac->dma_regs); | ||
1456 | if (mac->iob_regs) | ||
1457 | iounmap(mac->iob_regs); | ||
1458 | if (mac->regs) | ||
1459 | iounmap(mac->regs); | ||
1460 | 1393 | ||
1461 | free_netdev(dev); | 1394 | free_netdev(dev); |
1462 | out_disable_device: | 1395 | out_disable_device: |
@@ -1481,9 +1414,8 @@ static void __devexit pasemi_mac_remove(struct pci_dev *pdev) | |||
1481 | pci_dev_put(mac->dma_pdev); | 1414 | pci_dev_put(mac->dma_pdev); |
1482 | pci_dev_put(mac->iob_pdev); | 1415 | pci_dev_put(mac->iob_pdev); |
1483 | 1416 | ||
1484 | iounmap(mac->regs); | 1417 | pasemi_dma_free_chan(&mac->tx->chan); |
1485 | iounmap(mac->dma_regs); | 1418 | pasemi_dma_free_chan(&mac->rx->chan); |
1486 | iounmap(mac->iob_regs); | ||
1487 | 1419 | ||
1488 | pci_set_drvdata(pdev, NULL); | 1420 | pci_set_drvdata(pdev, NULL); |
1489 | free_netdev(netdev); | 1421 | free_netdev(netdev); |
@@ -1507,12 +1439,16 @@ static struct pci_driver pasemi_mac_driver = { | |||
1507 | static void __exit pasemi_mac_cleanup_module(void) | 1439 | static void __exit pasemi_mac_cleanup_module(void) |
1508 | { | 1440 | { |
1509 | pci_unregister_driver(&pasemi_mac_driver); | 1441 | pci_unregister_driver(&pasemi_mac_driver); |
1510 | __iounmap(dma_status); | ||
1511 | dma_status = NULL; | ||
1512 | } | 1442 | } |
1513 | 1443 | ||
1514 | int pasemi_mac_init_module(void) | 1444 | int pasemi_mac_init_module(void) |
1515 | { | 1445 | { |
1446 | int err; | ||
1447 | |||
1448 | err = pasemi_dma_init(); | ||
1449 | if (err) | ||
1450 | return err; | ||
1451 | |||
1516 | return pci_register_driver(&pasemi_mac_driver); | 1452 | return pci_register_driver(&pasemi_mac_driver); |
1517 | } | 1453 | } |
1518 | 1454 | ||
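
One non-mechanical addition above is mac_to_intf(), which replaces the old hard-coded "GMAC before XAUI" interface numbering: PAS_DMA_CAP_IFI advertises the number of interfaces (NIN) and the offset (IOFF) of registers that pack one PCI devfn per byte, four interfaces per 32-bit word, and the helper simply scans those bytes for the MAC's own devfn. A tiny stand-alone sketch of that unpacking, using made-up register contents, illustrates the search:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: pretend the interface-mapping registers hold these
 * values.  Interface 0 is the low byte of word 0, interface 1 the next
 * byte, and so on, one devfn per byte.
 */
static const uint32_t ifmap[2] = { 0x1a191811, 0x00002221 };

static int devfn_to_intf(int devfn, int nintf)
{
	int i, j;

	for (i = 0; i < (nintf + 3) / 4; i++)	/* one 32-bit word per 4 interfaces */
		for (j = 0; j < 4; j++)
			if (((ifmap[i] >> (8 * j)) & 0xff) == devfn)
				return i * 4 + j;

	return -1;	/* this devfn is not wired to any DMA interface */
}

int main(void)
{
	/* devfn 0x21 is the low byte of word 1, so it maps to interface 4 */
	printf("interface %d\n", devfn_to_intf(0x21, 6));
	return 0;
}
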
diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h
index c61c0110e085..64b2047d19f0 100644
--- a/drivers/net/pasemi_mac.h
+++ b/drivers/net/pasemi_mac.h
@@ -27,23 +27,18 @@ | |||
27 | #include <linux/phy.h> | 27 | #include <linux/phy.h> |
28 | 28 | ||
29 | struct pasemi_mac_txring { | 29 | struct pasemi_mac_txring { |
30 | struct pasemi_dmachan chan; /* Must be first */ | ||
30 | spinlock_t lock; | 31 | spinlock_t lock; |
31 | u64 *status; /* Ptr to cacheable status area */ | ||
32 | u64 *ring; | ||
33 | dma_addr_t dma; | ||
34 | unsigned int size; | 32 | unsigned int size; |
35 | unsigned int next_to_fill; | 33 | unsigned int next_to_fill; |
36 | unsigned int next_to_clean; | 34 | unsigned int next_to_clean; |
37 | struct pasemi_mac_buffer *ring_info; | 35 | struct pasemi_mac_buffer *ring_info; |
38 | int chan; | ||
39 | struct pasemi_mac *mac; /* Needed in intr handler */ | 36 | struct pasemi_mac *mac; /* Needed in intr handler */ |
40 | }; | 37 | }; |
41 | 38 | ||
42 | struct pasemi_mac_rxring { | 39 | struct pasemi_mac_rxring { |
40 | struct pasemi_dmachan chan; /* Must be first */ | ||
43 | spinlock_t lock; | 41 | spinlock_t lock; |
44 | u64 *status; /* Ptr to cacheable status area */ | ||
45 | u64 *ring; /* RX channel descriptor ring */ | ||
46 | dma_addr_t dma; | ||
47 | u64 *buffers; /* RX interface buffer ring */ | 42 | u64 *buffers; /* RX interface buffer ring */ |
48 | dma_addr_t buf_dma; | 43 | dma_addr_t buf_dma; |
49 | unsigned int size; | 44 | unsigned int size; |
@@ -55,9 +50,6 @@ struct pasemi_mac_rxring { | |||
55 | 50 | ||
56 | struct pasemi_mac { | 51 | struct pasemi_mac { |
57 | struct net_device *netdev; | 52 | struct net_device *netdev; |
58 | void __iomem *regs; | ||
59 | void __iomem *dma_regs; | ||
60 | void __iomem *iob_regs; | ||
61 | struct pci_dev *pdev; | 53 | struct pci_dev *pdev; |
62 | struct pci_dev *dma_pdev; | 54 | struct pci_dev *dma_pdev; |
63 | struct pci_dev *iob_pdev; | 55 | struct pci_dev *iob_pdev; |
@@ -67,8 +59,6 @@ struct pasemi_mac { | |||
67 | u8 type; | 59 | u8 type; |
68 | #define MAC_TYPE_GMAC 1 | 60 | #define MAC_TYPE_GMAC 1 |
69 | #define MAC_TYPE_XAUI 2 | 61 | #define MAC_TYPE_XAUI 2 |
70 | u32 dma_txch; | ||
71 | u32 dma_rxch; | ||
72 | u32 dma_if; | 62 | u32 dma_if; |
73 | 63 | ||
74 | u8 mac_addr[6]; | 64 | u8 mac_addr[6]; |
@@ -77,8 +67,6 @@ struct pasemi_mac { | |||
77 | 67 | ||
78 | struct pasemi_mac_txring *tx; | 68 | struct pasemi_mac_txring *tx; |
79 | struct pasemi_mac_rxring *rx; | 69 | struct pasemi_mac_rxring *rx; |
80 | unsigned int tx_irq; | ||
81 | unsigned int rx_irq; | ||
82 | char tx_irq_name[10]; /* "eth%d tx" */ | 70 | char tx_irq_name[10]; /* "eth%d tx" */ |
83 | char rx_irq_name[10]; /* "eth%d rx" */ | 71 | char rx_irq_name[10]; /* "eth%d rx" */ |
84 | int link; | 72 | int link; |
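
The "Must be first" comments on the new chan members go hand in hand with the offsetof() arguments passed to pasemi_dma_alloc_chan() in the .c changes: the library allocates the whole ring structure on the driver's behalf and locates its pasemi_dmachan at the stated offset, so the returned pointer doubles as a pointer to the driver's ring. A generic sketch of that embedding pattern, with purely hypothetical names, shows why the offset matters:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the library and driver types. */
struct dmachan {
	int chno;
};

struct rxring {
	struct dmachan chan;	/* must sit at the offset passed to the allocator */
	int next_to_fill;
};

/* Library-style allocator: it allocates 'total' bytes for the caller's
 * structure and initializes the channel it finds at 'offset' inside it.
 */
static void *alloc_chan_sketch(size_t total, size_t offset)
{
	char *obj = calloc(1, total);

	if (!obj)
		return NULL;
	((struct dmachan *)(obj + offset))->chno = 7;	/* pretend we got channel 7 */
	return obj;
}

int main(void)
{
	struct rxring *ring = alloc_chan_sketch(sizeof(*ring),
						offsetof(struct rxring, chan));

	if (ring)
		printf("ring uses channel %d\n", ring->chan.chno);
	free(ring);
	return 0;
}
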