author    Matt Porter <mporter@kernel.crashing.org>    2005-09-09 15:10:10 -0400
committer Jeff Garzik <jgarzik@pobox.com>    2005-09-14 09:18:21 -0400
commit    f89efd523b25cc1702e074dafdcac283da657002 (patch)
tree      f33440ed3f4ba3594d6c3316bdcdda420ce11801 /drivers/net/rionet.c
parent    43ec6e95e4d8a73afc2405a44b955c380aeeb65a (diff)
[PATCH] Add rapidio net driver
Adds an "Ethernet" driver which sends Ethernet packets over the standard RapidIO messaging. This depends on the core RIO patch for mailbox/doorbell access. Signed-off-by: Matt Porter <mporter@kernel.crashing.org> Cc: Jeff Garzik <jgarzik@pobox.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net/rionet.c')
-rw-r--r-- | drivers/net/rionet.c | 574
1 file changed, 574 insertions(+), 0 deletions(-)
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
new file mode 100644
index 000000000000..12cde0604580
--- /dev/null
+++ b/drivers/net/rionet.c
@@ -0,0 +1,574 @@
1/*
2 * rionet - Ethernet driver over RapidIO messaging services
3 *
4 * Copyright 2005 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/dma-mapping.h>
16#include <linux/delay.h>
17#include <linux/rio.h>
18#include <linux/rio_drv.h>
19#include <linux/rio_ids.h>
20
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/skbuff.h>
24#include <linux/crc32.h>
25#include <linux/ethtool.h>
26
27#define DRV_NAME "rionet"
28#define DRV_VERSION "0.2"
29#define DRV_AUTHOR "Matt Porter <mporter@kernel.crashing.org>"
30#define DRV_DESC "Ethernet over RapidIO"
31
32MODULE_AUTHOR(DRV_AUTHOR);
33MODULE_DESCRIPTION(DRV_DESC);
34MODULE_LICENSE("GPL");
35
36#define RIONET_DEFAULT_MSGLEVEL \
37 (NETIF_MSG_DRV | \
38 NETIF_MSG_LINK | \
39 NETIF_MSG_RX_ERR | \
40 NETIF_MSG_TX_ERR)
41
42#define RIONET_DOORBELL_JOIN 0x1000
43#define RIONET_DOORBELL_LEAVE 0x1001
44
45#define RIONET_MAILBOX 0
46
47#define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE
48#define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE
49
50static LIST_HEAD(rionet_peers);
51
52struct rionet_private {
53 struct rio_mport *mport;
54 struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];
55 struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];
56 struct net_device_stats stats;
57 int rx_slot;
58 int tx_slot;
59 int tx_cnt;
60 int ack_slot;
61 spinlock_t lock;
62 spinlock_t tx_lock;
63 u32 msg_enable;
64};
65
66struct rionet_peer {
67 struct list_head node;
68 struct rio_dev *rdev;
69 struct resource *res;
70};
71
72static int rionet_check = 0;
73static int rionet_capable = 1;
74
75/*
76 * This is a fast lookup table for translating TX
77 * Ethernet packets into a destination RIO device. It
78 * could be made into a hash table to save memory depending
79 * on system trade-offs.
80 */
81static struct rio_dev *rionet_active[RIO_MAX_ROUTE_ENTRIES];
82
83#define is_rionet_capable(pef, src_ops, dst_ops) \
84 ((pef & RIO_PEF_INB_MBOX) && \
85 (pef & RIO_PEF_INB_DOORBELL) && \
86 (src_ops & RIO_SRC_OPS_DOORBELL) && \
87 (dst_ops & RIO_DST_OPS_DOORBELL))
88#define dev_rionet_capable(dev) \
89 is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops)
90
91#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001)
92#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4))
93
94static struct net_device_stats *rionet_stats(struct net_device *ndev)
95{
96 struct rionet_private *rnet = ndev->priv;
97 return &rnet->stats;
98}
99
100static int rionet_rx_clean(struct net_device *ndev)
101{
102 int i;
103 int error = 0;
104 struct rionet_private *rnet = ndev->priv;
105 void *data;
106
107 i = rnet->rx_slot;
108
109 do {
110 if (!rnet->rx_skb[i])
111 continue;
112
113 if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
114 break;
115
116 rnet->rx_skb[i]->data = data;
117 skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
118 rnet->rx_skb[i]->dev = ndev;
119 rnet->rx_skb[i]->protocol =
120 eth_type_trans(rnet->rx_skb[i], ndev);
121 error = netif_rx(rnet->rx_skb[i]);
122
123 if (error == NET_RX_DROP) {
124 rnet->stats.rx_dropped++;
125 } else if (error == NET_RX_BAD) {
126 if (netif_msg_rx_err(rnet))
127 printk(KERN_WARNING "%s: bad rx packet\n",
128 DRV_NAME);
129 rnet->stats.rx_errors++;
130 } else {
131 rnet->stats.rx_packets++;
132 rnet->stats.rx_bytes += RIO_MAX_MSG_SIZE;
133 }
134
135 } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);
136
137 return i;
138}
139
140static void rionet_rx_fill(struct net_device *ndev, int end)
141{
142 int i;
143 struct rionet_private *rnet = ndev->priv;
144
145 i = rnet->rx_slot;
146 do {
147 rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);
148
149 if (!rnet->rx_skb[i])
150 break;
151
152 rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
153 rnet->rx_skb[i]->data);
154 } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);
155
156 rnet->rx_slot = i;
157}
158
159static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
160 struct rio_dev *rdev)
161{
162 struct rionet_private *rnet = ndev->priv;
163
164 rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
165 rnet->tx_skb[rnet->tx_slot] = skb;
166
167 rnet->stats.tx_packets++;
168 rnet->stats.tx_bytes += skb->len;
169
170 if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
171 netif_stop_queue(ndev);
172
173 ++rnet->tx_slot;
174 rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);
175
176 if (netif_msg_tx_queued(rnet))
177 printk(KERN_INFO "%s: queued skb %8.8x len %8.8x\n", DRV_NAME,
178 (u32) skb, skb->len);
179
180 return 0;
181}
182
183static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
184{
185 int i;
186 struct rionet_private *rnet = ndev->priv;
187 struct ethhdr *eth = (struct ethhdr *)skb->data;
188 u16 destid;
189 unsigned long flags;
190
191 local_irq_save(flags);
192 if (!spin_trylock(&rnet->tx_lock)) {
193 local_irq_restore(flags);
194 return NETDEV_TX_LOCKED;
195 }
196
197 if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
198 netif_stop_queue(ndev);
199 spin_unlock_irqrestore(&rnet->tx_lock, flags);
200 printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
201 ndev->name);
202 return NETDEV_TX_BUSY;
203 }
204
205 if (eth->h_dest[0] & 0x01) {
206 for (i = 0; i < RIO_MAX_ROUTE_ENTRIES; i++)
207 if (rionet_active[i])
208 rionet_queue_tx_msg(skb, ndev,
209 rionet_active[i]);
210 } else if (RIONET_MAC_MATCH(eth->h_dest)) {
211 destid = RIONET_GET_DESTID(eth->h_dest);
212 if (rionet_active[destid])
213 rionet_queue_tx_msg(skb, ndev, rionet_active[destid]);
214 }
215
216 spin_unlock_irqrestore(&rnet->tx_lock, flags);
217
218 return 0;
219}
220
221static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
222 u16 info)
223{
224 struct net_device *ndev = dev_id;
225 struct rionet_private *rnet = ndev->priv;
226 struct rionet_peer *peer;
227
228 if (netif_msg_intr(rnet))
229 printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
230 DRV_NAME, sid, tid, info);
231 if (info == RIONET_DOORBELL_JOIN) {
232 if (!rionet_active[sid]) {
233 list_for_each_entry(peer, &rionet_peers, node) {
234 if (peer->rdev->destid == sid)
235 rionet_active[sid] = peer->rdev;
236 }
237 rio_mport_send_doorbell(mport, sid,
238 RIONET_DOORBELL_JOIN);
239 }
240 } else if (info == RIONET_DOORBELL_LEAVE) {
241 rionet_active[sid] = NULL;
242 } else {
243 if (netif_msg_intr(rnet))
244 printk(KERN_WARNING "%s: unhandled doorbell\n",
245 DRV_NAME);
246 }
247}
248
249static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
250{
251 int n;
252 struct net_device *ndev = dev_id;
253 struct rionet_private *rnet = (struct rionet_private *)ndev->priv;
254
255 if (netif_msg_intr(rnet))
256 printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
257 DRV_NAME, mbox, slot);
258
259 spin_lock(&rnet->lock);
260 if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
261 rionet_rx_fill(ndev, n);
262 spin_unlock(&rnet->lock);
263}
264
265static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
266{
267 struct net_device *ndev = dev_id;
268 struct rionet_private *rnet = ndev->priv;
269
270 spin_lock(&rnet->lock);
271
272 if (netif_msg_intr(rnet))
273 printk(KERN_INFO
274 "%s: outbound message event, mbox %d slot %d\n",
275 DRV_NAME, mbox, slot);
276
277 while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
278 /* dma unmap single */
279 dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
280 rnet->tx_skb[rnet->ack_slot] = NULL;
281 ++rnet->ack_slot;
282 rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
283 rnet->tx_cnt--;
284 }
285
286 if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
287 netif_wake_queue(ndev);
288
289 spin_unlock(&rnet->lock);
290}
291
292static int rionet_open(struct net_device *ndev)
293{
294 int i, rc = 0;
295 struct rionet_peer *peer, *tmp;
296 u32 pwdcsr;
297 struct rionet_private *rnet = ndev->priv;
298
299 if (netif_msg_ifup(rnet))
300 printk(KERN_INFO "%s: open\n", DRV_NAME);
301
302 if ((rc = rio_request_inb_dbell(rnet->mport,
303 (void *)ndev,
304 RIONET_DOORBELL_JOIN,
305 RIONET_DOORBELL_LEAVE,
306 rionet_dbell_event)) < 0)
307 goto out;
308
309 if ((rc = rio_request_inb_mbox(rnet->mport,
310 (void *)ndev,
311 RIONET_MAILBOX,
312 RIONET_RX_RING_SIZE,
313 rionet_inb_msg_event)) < 0)
314 goto out;
315
316 if ((rc = rio_request_outb_mbox(rnet->mport,
317 (void *)ndev,
318 RIONET_MAILBOX,
319 RIONET_TX_RING_SIZE,
320 rionet_outb_msg_event)) < 0)
321 goto out;
322
323 /* Initialize inbound message ring */
324 for (i = 0; i < RIONET_RX_RING_SIZE; i++)
325 rnet->rx_skb[i] = NULL;
326 rnet->rx_slot = 0;
327 rionet_rx_fill(ndev, 0);
328
329 rnet->tx_slot = 0;
330 rnet->tx_cnt = 0;
331 rnet->ack_slot = 0;
332
333 netif_carrier_on(ndev);
334 netif_start_queue(ndev);
335
336 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
337 if (!(peer->res = rio_request_outb_dbell(peer->rdev,
338 RIONET_DOORBELL_JOIN,
339 RIONET_DOORBELL_LEAVE)))
340 {
341 printk(KERN_ERR "%s: error requesting doorbells\n",
342 DRV_NAME);
343 continue;
344 }
345
346 /*
347 * If device has initialized inbound doorbells,
348 * send a join message
349 */
350 rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
351 if (pwdcsr & RIO_DOORBELL_AVAIL)
352 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
353 }
354
355 out:
356 return rc;
357}
358
359static int rionet_close(struct net_device *ndev)
360{
361 struct rionet_private *rnet = (struct rionet_private *)ndev->priv;
362 struct rionet_peer *peer, *tmp;
363 int i;
364
365 if (netif_msg_ifup(rnet))
366 printk(KERN_INFO "%s: close\n", DRV_NAME);
367
368 netif_stop_queue(ndev);
369 netif_carrier_off(ndev);
370
371 for (i = 0; i < RIONET_RX_RING_SIZE; i++)
372 if (rnet->rx_skb[i])
373 kfree_skb(rnet->rx_skb[i]);
374
375 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
376 if (rionet_active[peer->rdev->destid]) {
377 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
378 rionet_active[peer->rdev->destid] = NULL;
379 }
380 rio_release_outb_dbell(peer->rdev, peer->res);
381 }
382
383 rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
384 RIONET_DOORBELL_LEAVE);
385 rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
386 rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);
387
388 return 0;
389}
390
391static void rionet_remove(struct rio_dev *rdev)
392{
393 struct net_device *ndev = NULL;
394 struct rionet_peer *peer, *tmp;
395
396 unregister_netdev(ndev);
397 kfree(ndev);
398
399 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
400 list_del(&peer->node);
401 kfree(peer);
402 }
403}
404
405static void rionet_get_drvinfo(struct net_device *ndev,
406 struct ethtool_drvinfo *info)
407{
408 struct rionet_private *rnet = ndev->priv;
409
410 strcpy(info->driver, DRV_NAME);
411 strcpy(info->version, DRV_VERSION);
412 strcpy(info->fw_version, "n/a");
413 strcpy(info->bus_info, rnet->mport->name);
414}
415
416static u32 rionet_get_msglevel(struct net_device *ndev)
417{
418 struct rionet_private *rnet = ndev->priv;
419
420 return rnet->msg_enable;
421}
422
423static void rionet_set_msglevel(struct net_device *ndev, u32 value)
424{
425 struct rionet_private *rnet = ndev->priv;
426
427 rnet->msg_enable = value;
428}
429
430static struct ethtool_ops rionet_ethtool_ops = {
431 .get_drvinfo = rionet_get_drvinfo,
432 .get_msglevel = rionet_get_msglevel,
433 .set_msglevel = rionet_set_msglevel,
434 .get_link = ethtool_op_get_link,
435};
436
437static int rionet_setup_netdev(struct rio_mport *mport)
438{
439 int rc = 0;
440 struct net_device *ndev = NULL;
441 struct rionet_private *rnet;
442 u16 device_id;
443
444 /* Allocate our net_device structure */
445 ndev = alloc_etherdev(sizeof(struct rionet_private));
446 if (ndev == NULL) {
447 printk(KERN_INFO "%s: could not allocate ethernet device.\n",
448 DRV_NAME);
449 rc = -ENOMEM;
450 goto out;
451 }
452
453 /* Set up private area */
454 rnet = (struct rionet_private *)ndev->priv;
455 rnet->mport = mport;
456
457 /* Set the default MAC address */
458 device_id = rio_local_get_device_id(mport);
459 ndev->dev_addr[0] = 0x00;
460 ndev->dev_addr[1] = 0x01;
461 ndev->dev_addr[2] = 0x00;
462 ndev->dev_addr[3] = 0x01;
463 ndev->dev_addr[4] = device_id >> 8;
464 ndev->dev_addr[5] = device_id & 0xff;
465
466 /* Fill in the driver function table */
467 ndev->open = &rionet_open;
468 ndev->hard_start_xmit = &rionet_start_xmit;
469 ndev->stop = &rionet_close;
470 ndev->get_stats = &rionet_stats;
471 ndev->mtu = RIO_MAX_MSG_SIZE - 14;
472 ndev->features = NETIF_F_LLTX;
473 SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
474
475 SET_MODULE_OWNER(ndev);
476
477 spin_lock_init(&rnet->lock);
478 spin_lock_init(&rnet->tx_lock);
479
480 rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
481
482 rc = register_netdev(ndev);
483 if (rc != 0)
484 goto out;
485
486 printk("%s: %s %s Version %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
487 ndev->name,
488 DRV_NAME,
489 DRV_DESC,
490 DRV_VERSION,
491 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
492 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
493
494 out:
495 return rc;
496}
497
498/*
499 * XXX Make multi-net safe
500 */
501static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
502{
503 int rc = -ENODEV;
504 u32 lpef, lsrc_ops, ldst_ops;
505 struct rionet_peer *peer;
506
507 /* If local device is not rionet capable, give up quickly */
508 if (!rionet_capable)
509 goto out;
510
511 /*
512 * First time through, make sure local device is rionet
513 * capable, setup netdev, and set flags so this is skipped
514 * on later probes
515 */
516 if (!rionet_check) {
517 rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
518 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
519 &lsrc_ops);
520 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
521 &ldst_ops);
522 if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) {
523 printk(KERN_ERR
524 "%s: local device is not network capable\n",
525 DRV_NAME);
526 rionet_check = 1;
527 rionet_capable = 0;
528 goto out;
529 }
530
531 rc = rionet_setup_netdev(rdev->net->hport);
532 rionet_check = 1;
533 }
534
535 /*
536 * If the remote device has mailbox/doorbell capabilities,
537 * add it to the peer list.
538 */
539 if (dev_rionet_capable(rdev)) {
540 if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) {
541 rc = -ENOMEM;
542 goto out;
543 }
544 peer->rdev = rdev;
545 list_add_tail(&peer->node, &rionet_peers);
546 }
547
548 out:
549 return rc;
550}
551
552static struct rio_device_id rionet_id_table[] = {
553 {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)}
554};
555
556static struct rio_driver rionet_driver = {
557 .name = "rionet",
558 .id_table = rionet_id_table,
559 .probe = rionet_probe,
560 .remove = rionet_remove,
561};
562
563static int __init rionet_init(void)
564{
565 return rio_register_driver(&rionet_driver);
566}
567
568static void __exit rionet_exit(void)
569{
570 rio_unregister_driver(&rionet_driver);
571}
572
573module_init(rionet_init);
574module_exit(rionet_exit);