author:    Alexandre Bounine <alexandre.bounine@idt.com>  2016-08-02 17:06:25 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2016-08-02 19:35:31 -0400
commit:    b6e8d4aa1110306378af0f3472a6b85a1f039a16 (patch)
tree:      bcf1c2b15c153284d723c2cc5826401276c7837c /drivers/rapidio/rio_cm.c
parent:    1730f146604ea426e54938cdbcf87df1047ef0dc (diff)
rapidio: add RapidIO channelized messaging driver
Add channelized messaging driver to support native RapidIO messaging exchange between multiple senders/recipients on devices that use kernel RapidIO subsystem services.

This device driver is the result of collaboration within the RapidIO.org Software Task Group (STG) between Texas Instruments, Prodrive Technologies, Nokia Networks, BAE and IDT. Additional input was received from other members of RapidIO.org. The objective was to create a character mode driver interface which exposes messaging capabilities of RapidIO endpoint devices (mports) directly to applications, in a manner that allows the numerous and varied RapidIO implementations to interoperate.

This char mode device driver allows user-space applications to set up messaging communication channels using a single shared RapidIO messaging mailbox. By default this driver uses RapidIO MBOX_1 (MBOX_0 is reserved for use by the RIONET Ethernet emulation driver).

[weiyj.lk@gmail.com: rapidio/rio_cm: fix return value check in riocm_init()]
Link: http://lkml.kernel.org/r/1469198221-21970-1-git-send-email-alexandre.bounine@idt.com
Link: http://lkml.kernel.org/r/1468952862-18056-1-git-send-email-alexandre.bounine@idt.com
Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Tested-by: Barry Wood <barry.wood@idt.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
Cc: Andre van Herk <andre.van.herk@prodrive-technologies.com>
Cc: Barry Wood <barry.wood@idt.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
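For orientation, here is a minimal user-space sketch of the resulting ioctl interface. It is hypothetical illustration, not part of the patch: the RIO_CM_* command codes and the rio_cm_channel/rio_cm_msg structures are assumed to match the new <linux/rio_cm_cdev.h> UAPI header as used by the ioctl handlers in the driver below, and CM_HDR_SIZE assumes the 20-byte rio_ch_chan_hdr defined in this file.

/*
 * Hypothetical connect-and-send client; error handling trimmed for brevity.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rio_cm_cdev.h>

#define CM_HDR_SIZE 20  /* sizeof(struct rio_ch_chan_hdr), reserved in buffer */

int cm_send_hello(uint32_t destid, uint16_t rem_ch)
{
        struct rio_cm_channel chan = {0};
        struct rio_cm_msg msg = {0};
        char buf[CM_HDR_SIZE + 64];
        uint16_t ch_num = 0;            /* 0 = pick a free channel ID */
        int ret = -1;

        int fd = open("/dev/rio_cm", O_RDWR);
        if (fd < 0)
                return -1;

        if (ioctl(fd, RIO_CM_CHAN_CREATE, &ch_num))
                goto out;

        chan.id = ch_num;
        chan.mport_id = 0;              /* first local mport */
        chan.remote_destid = destid;
        chan.remote_channel = rem_ch;
        if (ioctl(fd, RIO_CM_CHAN_CONNECT, &chan))
                goto out_close;

        /* The driver writes the CM header over the first bytes of the
         * buffer, so the payload starts at offset CM_HDR_SIZE and
         * msg.size covers header + payload. */
        strcpy(buf + CM_HDR_SIZE, "hello");
        msg.ch_num = ch_num;
        msg.size = sizeof(buf);
        msg.msg = (uint64_t)(uintptr_t)buf;
        ret = ioctl(fd, RIO_CM_CHAN_SEND, &msg);

out_close:
        ioctl(fd, RIO_CM_CHAN_CLOSE, &ch_num);
out:
        close(fd);
        return ret;
}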
Diffstat (limited to 'drivers/rapidio/rio_cm.c')
-rw-r--r--  drivers/rapidio/rio_cm.c  2366
1 file changed, 2366 insertions(+), 0 deletions(-)
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
new file mode 100644
index 000000000000..cecc15a880de
--- /dev/null
+++ b/drivers/rapidio/rio_cm.c
@@ -0,0 +1,2366 @@
/*
 * rio_cm - RapidIO Channelized Messaging Driver
 *
 * Copyright 2013-2016 Integrated Device Technology, Inc.
 * Copyright (c) 2015, Prodrive Technologies
 * Copyright (c) 2015, RapidIO Trade Association
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS PROGRAM IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL,
 * BUT WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED WARRANTY OF
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE
 * GNU GENERAL PUBLIC LICENSE FOR MORE DETAILS.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/reboot.h>
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/rio_cm_cdev.h>

#define DRV_NAME        "rio_cm"
#define DRV_VERSION     "1.0.0"
#define DRV_AUTHOR      "Alexandre Bounine <alexandre.bounine@idt.com>"
#define DRV_DESC        "RapidIO Channelized Messaging Driver"
#define DEV_NAME        "rio_cm"

/* Debug output filtering masks */
enum {
        DBG_NONE        = 0,
        DBG_INIT        = BIT(0), /* driver init */
        DBG_EXIT        = BIT(1), /* driver exit */
        DBG_MPORT       = BIT(2), /* mport add/remove */
        DBG_RDEV        = BIT(3), /* RapidIO device add/remove */
        DBG_CHOP        = BIT(4), /* channel operations */
        DBG_WAIT        = BIT(5), /* waiting for events */
        DBG_TX          = BIT(6), /* message TX */
        DBG_TX_EVENT    = BIT(7), /* message TX event */
        DBG_RX_DATA     = BIT(8), /* inbound data messages */
        DBG_RX_CMD      = BIT(9), /* inbound REQ/ACK/NACK messages */
        DBG_ALL         = ~0,
};

#ifdef DEBUG
#define riocm_debug(level, fmt, arg...) \
        do { \
                if (DBG_##level & dbg_level) \
                        pr_debug(DRV_NAME ": %s " fmt "\n", \
                                __func__, ##arg); \
        } while (0)
#else
#define riocm_debug(level, fmt, arg...) \
                no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
#endif

#define riocm_warn(fmt, arg...) \
        pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)

#define riocm_error(fmt, arg...) \
        pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)


static int cmbox = 1;
module_param(cmbox, int, S_IRUGO);
MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)");

static int chstart = 256;
module_param(chstart, int, S_IRUGO);
MODULE_PARM_DESC(chstart,
                 "Start channel number for dynamic allocation (default 256)");

#ifdef DEBUG
static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif
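
/*
 * Example (hypothetical): loading with "modprobe rio_cm dbg_level=0x50"
 * enables the DBG_CHOP (0x10) and DBG_TX (0x40) trace classes on a
 * DEBUG build.
 */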

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define RIOCM_TX_RING_SIZE      128
#define RIOCM_RX_RING_SIZE      128
#define RIOCM_CONNECT_TO        3 /* connect response TO (in sec) */

#define RIOCM_MAX_CHNUM         0xffff /* Use full range of u16 field */
#define RIOCM_CHNUM_AUTO        0
#define RIOCM_MAX_EP_COUNT      0x10000 /* Max number of endpoints */

enum rio_cm_state {
        RIO_CM_IDLE,
        RIO_CM_CONNECT,
        RIO_CM_CONNECTED,
        RIO_CM_DISCONNECT,
        RIO_CM_CHAN_BOUND,
        RIO_CM_LISTEN,
        RIO_CM_DESTROYING,
};

enum rio_cm_pkt_type {
        RIO_CM_SYS      = 0xaa,
        RIO_CM_CHAN     = 0x55,
};

enum rio_cm_chop {
        CM_CONN_REQ,
        CM_CONN_ACK,
        CM_CONN_CLOSE,
        CM_DATA_MSG,
};

struct rio_ch_base_bhdr {
        u32 src_id;
        u32 dst_id;
#define RIO_HDR_LETTER_MASK 0xffff0000
#define RIO_HDR_MBOX_MASK   0x0000ffff
        u8  src_mbox;
        u8  dst_mbox;
        u8  type;
} __attribute__((__packed__));

struct rio_ch_chan_hdr {
        struct rio_ch_base_bhdr bhdr;
        u8  ch_op;
        u16 dst_ch;
        u16 src_ch;
        u16 msg_len;
        u16 rsrvd;
} __attribute__((__packed__));

struct tx_req {
        struct list_head node;
        struct rio_dev  *rdev;
        void            *buffer;
        size_t          len;
};

struct cm_dev {
        struct list_head        list;
        struct rio_mport        *mport;
        void                    *rx_buf[RIOCM_RX_RING_SIZE];
        int                     rx_slots;
        struct mutex            rx_lock;

        void                    *tx_buf[RIOCM_TX_RING_SIZE];
        int                     tx_slot;
        int                     tx_cnt;
        int                     tx_ack_slot;
        struct list_head        tx_reqs;
        spinlock_t              tx_lock;

        struct list_head        peers;
        u32                     npeers;
        struct workqueue_struct *rx_wq;
        struct work_struct      rx_work;
};

struct chan_rx_ring {
        void    *buf[RIOCM_RX_RING_SIZE];
        int     head;
        int     tail;
        int     count;

        /* Tracking RX buffers reported to upper level */
        void    *inuse[RIOCM_RX_RING_SIZE];
        int     inuse_cnt;
};

struct rio_channel {
        u16                     id;             /* local channel ID */
        struct kref             ref;            /* channel refcount */
        struct file             *filp;
        struct cm_dev           *cmdev;         /* associated CM device object */
        struct rio_dev          *rdev;          /* remote RapidIO device */
        enum rio_cm_state       state;
        int                     error;
        spinlock_t              lock;
        void                    *context;
        u32                     loc_destid;     /* local destID */
        u32                     rem_destid;     /* remote destID */
        u16                     rem_channel;    /* remote channel ID */
        struct list_head        accept_queue;
        struct list_head        ch_node;
        struct completion       comp;
        struct completion       comp_close;
        struct chan_rx_ring     rx_ring;
};

struct cm_peer {
        struct list_head node;
        struct rio_dev *rdev;
};

struct rio_cm_work {
        struct work_struct work;
        struct cm_dev *cm;
        void *data;
};

struct conn_req {
        struct list_head node;
        u32 destid;     /* requester destID */
        u16 chan;       /* requester channel ID */
        struct cm_dev *cmdev;
};

/*
 * A channel_dev structure represents a CM_CDEV
 * @cdev        Character device
 * @dev         Associated device object
 */
struct channel_dev {
        struct cdev     cdev;
        struct device   *dev;
};

static struct rio_channel *riocm_ch_alloc(u16 ch_num);
static void riocm_ch_free(struct kref *ref);
static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
                           void *buffer, size_t len);
static int riocm_ch_close(struct rio_channel *ch);

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(ch_idr);

static LIST_HEAD(cm_dev_list);
static DECLARE_RWSEM(rdev_sem);

static struct class *dev_class;
static unsigned int dev_major;
static unsigned int dev_minor_base;
static dev_t dev_number;
static struct channel_dev riocm_cdev;

#define is_msg_capable(src_ops, dst_ops)                \
                        ((src_ops & RIO_SRC_OPS_DATA_MSG) &&    \
                         (dst_ops & RIO_DST_OPS_DATA_MSG))
#define dev_cm_capable(dev) \
        is_msg_capable(dev->src_ops, dev->dst_ops)

static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp)
{
        int ret;

        spin_lock_bh(&ch->lock);
        ret = (ch->state == cmp);
        spin_unlock_bh(&ch->lock);
        return ret;
}

static int riocm_cmp_exch(struct rio_channel *ch,
                          enum rio_cm_state cmp, enum rio_cm_state exch)
{
        int ret;

        spin_lock_bh(&ch->lock);
        ret = (ch->state == cmp);
        if (ret)
                ch->state = exch;
        spin_unlock_bh(&ch->lock);
        return ret;
}

static enum rio_cm_state riocm_exch(struct rio_channel *ch,
                                    enum rio_cm_state exch)
{
        enum rio_cm_state old;

        spin_lock_bh(&ch->lock);
        old = ch->state;
        ch->state = exch;
        spin_unlock_bh(&ch->lock);
        return old;
}

static struct rio_channel *riocm_get_channel(u16 nr)
{
        struct rio_channel *ch;

        spin_lock_bh(&idr_lock);
        ch = idr_find(&ch_idr, nr);
        if (ch)
                kref_get(&ch->ref);
        spin_unlock_bh(&idr_lock);
        return ch;
}

static void riocm_put_channel(struct rio_channel *ch)
{
        kref_put(&ch->ref, riocm_ch_free);
}

static void *riocm_rx_get_msg(struct cm_dev *cm)
{
        void *msg;
        int i;

        msg = rio_get_inb_message(cm->mport, cmbox);
        if (msg) {
                for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
                        if (cm->rx_buf[i] == msg) {
                                cm->rx_buf[i] = NULL;
                                cm->rx_slots++;
                                break;
                        }
                }

                if (i == RIOCM_RX_RING_SIZE)
                        riocm_warn("no record for buffer 0x%p", msg);
        }

        return msg;
}

/*
 * riocm_rx_fill - fills a ring of receive buffers for given cm device
 * @cm: cm_dev object
 * @nent: max number of entries to fill
 *
 * Returns: none
 */
static void riocm_rx_fill(struct cm_dev *cm, int nent)
{
        int i;

        if (cm->rx_slots == 0)
                return;

        for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) {
                if (cm->rx_buf[i] == NULL) {
                        cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);
                        if (cm->rx_buf[i] == NULL)
                                break;
                        rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]);
                        cm->rx_slots--;
                        nent--;
                }
        }
}

/*
 * riocm_rx_free - frees all receive buffers associated with given cm device
 * @cm: cm_dev object
 *
 * Returns: none
 */
static void riocm_rx_free(struct cm_dev *cm)
{
        int i;

        for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
                if (cm->rx_buf[i] != NULL) {
                        kfree(cm->rx_buf[i]);
                        cm->rx_buf[i] = NULL;
                }
        }
}

/*
 * riocm_req_handler - connection request handler
 * @cm: cm_dev object
 * @req_data: pointer to the request packet
 *
 * Returns: 0 if success, or
 *          -EINVAL if channel is not in correct state,
 *          -ENODEV if cannot find a channel with specified ID,
 *          -ENOMEM if unable to allocate memory to store the request
 */
static int riocm_req_handler(struct cm_dev *cm, void *req_data)
{
        struct rio_channel *ch;
        struct conn_req *req;
        struct rio_ch_chan_hdr *hh = req_data;
        u16 chnum;

        chnum = ntohs(hh->dst_ch);

        ch = riocm_get_channel(chnum);

        if (!ch)
                return -ENODEV;

        if (ch->state != RIO_CM_LISTEN) {
                riocm_debug(RX_CMD, "channel %d is not in listen state", chnum);
                riocm_put_channel(ch);
                return -EINVAL;
        }

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req) {
                riocm_put_channel(ch);
                return -ENOMEM;
        }

        req->destid = ntohl(hh->bhdr.src_id);
        req->chan = ntohs(hh->src_ch);
        req->cmdev = cm;

        spin_lock_bh(&ch->lock);
        list_add_tail(&req->node, &ch->accept_queue);
        spin_unlock_bh(&ch->lock);
        complete(&ch->comp);
        riocm_put_channel(ch);

        return 0;
}

/*
 * riocm_resp_handler - response to connection request handler
 * @resp_data: pointer to the response packet
 *
 * Returns: 0 if success, or
 *          -EINVAL if channel is not in correct state,
 *          -ENODEV if cannot find a channel with specified ID,
 */
static int riocm_resp_handler(void *resp_data)
{
        struct rio_channel *ch;
        struct rio_ch_chan_hdr *hh = resp_data;
        u16 chnum;

        chnum = ntohs(hh->dst_ch);
        ch = riocm_get_channel(chnum);
        if (!ch)
                return -ENODEV;

        if (ch->state != RIO_CM_CONNECT) {
                riocm_put_channel(ch);
                return -EINVAL;
        }

        riocm_exch(ch, RIO_CM_CONNECTED);
        ch->rem_channel = ntohs(hh->src_ch);
        complete(&ch->comp);
        riocm_put_channel(ch);

        return 0;
}

/*
 * riocm_close_handler - channel close request handler
 * @req_data: pointer to the request packet
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find a channel with specified ID,
 *          + error codes returned by riocm_ch_close.
 */
static int riocm_close_handler(void *data)
{
        struct rio_channel *ch;
        struct rio_ch_chan_hdr *hh = data;
        int ret;

        riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch));

        spin_lock_bh(&idr_lock);
        ch = idr_find(&ch_idr, ntohs(hh->dst_ch));
        if (!ch) {
                spin_unlock_bh(&idr_lock);
                return -ENODEV;
        }
        idr_remove(&ch_idr, ch->id);
        spin_unlock_bh(&idr_lock);

        riocm_exch(ch, RIO_CM_DISCONNECT);

        ret = riocm_ch_close(ch);
        if (ret)
                riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret);

        return 0;
}

/*
 * rio_cm_handler - function that services request (non-data) packets
 * @cm: cm_dev object
 * @data: pointer to the packet
 */
static void rio_cm_handler(struct cm_dev *cm, void *data)
{
        struct rio_ch_chan_hdr *hdr;

        if (!rio_mport_is_running(cm->mport))
                goto out;

        hdr = data;

        riocm_debug(RX_CMD, "OP=%x for ch=%d from %d",
                    hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch));

        switch (hdr->ch_op) {
        case CM_CONN_REQ:
                riocm_req_handler(cm, data);
                break;
        case CM_CONN_ACK:
                riocm_resp_handler(data);
                break;
        case CM_CONN_CLOSE:
                riocm_close_handler(data);
                break;
        default:
                riocm_error("Invalid packet header");
                break;
        }
out:
        kfree(data);
}

/*
 * rio_rx_data_handler - received data packet handler
 * @cm: cm_dev object
 * @buf: data packet
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find a channel with specified ID,
 *          -EIO if channel is not in CONNECTED state,
 *          -ENOMEM if channel RX queue is full (packet discarded)
 */
static int rio_rx_data_handler(struct cm_dev *cm, void *buf)
{
        struct rio_ch_chan_hdr *hdr;
        struct rio_channel *ch;

        hdr = buf;

        riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch));

        ch = riocm_get_channel(ntohs(hdr->dst_ch));
        if (!ch) {
                /* Discard data message for non-existing channel */
                kfree(buf);
                return -ENODEV;
        }

        /* Place pointer to the buffer into channel's RX queue */
        spin_lock(&ch->lock);

        if (ch->state != RIO_CM_CONNECTED) {
                /* Channel is not ready to receive data, discard a packet */
                riocm_debug(RX_DATA, "ch=%d is in wrong state=%d",
                            ch->id, ch->state);
                spin_unlock(&ch->lock);
                kfree(buf);
                riocm_put_channel(ch);
                return -EIO;
        }

        if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) {
                /* If RX ring is full, discard a packet */
                riocm_debug(RX_DATA, "ch=%d is full", ch->id);
                spin_unlock(&ch->lock);
                kfree(buf);
                riocm_put_channel(ch);
                return -ENOMEM;
        }

        ch->rx_ring.buf[ch->rx_ring.head] = buf;
        ch->rx_ring.head++;
        ch->rx_ring.count++;
        ch->rx_ring.head %= RIOCM_RX_RING_SIZE;

        complete(&ch->comp);

        spin_unlock(&ch->lock);
        riocm_put_channel(ch);

        return 0;
}

/*
 * rio_ibmsg_handler - inbound message packet handler
 */
static void rio_ibmsg_handler(struct work_struct *work)
{
        struct cm_dev *cm = container_of(work, struct cm_dev, rx_work);
        void *data;
        struct rio_ch_chan_hdr *hdr;

        if (!rio_mport_is_running(cm->mport))
                return;

        while (1) {
                mutex_lock(&cm->rx_lock);
                data = riocm_rx_get_msg(cm);
                if (data)
                        riocm_rx_fill(cm, 1);
                mutex_unlock(&cm->rx_lock);

                if (data == NULL)
                        break;

                hdr = data;

                if (hdr->bhdr.type != RIO_CM_CHAN) {
                        /* For now simply discard packets other than channel */
                        riocm_error("Unsupported TYPE code (0x%x). Msg dropped",
                                    hdr->bhdr.type);
                        kfree(data);
                        continue;
                }

                /* Process a channel message */
                if (hdr->ch_op == CM_DATA_MSG)
                        rio_rx_data_handler(cm, data);
                else
                        rio_cm_handler(cm, data);
        }
}

static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id,
                                int mbox, int slot)
{
        struct cm_dev *cm = dev_id;

        if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work))
                queue_work(cm->rx_wq, &cm->rx_work);
}

/*
 * rio_txcq_handler - TX completion handler
 * @cm: cm_dev object
 * @slot: TX queue slot
 *
 * TX completion handler also ensures that pending request packets are placed
 * into transmit queue as soon as a free slot becomes available. This is done
 * to give higher priority to request packets during high intensity data flow.
 */
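/*
 * Slots are retired in order: tx_ack_slot chases the completion slot
 * reported by the mport driver around the ring, and the extra
 * (tx_cnt == RIOCM_TX_RING_SIZE) test lets the loop start even when a
 * completely full ring makes ack_slot equal to slot.
 */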
static void rio_txcq_handler(struct cm_dev *cm, int slot)
{
        int ack_slot;

        /* ATTN: Add TX completion notification if/when direct buffer
         * transfer is implemented. At this moment only correct tracking
         * of tx_count is important.
         */
        riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d",
                    cm->mport->id, slot, cm->tx_cnt);

        spin_lock(&cm->tx_lock);
        ack_slot = cm->tx_ack_slot;

        if (ack_slot == slot)
                riocm_debug(TX_EVENT, "slot == ack_slot");

        while (cm->tx_cnt && ((ack_slot != slot) ||
               (cm->tx_cnt == RIOCM_TX_RING_SIZE))) {

                cm->tx_buf[ack_slot] = NULL;
                ++ack_slot;
                ack_slot &= (RIOCM_TX_RING_SIZE - 1);
                cm->tx_cnt--;
        }

        if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE)
                riocm_error("tx_cnt %d out of sync", cm->tx_cnt);

        WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE));

        cm->tx_ack_slot = ack_slot;

        /*
         * If there are pending requests, insert them into transmit queue
         */
        if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) {
                struct tx_req *req, *_req;
                int rc;

                list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) {
                        list_del(&req->node);
                        cm->tx_buf[cm->tx_slot] = req->buffer;
                        rc = rio_add_outb_message(cm->mport, req->rdev, cmbox,
                                                  req->buffer, req->len);
                        kfree(req->buffer);
                        kfree(req);

                        ++cm->tx_cnt;
                        ++cm->tx_slot;
                        cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
                        if (cm->tx_cnt == RIOCM_TX_RING_SIZE)
                                break;
                }
        }

        spin_unlock(&cm->tx_lock);
}

static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id,
                                 int mbox, int slot)
{
        struct cm_dev *cm = dev_id;

        if (cm && rio_mport_is_running(cm->mport))
                rio_txcq_handler(cm, slot);
}

static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev,
                           void *buffer, size_t len)
{
        unsigned long flags;
        struct tx_req *treq;

        treq = kzalloc(sizeof(*treq), GFP_KERNEL);
        if (treq == NULL)
                return -ENOMEM;

        treq->rdev = rdev;
        treq->buffer = buffer;
        treq->len = len;

        spin_lock_irqsave(&cm->tx_lock, flags);
        list_add_tail(&treq->node, &cm->tx_reqs);
        spin_unlock_irqrestore(&cm->tx_lock, flags);
        return 0;
}

/*
 * riocm_post_send - helper function that places packet into msg TX queue
 * @cm: cm_dev object
 * @rdev: target RapidIO device object (required by outbound msg interface)
 * @buffer: pointer to a packet buffer to send
 * @len: length of data to transfer
 *
 * Returns: 0 if success, or error code otherwise.
 */
static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
                           void *buffer, size_t len)
{
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&cm->tx_lock, flags);

        if (cm->mport == NULL) {
                rc = -ENODEV;
                goto err_out;
        }

        if (cm->tx_cnt == RIOCM_TX_RING_SIZE) {
                riocm_debug(TX, "Tx Queue is full");
                rc = -EBUSY;
                goto err_out;
        }

        cm->tx_buf[cm->tx_slot] = buffer;
        rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len);

        riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d",
                    buffer, rdev->destid, cm->tx_slot, cm->tx_cnt);

        ++cm->tx_cnt;
        ++cm->tx_slot;
        cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);

err_out:
        spin_unlock_irqrestore(&cm->tx_lock, flags);
        return rc;
}

/*
 * riocm_ch_send - sends a data packet to a remote device
 * @ch_id: local channel ID
 * @buf: pointer to a data buffer to send (including CM header)
 * @len: length of data to transfer (including CM header)
 *
 * ATTN: ASSUMES THAT THE HEADER SPACE IS RESERVED PART OF THE DATA PACKET
 *
 * Returns: 0 if success, or
 *          -EINVAL if one or more input parameters is/are not valid,
 *          -ENODEV if cannot find a channel with specified ID,
 *          -EAGAIN if a channel is not in CONNECTED state,
 *          + error codes returned by HW send routine.
 */
static int riocm_ch_send(u16 ch_id, void *buf, int len)
{
        struct rio_channel *ch;
        struct rio_ch_chan_hdr *hdr;
        int ret;

        if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
                return -EINVAL;

        ch = riocm_get_channel(ch_id);
        if (!ch) {
                riocm_error("%s(%d) ch_%d not found", current->comm,
                            task_pid_nr(current), ch_id);
                return -ENODEV;
        }

        if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
                ret = -EAGAIN;
                goto err_out;
        }

        /*
         * Fill buffer header section with corresponding channel data
         */
        hdr = buf;

        hdr->bhdr.src_id = htonl(ch->loc_destid);
        hdr->bhdr.dst_id = htonl(ch->rem_destid);
        hdr->bhdr.src_mbox = cmbox;
        hdr->bhdr.dst_mbox = cmbox;
        hdr->bhdr.type = RIO_CM_CHAN;
        hdr->ch_op = CM_DATA_MSG;
        hdr->dst_ch = htons(ch->rem_channel);
        hdr->src_ch = htons(ch->id);
        hdr->msg_len = htons((u16)len);

        /* ATTN: the function call below relies on the fact that underlying
         * HW-specific add_outb_message() routine copies TX data into its own
         * internal transfer buffer (true for all RIONET compatible mport
         * drivers). Must be reviewed if mport driver uses the buffer directly.
         */

        ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len);
        if (ret)
                riocm_debug(TX, "ch %d send_err=%d", ch->id, ret);
err_out:
        riocm_put_channel(ch);
        return ret;
}

static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf)
{
        int i, ret = -EINVAL;

        spin_lock_bh(&ch->lock);

        for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
                if (ch->rx_ring.inuse[i] == buf) {
                        ch->rx_ring.inuse[i] = NULL;
                        ch->rx_ring.inuse_cnt--;
                        ret = 0;
                        break;
                }
        }

        spin_unlock_bh(&ch->lock);

        if (!ret)
                kfree(buf);

        return ret;
}

/*
 * riocm_ch_receive - fetch a data packet received for the specified channel
 * @ch: local channel object
 * @buf: pointer to a packet buffer
 * @timeout: timeout to wait for incoming packet (in jiffies)
 *
 * Returns: 0 and valid buffer pointer if success, or NULL pointer and one of:
 *          -EAGAIN if a channel is not in CONNECTED state,
 *          -ENOMEM if in-use tracking queue is full,
 *          -ETIME if wait timeout expired,
 *          -EINTR if wait was interrupted.
 */
static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout)
{
        void *rxmsg = NULL;
        int i, ret = 0;
        long wret;

        if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
                ret = -EAGAIN;
                goto out;
        }

        if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) {
                /* If we do not have entries to track buffers given to upper
                 * layer, reject request.
                 */
                ret = -ENOMEM;
                goto out;
        }

        wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout);

        riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret);

        if (!wret)
                ret = -ETIME;
        else if (wret == -ERESTARTSYS)
                ret = -EINTR;
        else
                ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -ECONNRESET;

        if (ret)
                goto out;

        spin_lock_bh(&ch->lock);

        rxmsg = ch->rx_ring.buf[ch->rx_ring.tail];
        ch->rx_ring.buf[ch->rx_ring.tail] = NULL;
        ch->rx_ring.count--;
        ch->rx_ring.tail++;
        ch->rx_ring.tail %= RIOCM_RX_RING_SIZE;
        ret = -ENOMEM;

        for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
                if (ch->rx_ring.inuse[i] == NULL) {
                        ch->rx_ring.inuse[i] = rxmsg;
                        ch->rx_ring.inuse_cnt++;
                        ret = 0;
                        break;
                }
        }

        if (ret) {
                /* We have no entry to store pending message: drop it */
                kfree(rxmsg);
                rxmsg = NULL;
        }

        spin_unlock_bh(&ch->lock);
out:
        *buf = rxmsg;
        return ret;
}

/*
 * riocm_ch_connect - sends a connect request to a remote device
 * @loc_ch: local channel ID
 * @cm: CM device to send connect request
 * @peer: target RapidIO device
 * @rem_ch: remote channel ID
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find the specified channel,
 *          -EINVAL if the channel is not in IDLE state,
 *          -ETIME if ACK response timeout expired,
 *          -EINTR if wait for response was interrupted.
 */
static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm,
                            struct cm_peer *peer, u16 rem_ch)
{
        struct rio_channel *ch = NULL;
        struct rio_ch_chan_hdr *hdr;
        int ret;
        long wret;

        ch = riocm_get_channel(loc_ch);
        if (!ch)
                return -ENODEV;

        if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) {
                ret = -EINVAL;
                goto conn_done;
        }

        ch->cmdev = cm;
        ch->rdev = peer->rdev;
        ch->context = NULL;
        ch->loc_destid = cm->mport->host_deviceid;
        ch->rem_channel = rem_ch;

        /*
         * Send connect request to the remote RapidIO device
         */

        hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
        if (hdr == NULL) {
                ret = -ENOMEM;
                goto conn_done;
        }

        hdr->bhdr.src_id = htonl(ch->loc_destid);
        hdr->bhdr.dst_id = htonl(peer->rdev->destid);
        hdr->bhdr.src_mbox = cmbox;
        hdr->bhdr.dst_mbox = cmbox;
        hdr->bhdr.type = RIO_CM_CHAN;
        hdr->ch_op = CM_CONN_REQ;
        hdr->dst_ch = htons(rem_ch);
        hdr->src_ch = htons(loc_ch);

        /* ATTN: the function call below relies on the fact that underlying
         * HW-specific add_outb_message() routine copies TX data into its
         * internal transfer buffer. Must be reviewed if mport driver uses
         * this buffer directly.
         */
        ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr));

        if (ret != -EBUSY) {
                kfree(hdr);
        } else {
                ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr));
                if (ret)
                        kfree(hdr);
        }

        if (ret) {
                riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE);
                goto conn_done;
        }

        /* Wait for connect response from the remote device */
        wret = wait_for_completion_interruptible_timeout(&ch->comp,
                                                         RIOCM_CONNECT_TO * HZ);
        riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);

        if (!wret)
                ret = -ETIME;
        else if (wret == -ERESTARTSYS)
                ret = -EINTR;
        else
                ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1;

conn_done:
        riocm_put_channel(ch);
        return ret;
}

static int riocm_send_ack(struct rio_channel *ch)
{
        struct rio_ch_chan_hdr *hdr;
        int ret;

        hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
        if (hdr == NULL)
                return -ENOMEM;

        hdr->bhdr.src_id = htonl(ch->loc_destid);
        hdr->bhdr.dst_id = htonl(ch->rem_destid);
        hdr->dst_ch = htons(ch->rem_channel);
        hdr->src_ch = htons(ch->id);
        hdr->bhdr.src_mbox = cmbox;
        hdr->bhdr.dst_mbox = cmbox;
        hdr->bhdr.type = RIO_CM_CHAN;
        hdr->ch_op = CM_CONN_ACK;

        /* ATTN: the function call below relies on the fact that underlying
         * add_outb_message() routine copies TX data into its internal transfer
         * buffer. Review if switching to direct buffer version.
         */
        ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));

        if (ret == -EBUSY && !riocm_queue_req(ch->cmdev,
                                              ch->rdev, hdr, sizeof(*hdr)))
                return 0;
        kfree(hdr);

        if (ret)
                riocm_error("send ACK to ch_%d on %s failed (ret=%d)",
                            ch->id, rio_name(ch->rdev), ret);
        return ret;
}

/*
 * riocm_ch_accept - accept incoming connection request
 * @ch_id: channel ID
 * @new_ch_id: pointer for storing the ID of the new channel
 * @timeout: wait timeout (if 0 non-blocking call, do not wait if connection
 *           request is not available).
 *
 * Returns: pointer to new channel struct if success, or error-valued pointer:
 *          -ENODEV - cannot find specified channel or mport,
 *          -EINVAL - the channel is not in LISTEN state,
 *          -EAGAIN - no connection request available immediately (timeout=0),
 *          -ENOMEM - unable to allocate new channel,
 *          -ETIME - wait timeout expired,
 *          -EINTR - wait was interrupted.
 */
static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
                                           long timeout)
{
        struct rio_channel *ch = NULL;
        struct rio_channel *new_ch = NULL;
        struct conn_req *req;
        struct cm_peer *peer;
        int found = 0;
        int err = 0;
        long wret;

        ch = riocm_get_channel(ch_id);
        if (!ch)
                return ERR_PTR(-EINVAL);

        if (!riocm_cmp(ch, RIO_CM_LISTEN)) {
                err = -EINVAL;
                goto err_put;
        }

        /* Don't sleep if this is a non blocking call */
        if (!timeout) {
                if (!try_wait_for_completion(&ch->comp)) {
                        err = -EAGAIN;
                        goto err_put;
                }
        } else {
                riocm_debug(WAIT, "on %d", ch->id);

                wret = wait_for_completion_interruptible_timeout(&ch->comp,
                                                                 timeout);
                if (!wret) {
                        err = -ETIME;
                        goto err_put;
                } else if (wret == -ERESTARTSYS) {
                        err = -EINTR;
                        goto err_put;
                }
        }

        spin_lock_bh(&ch->lock);

        if (ch->state != RIO_CM_LISTEN) {
                err = -ECANCELED;
        } else if (list_empty(&ch->accept_queue)) {
                riocm_debug(WAIT, "on %d accept_queue is empty on completion",
                            ch->id);
                err = -EIO;
        }

        spin_unlock_bh(&ch->lock);

        if (err) {
                riocm_debug(WAIT, "on %d returns %d", ch->id, err);
                goto err_put;
        }

        /* Create new channel for this connection */
        new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO);

        if (IS_ERR(new_ch)) {
                riocm_error("failed to get channel for new req (%ld)",
                            PTR_ERR(new_ch));
                err = -ENOMEM;
                goto err_put;
        }

        spin_lock_bh(&ch->lock);

        req = list_first_entry(&ch->accept_queue, struct conn_req, node);
        list_del(&req->node);
        new_ch->cmdev = ch->cmdev;
        new_ch->loc_destid = ch->loc_destid;
        new_ch->rem_destid = req->destid;
        new_ch->rem_channel = req->chan;

        spin_unlock_bh(&ch->lock);
        riocm_put_channel(ch);
        kfree(req);

        down_read(&rdev_sem);
        /* Find requester's device object */
        list_for_each_entry(peer, &new_ch->cmdev->peers, node) {
                if (peer->rdev->destid == new_ch->rem_destid) {
                        riocm_debug(RX_CMD, "found matching device(%s)",
                                    rio_name(peer->rdev));
                        found = 1;
                        break;
                }
        }
        up_read(&rdev_sem);

        if (!found) {
                /* If peer device object not found, simply ignore the request */
                err = -ENODEV;
                goto err_nodev;
        }

        new_ch->rdev = peer->rdev;
        new_ch->state = RIO_CM_CONNECTED;
        spin_lock_init(&new_ch->lock);

        /* Acknowledge the connection request. */
        riocm_send_ack(new_ch);

        *new_ch_id = new_ch->id;
        return new_ch;
err_put:
        riocm_put_channel(ch);
err_nodev:
        if (new_ch) {
                spin_lock_bh(&idr_lock);
                idr_remove(&ch_idr, new_ch->id);
                spin_unlock_bh(&idr_lock);
                riocm_put_channel(new_ch);
        }
        *new_ch_id = 0;
        return ERR_PTR(err);
}

/*
 * riocm_ch_listen - puts a channel into LISTEN state
 * @ch_id: channel ID
 *
 * Returns: 0 if success, or
 *          -EINVAL if the specified channel does not exist or
 *                  is not in CHAN_BOUND state.
 */
static int riocm_ch_listen(u16 ch_id)
{
        struct rio_channel *ch = NULL;
        int ret = 0;

        riocm_debug(CHOP, "(ch_%d)", ch_id);

        ch = riocm_get_channel(ch_id);
        if (!ch)
                return -EINVAL;
        if (!riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN))
                ret = -EINVAL;
        riocm_put_channel(ch);
        return ret;
}

/*
 * riocm_ch_bind - associate a channel object and an mport device
 * @ch_id: channel ID
 * @mport_id: local mport device ID
 * @context: pointer to the additional caller's context
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find specified mport,
 *          -EINVAL if the specified channel does not exist or
 *                  is not in IDLE state.
 */
static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context)
{
        struct rio_channel *ch = NULL;
        struct cm_dev *cm;
        int rc = -ENODEV;

        riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id);

        /* Find matching cm_dev object */
        down_read(&rdev_sem);
        list_for_each_entry(cm, &cm_dev_list, list) {
                if ((cm->mport->id == mport_id) &&
                    rio_mport_is_running(cm->mport)) {
                        rc = 0;
                        break;
                }
        }

        if (rc)
                goto exit;

        ch = riocm_get_channel(ch_id);
        if (!ch) {
                rc = -EINVAL;
                goto exit;
        }

        spin_lock_bh(&ch->lock);
        if (ch->state != RIO_CM_IDLE) {
                spin_unlock_bh(&ch->lock);
                rc = -EINVAL;
                goto err_put;
        }

        ch->cmdev = cm;
        ch->loc_destid = cm->mport->host_deviceid;
        ch->context = context;
        ch->state = RIO_CM_CHAN_BOUND;
        spin_unlock_bh(&ch->lock);
err_put:
        riocm_put_channel(ch);
exit:
        up_read(&rdev_sem);
        return rc;
}

/*
 * riocm_ch_alloc - channel object allocation helper routine
 * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
 *
 * Return value: pointer to newly created channel object,
 *               or error-valued pointer
 */
static struct rio_channel *riocm_ch_alloc(u16 ch_num)
{
        int id;
        int start, end;
        struct rio_channel *ch;

        ch = kzalloc(sizeof(*ch), GFP_KERNEL);
        if (!ch)
                return ERR_PTR(-ENOMEM);

        if (ch_num) {
                /* If requested, try to obtain the specified channel ID */
                start = ch_num;
                end = ch_num + 1;
        } else {
                /* Obtain channel ID from the dynamic allocation range */
                start = chstart;
                end = RIOCM_MAX_CHNUM + 1;
        }

        idr_preload(GFP_KERNEL);
        spin_lock_bh(&idr_lock);
        id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT);
        spin_unlock_bh(&idr_lock);
        idr_preload_end();

        if (id < 0) {
                kfree(ch);
                return ERR_PTR(id == -ENOSPC ? -EBUSY : id);
        }

        ch->id = (u16)id;
        ch->state = RIO_CM_IDLE;
        spin_lock_init(&ch->lock);
        INIT_LIST_HEAD(&ch->accept_queue);
        INIT_LIST_HEAD(&ch->ch_node);
        init_completion(&ch->comp);
        init_completion(&ch->comp_close);
        kref_init(&ch->ref);
        ch->rx_ring.head = 0;
        ch->rx_ring.tail = 0;
        ch->rx_ring.count = 0;
        ch->rx_ring.inuse_cnt = 0;

        return ch;
}

/*
 * riocm_ch_create - creates a new channel object and allocates ID for it
 * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
 *
 * Allocates and initializes a new channel object. If the parameter ch_num > 0
 * and is within the valid range, riocm_ch_create tries to allocate the
 * specified ID for the new channel. If ch_num = 0, channel ID will be assigned
 * automatically from the range (chstart ... RIOCM_MAX_CHNUM).
 * Module parameter 'chstart' defines start of an ID range available for
 * dynamic allocation. Range below 'chstart' is reserved for pre-defined
 * ID numbers. Available channel numbers are limited by 16-bit size of
 * channel numbers used in the packet header.
 *
 * Return value: PTR to rio_channel structure if successful (with channel
 *               number updated via pointer) or error-valued pointer if error.
 */
static struct rio_channel *riocm_ch_create(u16 *ch_num)
{
        struct rio_channel *ch = NULL;

        ch = riocm_ch_alloc(*ch_num);

        if (IS_ERR(ch))
                riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)",
                            *ch_num, PTR_ERR(ch));
        else
                *ch_num = ch->id;

        return ch;
}

/*
 * riocm_ch_free - channel object release routine
 * @ref: pointer to a channel's kref structure
 */
static void riocm_ch_free(struct kref *ref)
{
        struct rio_channel *ch = container_of(ref, struct rio_channel, ref);
        int i;

        riocm_debug(CHOP, "(ch_%d)", ch->id);

        if (ch->rx_ring.inuse_cnt) {
                for (i = 0;
                     i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) {
                        if (ch->rx_ring.inuse[i] != NULL) {
                                kfree(ch->rx_ring.inuse[i]);
                                ch->rx_ring.inuse_cnt--;
                        }
                }
        }

        if (ch->rx_ring.count)
                for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) {
                        if (ch->rx_ring.buf[i] != NULL) {
                                kfree(ch->rx_ring.buf[i]);
                                ch->rx_ring.count--;
                        }
                }

        complete(&ch->comp_close);
}

static int riocm_send_close(struct rio_channel *ch)
{
        struct rio_ch_chan_hdr *hdr;
        int ret;

        /*
         * Send CH_CLOSE notification to the remote RapidIO device
         */

        hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
        if (hdr == NULL)
                return -ENOMEM;

        hdr->bhdr.src_id = htonl(ch->loc_destid);
        hdr->bhdr.dst_id = htonl(ch->rem_destid);
        hdr->bhdr.src_mbox = cmbox;
        hdr->bhdr.dst_mbox = cmbox;
        hdr->bhdr.type = RIO_CM_CHAN;
        hdr->ch_op = CM_CONN_CLOSE;
        hdr->dst_ch = htons(ch->rem_channel);
        hdr->src_ch = htons(ch->id);

        /* ATTN: the function call below relies on the fact that underlying
         * add_outb_message() routine copies TX data into its internal transfer
         * buffer. Needs to be reviewed if switched to direct buffer mode.
         */
        ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));

        if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev,
                                              hdr, sizeof(*hdr)))
                return 0;
        kfree(hdr);

        if (ret)
                riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret);

        return ret;
}

/*
 * riocm_ch_close - closes a channel object with specified ID (by local request)
 * @ch: channel to be closed
 */
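/*
 * Close sequence: the channel switches to RIO_CM_DESTROYING, a CLOSE
 * notification is sent to the peer if the channel was connected, waiters
 * are woken through comp, and the local reference is dropped. The final
 * kref_put() runs riocm_ch_free(), which signals comp_close so that the
 * structure can be freed here.
 */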
static int riocm_ch_close(struct rio_channel *ch)
{
        unsigned long tmo = msecs_to_jiffies(3000);
        enum rio_cm_state state;
        long wret;
        int ret = 0;

        riocm_debug(CHOP, "ch_%d by %s(%d)",
                    ch->id, current->comm, task_pid_nr(current));

        state = riocm_exch(ch, RIO_CM_DESTROYING);
        if (state == RIO_CM_CONNECTED)
                riocm_send_close(ch);

        complete_all(&ch->comp);

        riocm_put_channel(ch);
        wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo);

        riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);

        if (wret == 0) {
                /* Timeout on wait occurred */
                riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d",
                            current->comm, task_pid_nr(current), ch->id);
                ret = -ETIMEDOUT;
        } else if (wret == -ERESTARTSYS) {
                /* Wait_for_completion was interrupted by a signal */
                riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted",
                            current->comm, task_pid_nr(current), ch->id);
                ret = -EINTR;
        }

        if (!ret) {
                riocm_debug(CHOP, "ch_%d resources released", ch->id);
                kfree(ch);
        } else {
                riocm_debug(CHOP, "failed to release ch_%d resources", ch->id);
        }

        return ret;
}

/*
 * riocm_cdev_open() - Open character device
 */
static int riocm_cdev_open(struct inode *inode, struct file *filp)
{
        riocm_debug(INIT, "by %s(%d) filp=%p ",
                    current->comm, task_pid_nr(current), filp);

        if (list_empty(&cm_dev_list))
                return -ENODEV;

        return 0;
}

/*
 * riocm_cdev_release() - Release character device
 */
static int riocm_cdev_release(struct inode *inode, struct file *filp)
{
        struct rio_channel *ch, *_c;
        unsigned int i;
        LIST_HEAD(list);

        riocm_debug(EXIT, "by %s(%d) filp=%p",
                    current->comm, task_pid_nr(current), filp);

        /* Check if there are channels associated with this file descriptor */
        spin_lock_bh(&idr_lock);
        idr_for_each_entry(&ch_idr, ch, i) {
                if (ch && ch->filp == filp) {
                        riocm_debug(EXIT, "ch_%d not released by %s(%d)",
                                    ch->id, current->comm,
                                    task_pid_nr(current));
                        idr_remove(&ch_idr, ch->id);
                        list_add(&ch->ch_node, &list);
                }
        }
        spin_unlock_bh(&idr_lock);

        if (!list_empty(&list)) {
                list_for_each_entry_safe(ch, _c, &list, ch_node) {
                        list_del(&ch->ch_node);
                        riocm_ch_close(ch);
                }
        }

        return 0;
}

/*
 * cm_ep_get_list_size() - Reports number of endpoints in the network
 */
static int cm_ep_get_list_size(void __user *arg)
{
        u32 __user *p = arg;
        u32 mport_id;
        u32 count = 0;
        struct cm_dev *cm;

        if (get_user(mport_id, p))
                return -EFAULT;
        if (mport_id >= RIO_MAX_MPORTS)
                return -EINVAL;

        /* Find a matching cm_dev object */
        down_read(&rdev_sem);
        list_for_each_entry(cm, &cm_dev_list, list) {
                if (cm->mport->id == mport_id) {
                        count = cm->npeers;
                        up_read(&rdev_sem);
                        if (copy_to_user(arg, &count, sizeof(u32)))
                                return -EFAULT;
                        return 0;
                }
        }
        up_read(&rdev_sem);

        return -ENODEV;
}

/*
 * cm_ep_get_list() - Returns list of attached endpoints
 */
static int cm_ep_get_list(void __user *arg)
{
        struct cm_dev *cm;
        struct cm_peer *peer;
        u32 info[2];
        void *buf;
        u32 nent;
        u32 *entry_ptr;
        u32 i = 0;
        int ret = 0;

        if (copy_from_user(&info, arg, sizeof(info)))
                return -EFAULT;

        if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT)
                return -EINVAL;

        /* Find a matching cm_dev object */
        down_read(&rdev_sem);
        list_for_each_entry(cm, &cm_dev_list, list)
                if (cm->mport->id == (u8)info[1])
                        goto found;

        up_read(&rdev_sem);
        return -ENODEV;

found:
        nent = min(info[0], cm->npeers);
        buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL);
        if (!buf) {
                up_read(&rdev_sem);
                return -ENOMEM;
        }

        entry_ptr = (u32 *)((uintptr_t)buf + 2*sizeof(u32));

        list_for_each_entry(peer, &cm->peers, node) {
                *entry_ptr = (u32)peer->rdev->destid;
                entry_ptr++;
                if (++i == nent)
                        break;
        }
        up_read(&rdev_sem);

        ((u32 *)buf)[0] = i; /* report an updated number of entries */
        ((u32 *)buf)[1] = info[1]; /* put back an mport ID */
        /* Copy only what was allocated (nent may be less than info[0]) */
        if (copy_to_user(arg, buf, sizeof(u32) * (nent + 2)))
                ret = -EFAULT;

        kfree(buf);
        return ret;
}

/*
 * cm_mport_get_list() - Returns list of available local mport devices
 */
static int cm_mport_get_list(void __user *arg)
{
        int ret = 0;
        u32 entries;
        void *buf;
        struct cm_dev *cm;
        u32 *entry_ptr;
        int count = 0;

        if (copy_from_user(&entries, arg, sizeof(entries)))
                return -EFAULT;
        if (entries == 0 || entries > RIO_MAX_MPORTS)
                return -EINVAL;
        buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* Scan all registered cm_dev objects */
        entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32));
        down_read(&rdev_sem);
        list_for_each_entry(cm, &cm_dev_list, list) {
                if (count++ < entries) {
                        *entry_ptr = (cm->mport->id << 16) |
                                      cm->mport->host_deviceid;
                        entry_ptr++;
                }
        }
        up_read(&rdev_sem);

        *((u32 *)buf) = count; /* report a real number of entries */
        /* Do not copy past the allocated buffer if count exceeds entries */
        if (count > entries)
                count = entries;
        if (copy_to_user(arg, buf, sizeof(u32) * (count + 1)))
                ret = -EFAULT;

        kfree(buf);
        return ret;
}

/*
 * cm_chan_create() - Create a message exchange channel
 */
static int cm_chan_create(struct file *filp, void __user *arg)
{
        u16 __user *p = arg;
        u16 ch_num;
        struct rio_channel *ch;

        if (get_user(ch_num, p))
                return -EFAULT;

        riocm_debug(CHOP, "ch_%d requested by %s(%d)",
                    ch_num, current->comm, task_pid_nr(current));
        ch = riocm_ch_create(&ch_num);
        if (IS_ERR(ch))
                return PTR_ERR(ch);

        ch->filp = filp;
        riocm_debug(CHOP, "ch_%d created by %s(%d)",
                    ch_num, current->comm, task_pid_nr(current));
        return put_user(ch_num, p);
}

/*
 * cm_chan_close() - Close channel
 * @filp: Pointer to file object
 * @arg: Channel to close
 */
static int cm_chan_close(struct file *filp, void __user *arg)
{
        u16 __user *p = arg;
        u16 ch_num;
        struct rio_channel *ch;

        if (get_user(ch_num, p))
                return -EFAULT;

        riocm_debug(CHOP, "ch_%d by %s(%d)",
                    ch_num, current->comm, task_pid_nr(current));

        spin_lock_bh(&idr_lock);
        ch = idr_find(&ch_idr, ch_num);
        if (!ch) {
                spin_unlock_bh(&idr_lock);
                return 0;
        }
        if (ch->filp != filp) {
                spin_unlock_bh(&idr_lock);
                return -EINVAL;
        }
        idr_remove(&ch_idr, ch->id);
        spin_unlock_bh(&idr_lock);

        return riocm_ch_close(ch);
}

/*
 * cm_chan_bind() - Bind channel
 * @arg: Channel number
 */
static int cm_chan_bind(void __user *arg)
{
        struct rio_cm_channel chan;

        if (copy_from_user(&chan, arg, sizeof(chan)))
                return -EFAULT;
        if (chan.mport_id >= RIO_MAX_MPORTS)
                return -EINVAL;

        return riocm_ch_bind(chan.id, chan.mport_id, NULL);
}

/*
 * cm_chan_listen() - Listen on channel
 * @arg: Channel number
 */
static int cm_chan_listen(void __user *arg)
{
        u16 __user *p = arg;
        u16 ch_num;

        if (get_user(ch_num, p))
                return -EFAULT;

        return riocm_ch_listen(ch_num);
}

/*
 * cm_chan_accept() - Accept incoming connection
 * @filp: Pointer to file object
 * @arg: Channel number
 */
static int cm_chan_accept(struct file *filp, void __user *arg)
{
        struct rio_cm_accept param;
        long accept_to;
        struct rio_channel *ch;

        if (copy_from_user(&param, arg, sizeof(param)))
                return -EFAULT;

        riocm_debug(CHOP, "on ch_%d by %s(%d)",
                    param.ch_num, current->comm, task_pid_nr(current));

        accept_to = param.wait_to ?
                        msecs_to_jiffies(param.wait_to) : 0;

        ch = riocm_ch_accept(param.ch_num, &param.ch_num, accept_to);
        if (IS_ERR(ch))
                return PTR_ERR(ch);
        ch->filp = filp;

        riocm_debug(CHOP, "new ch_%d for %s(%d)",
                    ch->id, current->comm, task_pid_nr(current));

        if (copy_to_user(arg, &param, sizeof(param)))
                return -EFAULT;
        return 0;
}

/*
 * cm_chan_connect() - Connect on channel
 * @arg: Channel information
 */
static int cm_chan_connect(void __user *arg)
{
        struct rio_cm_channel chan;
        struct cm_dev *cm;
        struct cm_peer *peer;
        int ret = -ENODEV;

        if (copy_from_user(&chan, arg, sizeof(chan)))
                return -EFAULT;
        if (chan.mport_id >= RIO_MAX_MPORTS)
                return -EINVAL;

        down_read(&rdev_sem);

        /* Find matching cm_dev object */
        list_for_each_entry(cm, &cm_dev_list, list) {
                if (cm->mport->id == chan.mport_id) {
                        ret = 0;
                        break;
                }
        }

        if (ret)
                goto err_out;

        if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) {
                ret = -EINVAL;
                goto err_out;
        }

        /* Find corresponding RapidIO endpoint device object */
        ret = -ENODEV;

        list_for_each_entry(peer, &cm->peers, node) {
                if (peer->rdev->destid == chan.remote_destid) {
                        ret = 0;
                        break;
                }
        }

        if (ret)
                goto err_out;

        up_read(&rdev_sem);

        return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel);
err_out:
        up_read(&rdev_sem);
        return ret;
}

/*
 * cm_chan_msg_send() - Send a message through channel
 * @arg: Outbound message information
 */
static int cm_chan_msg_send(void __user *arg)
{
        struct rio_cm_msg msg;
        void *buf;
        int ret = 0;

        if (copy_from_user(&msg, arg, sizeof(msg)))
                return -EFAULT;
        /* Buffer must have room for the CM header written by riocm_ch_send */
        if (msg.size < sizeof(struct rio_ch_chan_hdr) ||
            msg.size > RIO_MAX_MSG_SIZE)
                return -EINVAL;

        buf = kmalloc(msg.size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) {
                ret = -EFAULT;
                goto out;
        }

        ret = riocm_ch_send(msg.ch_num, buf, msg.size);
out:
        kfree(buf);
        return ret;
}

/*
 * cm_chan_msg_rcv() - Receive a message through channel
 * @arg: Inbound message information
 */
static int cm_chan_msg_rcv(void __user *arg)
{
        struct rio_cm_msg msg;
        struct rio_channel *ch;
        void *buf;
        long rxto;
        int ret = 0, msg_size;

        if (copy_from_user(&msg, arg, sizeof(msg)))
                return -EFAULT;

        if (msg.ch_num == 0 || msg.size == 0)
                return -EINVAL;

        ch = riocm_get_channel(msg.ch_num);
        if (!ch)
                return -ENODEV;

        rxto = msg.rxto ? msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT;

        ret = riocm_ch_receive(ch, &buf, rxto);
        if (ret)
                goto out;

        msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE));

        if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size))
                ret = -EFAULT;

        riocm_ch_free_rxbuf(ch, buf);
out:
        riocm_put_channel(ch);
        return ret;
}

/*
 * riocm_cdev_ioctl() - IOCTL requests handler
 */
static long
riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case RIO_CM_EP_GET_LIST_SIZE:
                return cm_ep_get_list_size((void __user *)arg);
        case RIO_CM_EP_GET_LIST:
                return cm_ep_get_list((void __user *)arg);
        case RIO_CM_CHAN_CREATE:
                return cm_chan_create(filp, (void __user *)arg);
        case RIO_CM_CHAN_CLOSE:
                return cm_chan_close(filp, (void __user *)arg);
        case RIO_CM_CHAN_BIND:
                return cm_chan_bind((void __user *)arg);
        case RIO_CM_CHAN_LISTEN:
                return cm_chan_listen((void __user *)arg);
        case RIO_CM_CHAN_ACCEPT:
                return cm_chan_accept(filp, (void __user *)arg);
        case RIO_CM_CHAN_CONNECT:
                return cm_chan_connect((void __user *)arg);
        case RIO_CM_CHAN_SEND:
                return cm_chan_msg_send((void __user *)arg);
        case RIO_CM_CHAN_RECEIVE:
                return cm_chan_msg_rcv((void __user *)arg);
        case RIO_CM_MPORT_GET_LIST:
                return cm_mport_get_list((void __user *)arg);
        default:
                break;
        }

        return -EINVAL;
}

static const struct file_operations riocm_cdev_fops = {
        .owner          = THIS_MODULE,
        .open           = riocm_cdev_open,
        .release        = riocm_cdev_release,
        .unlocked_ioctl = riocm_cdev_ioctl,
};

/*
 * riocm_add_dev - add new remote RapidIO device into channel management core
 * @dev: device object associated with RapidIO device
 * @sif: subsystem interface
 *
 * Adds the specified RapidIO device (if applicable) into peers list of
 * the corresponding channel management device (cm_dev).
 */
static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)
{
        struct cm_peer *peer;
        struct rio_dev *rdev = to_rio_dev(dev);
        struct cm_dev *cm;

        /* Check if the remote device has capabilities required to support CM */
        if (!dev_cm_capable(rdev))
                return 0;

        riocm_debug(RDEV, "(%s)", rio_name(rdev));

        peer = kmalloc(sizeof(*peer), GFP_KERNEL);
        if (!peer)
                return -ENOMEM;

        /* Find a corresponding cm_dev object */
        down_write(&rdev_sem);
        list_for_each_entry(cm, &cm_dev_list, list) {
                if (cm->mport == rdev->net->hport)
                        goto found;
        }

        up_write(&rdev_sem);
        kfree(peer);
        return -ENODEV;

found:
        peer->rdev = rdev;
        list_add_tail(&peer->node, &cm->peers);
        cm->npeers++;

        up_write(&rdev_sem);
        return 0;
}
1989
1990/*
1991 * riocm_remove_dev - remove remote RapidIO device from channel management core
1992 * @dev: device object associated with RapidIO device
1993 * @sif: subsystem interface
1994 *
1995 * Removes the specified RapidIO device (if applicable) from peers list of
1996 * the corresponding channel management device (cm_dev).
1997 */
1998static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)
1999{
2000 struct rio_dev *rdev = to_rio_dev(dev);
2001 struct cm_dev *cm;
2002 struct cm_peer *peer;
2003 struct rio_channel *ch, *_c;
2004 unsigned int i;
2005 bool found = false;
2006 LIST_HEAD(list);
2007
2008 /* Check if the remote device has capabilities required to support CM */
2009 if (!dev_cm_capable(rdev))
2010 return;
2011
2012 riocm_debug(RDEV, "(%s)", rio_name(rdev));
2013
2014 /* Find matching cm_dev object */
2015 down_write(&rdev_sem);
2016 list_for_each_entry(cm, &cm_dev_list, list) {
2017 if (cm->mport == rdev->net->hport) {
2018 found = true;
2019 break;
2020 }
2021 }
2022
2023 if (!found) {
2024 up_write(&rdev_sem);
2025 return;
2026 }
2027
2028 /* Remove remote device from the list of peers */
2029 found = false;
2030 list_for_each_entry(peer, &cm->peers, node) {
2031 if (peer->rdev == rdev) {
2032 riocm_debug(RDEV, "removing peer %s", rio_name(rdev));
2033 found = true;
2034 list_del(&peer->node);
2035 cm->npeers--;
2036 kfree(peer);
2037 break;
2038 }
2039 }
2040
2041 up_write(&rdev_sem);
2042
2043 if (!found)
2044 return;
2045
2046 /*
2047 * Release channels associated with this peer
2048 */
2049
2050 spin_lock_bh(&idr_lock);
2051 idr_for_each_entry(&ch_idr, ch, i) {
2052 if (ch && ch->rdev == rdev) {
2053 if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN)
2054 riocm_exch(ch, RIO_CM_DISCONNECT);
2055 idr_remove(&ch_idr, ch->id);
2056 list_add(&ch->ch_node, &list);
2057 }
2058 }
2059 spin_unlock_bh(&idr_lock);
2060
2061 if (!list_empty(&list)) {
2062 list_for_each_entry_safe(ch, _c, &list, ch_node) {
2063 list_del(&ch->ch_node);
2064 riocm_ch_close(ch);
2065 }
2066 }
2067}
2068
2069/*
2070 * riocm_cdev_add() - Create rio_cm char device
2071 * @devno: device number assigned to device (MAJ + MIN)
2072 */
2073static int riocm_cdev_add(dev_t devno)
2074{
2075 int ret;
2076
2077 cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops);
2078 riocm_cdev.cdev.owner = THIS_MODULE;
2079 ret = cdev_add(&riocm_cdev.cdev, devno, 1);
2080 if (ret < 0) {
2081 riocm_error("Cannot register a device with error %d", ret);
2082 return ret;
2083 }
2084
2085 riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME);
2086 if (IS_ERR(riocm_cdev.dev)) {
2087 cdev_del(&riocm_cdev.cdev);
2088 return PTR_ERR(riocm_cdev.dev);
2089 }
2090
2091 riocm_debug(MPORT, "Added %s cdev(%d:%d)",
2092 DEV_NAME, MAJOR(devno), MINOR(devno));
2093
2094 return 0;
2095}
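
/*
 * Example (user space, illustrative only): once riocm_cdev_add() succeeds
 * and udev has created the device node, it can be opened like any other
 * character device. The path below assumes DEV_NAME expands to "rio_cm":
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/rio_cm", O_RDWR);
 *
 *		if (fd < 0) {
 *			perror("open /dev/rio_cm");
 *			return 1;
 *		}
 *		// channel operations are issued through ioctl() on fd
 *		close(fd);
 *		return 0;
 *	}
 */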
2096
2097/*
2098 * riocm_add_mport - add new local mport device into channel management core
2099 * @dev: device object associated with mport
2100 * @class_intf: class interface
2101 *
 * When a new mport device is added, the CM core immediately reserves the
 * inbound and outbound RapidIO mailboxes that it will use for messaging.
2104 */
2105static int riocm_add_mport(struct device *dev,
2106 struct class_interface *class_intf)
2107{
	int rc;
2110 struct cm_dev *cm;
2111 struct rio_mport *mport = to_rio_mport(dev);
2112
2113 riocm_debug(MPORT, "add mport %s", mport->name);
2114
2115 cm = kzalloc(sizeof(*cm), GFP_KERNEL);
2116 if (!cm)
2117 return -ENOMEM;
2118
2119 cm->mport = mport;
2120
2121 rc = rio_request_outb_mbox(mport, cm, cmbox,
2122 RIOCM_TX_RING_SIZE, riocm_outb_msg_event);
2123 if (rc) {
2124 riocm_error("failed to allocate OBMBOX_%d on %s",
2125 cmbox, mport->name);
2126 kfree(cm);
2127 return -ENODEV;
2128 }
2129
2130 rc = rio_request_inb_mbox(mport, cm, cmbox,
2131 RIOCM_RX_RING_SIZE, riocm_inb_msg_event);
2132 if (rc) {
2133 riocm_error("failed to allocate IBMBOX_%d on %s",
2134 cmbox, mport->name);
2135 rio_release_outb_mbox(mport, cmbox);
2136 kfree(cm);
2137 return -ENODEV;
2138 }
2139
	/*
	 * Allocate and register inbound messaging buffers to be ready
	 * to receive channel and system management requests.
	 * (cm->rx_buf[] is already zero-initialized by the kzalloc() above,
	 * so no explicit clearing loop is needed.)
	 */
	cm->rx_slots = RIOCM_RX_RING_SIZE;
	mutex_init(&cm->rx_lock);
	riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);

	/*
	 * create_workqueue() may fail; check the result here rather than
	 * queuing work on a NULL workqueue later.
	 */
	cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
	if (!cm->rx_wq) {
		riocm_error("failed to allocate rx_wq for %s", mport->name);
		rio_release_inb_mbox(mport, cmbox);
		rio_release_outb_mbox(mport, cmbox);
		riocm_rx_free(cm);
		kfree(cm);
		return -ENOMEM;
	}
	INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
2152
2153 cm->tx_slot = 0;
2154 cm->tx_cnt = 0;
2155 cm->tx_ack_slot = 0;
2156 spin_lock_init(&cm->tx_lock);
2157
2158 INIT_LIST_HEAD(&cm->peers);
2159 cm->npeers = 0;
2160 INIT_LIST_HEAD(&cm->tx_reqs);
2161
2162 down_write(&rdev_sem);
2163 list_add_tail(&cm->list, &cm_dev_list);
2164 up_write(&rdev_sem);
2165
2166 return 0;
2167}
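
/*
 * Design note (illustrative): on failure, riocm_add_mport() releases
 * resources in the reverse order of acquisition. The same flow expressed
 * as the conventional goto-unwind ladder would look like this sketch:
 *
 *	rc = rio_request_outb_mbox(mport, cm, cmbox, RIOCM_TX_RING_SIZE,
 *				   riocm_outb_msg_event);
 *	if (rc)
 *		goto err_free;
 *
 *	rc = rio_request_inb_mbox(mport, cm, cmbox, RIOCM_RX_RING_SIZE,
 *				   riocm_inb_msg_event);
 *	if (rc)
 *		goto err_ombox;
 *	...
 *	return 0;
 *
 * err_ombox:
 *	rio_release_outb_mbox(mport, cmbox);
 * err_free:
 *	kfree(cm);
 *	return rc;
 */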
2168
2169/*
2170 * riocm_remove_mport - remove local mport device from channel management core
2171 * @dev: device object associated with mport
2172 * @class_intf: class interface
2173 *
 * Removes a local mport device from the list of registered devices that
 * provide channel management services. If the specified mport is not
 * registered with the CM core, the function simply returns (it has a void
 * return type, so no error is reported).
2177 */
2178static void riocm_remove_mport(struct device *dev,
2179 struct class_interface *class_intf)
2180{
2181 struct rio_mport *mport = to_rio_mport(dev);
2182 struct cm_dev *cm;
2183 struct cm_peer *peer, *temp;
2184 struct rio_channel *ch, *_c;
2185 unsigned int i;
2186 bool found = false;
2187 LIST_HEAD(list);
2188
2189 riocm_debug(MPORT, "%s", mport->name);
2190
2191 /* Find a matching cm_dev object */
2192 down_write(&rdev_sem);
2193 list_for_each_entry(cm, &cm_dev_list, list) {
2194 if (cm->mport == mport) {
2195 list_del(&cm->list);
2196 found = true;
2197 break;
2198 }
2199 }
2200 up_write(&rdev_sem);
2201 if (!found)
2202 return;
2203
2204 flush_workqueue(cm->rx_wq);
2205 destroy_workqueue(cm->rx_wq);
2206
2207 /* Release channels bound to this mport */
2208 spin_lock_bh(&idr_lock);
2209 idr_for_each_entry(&ch_idr, ch, i) {
2210 if (ch->cmdev == cm) {
2211 riocm_debug(RDEV, "%s drop ch_%d",
2212 mport->name, ch->id);
2213 idr_remove(&ch_idr, ch->id);
2214 list_add(&ch->ch_node, &list);
2215 }
2216 }
2217 spin_unlock_bh(&idr_lock);
2218
2219 if (!list_empty(&list)) {
2220 list_for_each_entry_safe(ch, _c, &list, ch_node) {
2221 list_del(&ch->ch_node);
2222 riocm_ch_close(ch);
2223 }
2224 }
2225
2226 rio_release_inb_mbox(mport, cmbox);
2227 rio_release_outb_mbox(mport, cmbox);
2228
2229 /* Remove and free peer entries */
2230 if (!list_empty(&cm->peers))
2231 riocm_debug(RDEV, "ATTN: peer list not empty");
2232 list_for_each_entry_safe(peer, temp, &cm->peers, node) {
2233 riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev));
2234 list_del(&peer->node);
2235 kfree(peer);
2236 }
2237
2238 riocm_rx_free(cm);
2239 kfree(cm);
2240 riocm_debug(MPORT, "%s done", mport->name);
2241}
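
/*
 * Teardown ordering note: the rx workqueue is flushed and destroyed first,
 * so no inbound-message work can run while channels are being dropped; the
 * mailboxes are released only after every channel bound to this mport has
 * been closed, and the peer list and rx buffers are freed last.
 */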
2242
2243static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
2244 void *unused)
2245{
2246 struct rio_channel *ch;
2247 unsigned int i;
2248
2249 riocm_debug(EXIT, ".");
2250
2251 spin_lock_bh(&idr_lock);
2252 idr_for_each_entry(&ch_idr, ch, i) {
2253 riocm_debug(EXIT, "close ch %d", ch->id);
2254 if (ch->state == RIO_CM_CONNECTED)
2255 riocm_send_close(ch);
2256 }
2257 spin_unlock_bh(&idr_lock);
2258
2259 return NOTIFY_DONE;
2260}
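
/*
 * Illustrative sketch: rio_cm_shutdown() follows the standard reboot
 * notifier pattern and is wired up via register_reboot_notifier() in
 * riocm_init() below. A minimal stand-alone notifier, with hypothetical
 * names, looks like:
 *
 *	static int my_shutdown(struct notifier_block *nb, unsigned long code,
 *			       void *unused)
 *	{
 *		// last-chance cleanup before reboot/halt
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_shutdown,
 *	};
 */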
2261
2262/*
2263 * riocm_interface handles addition/removal of remote RapidIO devices
2264 */
2265static struct subsys_interface riocm_interface = {
2266 .name = "rio_cm",
2267 .subsys = &rio_bus_type,
2268 .add_dev = riocm_add_dev,
2269 .remove_dev = riocm_remove_dev,
2270};
2271
2272/*
 * rio_mport_interface handles addition/removal of local mport devices
2274 */
2275static struct class_interface rio_mport_interface __refdata = {
2276 .class = &rio_mport_class,
2277 .add_dev = riocm_add_mport,
2278 .remove_dev = riocm_remove_mport,
2279};
2280
2281static struct notifier_block rio_cm_notifier = {
2282 .notifier_call = rio_cm_shutdown,
2283};
2284
2285static int __init riocm_init(void)
2286{
2287 int ret;
2288
2289 /* Create device class needed by udev */
2290 dev_class = class_create(THIS_MODULE, DRV_NAME);
2291 if (IS_ERR(dev_class)) {
2292 riocm_error("Cannot create " DRV_NAME " class");
2293 return PTR_ERR(dev_class);
2294 }
2295
2296 ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME);
2297 if (ret) {
2298 class_destroy(dev_class);
2299 return ret;
2300 }
2301
2302 dev_major = MAJOR(dev_number);
2303 dev_minor_base = MINOR(dev_number);
	riocm_debug(INIT, "Registered class with major %d", dev_major);
2305
2306 /*
2307 * Register as rapidio_port class interface to get notifications about
2308 * mport additions and removals.
2309 */
2310 ret = class_interface_register(&rio_mport_interface);
2311 if (ret) {
2312 riocm_error("class_interface_register error: %d", ret);
2313 goto err_reg;
2314 }
2315
2316 /*
2317 * Register as RapidIO bus interface to get notifications about
2318 * addition/removal of remote RapidIO devices.
2319 */
2320 ret = subsys_interface_register(&riocm_interface);
2321 if (ret) {
2322 riocm_error("subsys_interface_register error: %d", ret);
2323 goto err_cl;
2324 }
2325
2326 ret = register_reboot_notifier(&rio_cm_notifier);
2327 if (ret) {
2328 riocm_error("failed to register reboot notifier (err=%d)", ret);
2329 goto err_sif;
2330 }
2331
	ret = riocm_cdev_add(dev_number);
	if (ret) {
		unregister_reboot_notifier(&rio_cm_notifier);
		/* propagate the error code from riocm_cdev_add() */
		goto err_sif;
	}
2338
2339 return 0;
2340err_sif:
2341 subsys_interface_unregister(&riocm_interface);
2342err_cl:
2343 class_interface_unregister(&rio_mport_interface);
2344err_reg:
2345 unregister_chrdev_region(dev_number, 1);
2346 class_destroy(dev_class);
2347 return ret;
2348}
2349
2350static void __exit riocm_exit(void)
2351{
2352 riocm_debug(EXIT, "enter");
2353 unregister_reboot_notifier(&rio_cm_notifier);
2354 subsys_interface_unregister(&riocm_interface);
2355 class_interface_unregister(&rio_mport_interface);
2356 idr_destroy(&ch_idr);
2357
2358 device_unregister(riocm_cdev.dev);
	cdev_del(&riocm_cdev.cdev);
2360
2361 class_destroy(dev_class);
2362 unregister_chrdev_region(dev_number, 1);
2363}
2364
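/*
 * Editorial note: late_initcall() is used here instead of module_init() so
 * that, in built-in configurations, rio_cm initializes only after the
 * RapidIO core and mport drivers have had a chance to register; when built
 * as a module it behaves the same as module_init().
 */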
2365late_initcall(riocm_init);
2366module_exit(riocm_exit);