author	Jon Mason <jon.mason@intel.com>	2012-11-16 21:27:12 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-01-17 22:11:14 -0500
commit	fce8a7bb5b4bfb8a27324703fd5b002ee9247e90 (patch)
tree	03ea4f4939d399265ecfa5f11081895a969115e7 /drivers/ntb/ntb_transport.c
parent	ea8a83a4b718f78a8ea2ce3f0237e78a23f8f12b (diff)
PCI-Express Non-Transparent Bridge Support
A PCI-Express non-transparent bridge (NTB) is a point-to-point PCIe bus connecting two systems, providing electrical isolation between the two subsystems. A non-transparent bridge is functionally similar to a transparent bridge except that both sides of the bridge have their own independent address domains. The host on one side of the bridge does not have visibility into the complete memory or I/O space on the other side of the bridge. To communicate across the non-transparent bridge, each NTB endpoint has one (or more) apertures exposed to the local system. Writes to these apertures are mirrored to memory on the remote system. Communication can also occur through doorbell registers that initiate interrupts to the alternate domain, and through scratch-pad registers accessible from both sides.

The NTB device driver is needed to configure these memory windows, doorbell, and scratch-pad registers, and to use them in such a way that they can be turned into a viable communication channel to the remote system. ntb_hw.[ch] determines the usage model (NTB to NTB or NTB to Root Port) and abstracts away the underlying hardware to provide access and a common interface to the doorbell registers, scratch pads, and memory windows. These hardware interfaces are exported so that other, non-mainlined kernel drivers can access them.

ntb_transport.[ch] in turn uses the exported interfaces in ntb_hw.[ch] to set up communication channel(s) and provide a reliable way of transferring data from one side to the other, which it then exports so that "client" drivers can access them. These client drivers are used to provide a standard kernel interface (e.g., an Ethernet device) to NTB, such that Linux can transfer data from one system to the other in a standard way.

Signed-off-by: Jon Mason <jon.mason@intel.com>
Reviewed-by: Nicholas Bellinger <nab@linux-iscsi.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
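The flow a client is expected to follow can be sketched as below. This is a minimal sketch only: the module name "foo" and all foo_* identifiers are hypothetical, and the struct ntb_client field names are inferred from how ntb_transport.c dereferences them.

	static const struct ntb_queue_handlers foo_handlers = {
		/* rx_handler, tx_handler, and event_handler are all
		 * optional; the transport checks each pointer before
		 * invoking it */
	};

	static struct ntb_transport_qp *foo_qp;

	static int foo_probe(struct pci_dev *pdev)
	{
		foo_qp = ntb_transport_create_queue(NULL, pdev, &foo_handlers);
		if (!foo_qp)
			return -EIO;

		/* announce client readiness; the qp comes up once both
		 * sides have done this */
		ntb_transport_link_up(foo_qp);
		return 0;
	}

	static void foo_remove(struct pci_dev *pdev)
	{
		ntb_transport_link_down(foo_qp);
		ntb_transport_free_queue(foo_qp);
	}

	static struct ntb_client foo_client = {
		.driver	= { .name = "foo", .owner = THIS_MODULE },
		.probe	= foo_probe,
		.remove	= foo_remove,
	};

	/* module init: create the ntb_bus device, then register the driver */
	ntb_register_client_dev("foo");
	ntb_register_client(&foo_client);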
Diffstat (limited to 'drivers/ntb/ntb_transport.c')
-rw-r--r--	drivers/ntb/ntb_transport.c	1427
1 files changed, 1427 insertions, 0 deletions
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
new file mode 100644
index 000000000000..c907e0773532
--- /dev/null
+++ b/drivers/ntb/ntb_transport.c
@@ -0,0 +1,1427 @@
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ntb.h>
#include "ntb_hw.h"

#define NTB_TRANSPORT_VERSION	1

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients = 2;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;
};

struct ntb_transport_qp {
	struct ntb_transport *transport;
	struct ntb_device *ndev;
	void *cb_data;

	bool client_ready;
	bool qp_link;
	u8 qp_num;	/* Only 64 QPs are allowed.  0-63 */

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void *tx_mw_begin;
	void *tx_mw_end;
	void *tx_offset;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct tasklet_struct rx_work;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff_begin;
	void *rx_buff_end;
	void *rx_offset;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
};

struct ntb_transport_mw {
	size_t size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct device dev;
};

struct ntb_transport {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_device *ndev;
	struct ntb_transport_mw mw[NTB_NUM_MW];
	struct ntb_transport_qp *qps;
	unsigned int max_qps;
	unsigned long qp_bitmap;
	bool transport_link;
	struct delayed_work link_work;
	struct dentry *debugfs_dir;
};

enum {
	DESC_DONE_FLAG = 1 << 0,
	LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
	u64 ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	MW0_SZ,
	MW1_SZ,
	NUM_QPS,
	QP_LINKS,
	MAX_SPAD,
};

#define QP_TO_MW(qp)		((qp) % NTB_NUM_MW)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
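
/*
 * Ring layout (descriptive note, derived from the structures above): each
 * qp's slice of a memory window is carved into transport_mtu sized slots,
 * and a struct ntb_payload_header sits in the tail of every slot:
 *
 *   |<------------------- transport_mtu ------------------->|
 *   +----------------------------------+---------------------+
 *   | payload (up to mtu - header size)| ver | len | flags   |
 *   +----------------------------------+---------------------+
 *
 * The transmitter sets DESC_DONE_FLAG in flags last, after the payload has
 * been copied, and the receiver clears flags once it has copied the data
 * out.  QP_TO_MW() stripes qps across memory windows: with NTB_NUM_MW == 2,
 * qps 0, 2, 4, ... land in MW0 and qps 1, 3, 5, ... in MW1.
 */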

static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_client_probe(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
	int rc = -EINVAL;

	get_device(dev);
	if (drv && drv->probe)
		rc = drv->probe(pdev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_client_remove(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

	if (drv && drv->remove)
		drv->remove(pdev);

	put_device(dev);

	return 0;
}

struct bus_type ntb_bus_type = {
	.name = "ntb_bus",
	.match = ntb_match_bus,
	.probe = ntb_client_probe,
	.remove = ntb_client_remove,
};

static LIST_HEAD(ntb_transport_list);

static int __devinit ntb_bus_init(struct ntb_transport *nt)
{
	if (list_empty(&ntb_transport_list)) {
		int rc = bus_register(&ntb_bus_type);
		if (rc)
			return rc;
	}

	list_add(&nt->entry, &ntb_transport_list);

	return 0;
}

static void __devexit ntb_bus_remove(struct ntb_transport *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);

	if (list_empty(&ntb_transport_list))
		bus_unregister(&ntb_bus_type);
}

static void ntb_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;
	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);

	kfree(client_dev);
}

/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);

/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport *nt;
	int rc;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s", device_name);
		dev->bus = &ntb_bus_type;
		dev->release = ntb_client_release;
		dev->parent = &ntb_query_pdev(nt->ndev)->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
	}

	return 0;

err:
	ntb_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);

/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
	drv->driver.bus = &ntb_bus_type;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);

/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_unregister_client(struct ntb_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);
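
/*
 * Note the binding model implemented above: ntb_register_client_dev()
 * instantiates a device named after the client on ntb_bus for every
 * registered transport, and ntb_match_bus() binds it to any driver whose
 * name is a prefix of the device name, at which point ntb_client_probe()
 * hands the client the underlying struct pci_dev.
 */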

static int debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char buf[1024];
	ssize_t ret, out_offset, out_count;

	out_count = sizeof(buf);

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff_begin - %p\n", qp->rx_buff_begin);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_offset - \t%p\n", qp->rx_offset);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff_end - \t%p\n", qp->rx_buff_end);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw_begin - \t%p\n", qp->tx_mw_begin);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_offset - \t%p\n", qp->tx_offset);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw_end - \t%p\n", qp->tx_mw_end);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "QP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
			       "Up" : "Down");

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = debugfs_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
				      unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	unsigned int size, num_qps_mw;
	u8 mw_num = QP_TO_MW(qp_num);

	WARN_ON(!nt->mw[mw_num].virt_addr);

	if (nt->max_qps % NTB_NUM_MW && !mw_num)
		num_qps_mw = nt->max_qps / NTB_NUM_MW +
			     (nt->max_qps % NTB_NUM_MW - mw_num);
	else
		num_qps_mw = nt->max_qps / NTB_NUM_MW;

	size = nt->mw[mw_num].size / num_qps_mw;

	qp->rx_buff_begin = nt->mw[mw_num].virt_addr +
			    (qp_num / NTB_NUM_MW * size);
	qp->rx_buff_end = qp->rx_buff_begin + size;
	qp->rx_offset = qp->rx_buff_begin;

	qp->tx_mw_begin = ntb_get_mw_vbase(nt->ndev, mw_num) +
			  (qp_num / NTB_NUM_MW * size);
	qp->tx_mw_end = qp->tx_mw_begin + size;
	qp->tx_offset = qp->tx_mw_begin;

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
}
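
/*
 * Worked example for the split above: with max_qps == 5 and NTB_NUM_MW == 2,
 * MW0 serves qps 0, 2 and 4 (num_qps_mw = 5/2 + 5%2 = 3) while MW1 serves
 * qps 1 and 3 (num_qps_mw = 5/2 = 2); each qp then owns a slice of
 * mw->size / num_qps_mw bytes at index qp_num / NTB_NUM_MW inside its
 * window.
 */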

static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
	void *offset;

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = ALIGN(size, 4096);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
					   GFP_KERNEL);
	if (!mw->virt_addr) {
		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
			(int) mw->size);
		return -ENOMEM;
	}

	/* setup the hdr offsets with 0's */
	for (offset = mw->virt_addr + transport_mtu -
		      sizeof(struct ntb_payload_header);
	     offset < mw->virt_addr + size; offset += transport_mtu)
		memset(offset, 0, sizeof(struct ntb_payload_header));

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

	return 0;
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	struct ntb_transport *nt = qp->transport;
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (qp->qp_link == NTB_LINK_DOWN) {
		cancel_delayed_work_sync(&qp->link_work);
		return;
	}

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->qp_link = NTB_LINK_DOWN;

	if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_transport_conn_down(struct ntb_transport *nt)
{
	int i;

	if (nt->transport_link == NTB_LINK_DOWN)
		cancel_delayed_work_sync(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_down(&nt->qps[i]);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_write_local_spad(nt->ndev, i, 0);
}

static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_transport *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		schedule_delayed_work(&nt->link_work, 0);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		ntb_transport_conn_down(nt);
		break;
	default:
		BUG();
	}
}
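
/*
 * The work function below implements the scratchpad handshake: each side
 * publishes VERSION, MW0_SZ, MW1_SZ, NUM_QPS and QP_LINKS to the peer's
 * scratchpads, then reads back the peer's values.  Only when the versions
 * and qp counts match and both remote MW sizes are non-zero does it
 * allocate the local rx buffers via ntb_set_mw() and mark the transport
 * NTB_LINK_UP; otherwise it reschedules itself every NTB_LINK_DOWN_TIMEOUT
 * msecs for as long as the hardware link is up.
 */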

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_work.work);
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev = ntb_query_pdev(ndev);
	u32 val;
	int rc, i;

	/* send the local info */
	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_TRANSPORT_VERSION, VERSION);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			(u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			(u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			nt->max_qps, NUM_QPS);
		goto out;
	}

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val, QP_LINKS);
		goto out;
	}

	/* Query the remote side for its info */
	rc = ntb_read_remote_spad(ndev, VERSION, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
		goto out;
	}

	if (val != NTB_TRANSPORT_VERSION)
		goto out;
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
		goto out;
	}

	if (val != nt->max_qps)
		goto out;
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

	rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
		goto out;
	}

	if (!val)
		goto out;
	dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);

	rc = ntb_set_mw(nt, 0, val);
	if (rc)
		goto out;

	rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
		goto out;
	}

	if (!val)
		goto out;
	dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);

	rc = ntb_set_mw(nt, 1, val);
	if (rc)
		goto out;

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		struct ntb_transport_qp *qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out:
	if (ntb_hw_link_status(ndev))
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_transport *nt = qp->transport;
	int rc, val;

	WARN_ON(nt->transport_link != NTB_LINK_UP);

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val | 1 << qp->qp_num, QP_LINKS);

	/* query remote spad for qp ready bits */
	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
	if (rc)
		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & (1 << qp->qp_num)) {
		qp->qp_link = NTB_LINK_UP;

		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		if (qp->event_handler)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_transport_init_queue(struct ntb_transport *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp;

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

	if (nt->debugfs_dir) {
		char debugfs_name[8];

		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt->debugfs_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);
}

int ntb_transport_init(struct pci_dev *pdev)
{
	struct ntb_transport *nt;
	int rc, i;

	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	if (debugfs_initialized())
		nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
	else
		nt->debugfs_dir = NULL;

	nt->ndev = ntb_register_transport(pdev, nt);
	if (!nt->ndev) {
		rc = -EIO;
		goto err;
	}

	nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);

	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
			  GFP_KERNEL);
	if (!nt->qps) {
		rc = -ENOMEM;
		goto err1;
	}

	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;

	for (i = 0; i < nt->max_qps; i++)
		ntb_transport_init_queue(nt, i);

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);

	rc = ntb_register_event_callback(nt->ndev,
					 ntb_transport_event_callback);
	if (rc)
		goto err2;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err3;

	if (ntb_hw_link_status(nt->ndev))
		schedule_delayed_work(&nt->link_work, 0);

	return 0;

err3:
	ntb_unregister_event_callback(nt->ndev);
err2:
	kfree(nt->qps);
err1:
	ntb_unregister_transport(nt->ndev);
err:
	debugfs_remove_recursive(nt->debugfs_dir);
	kfree(nt);
	return rc;
}

void ntb_transport_free(void *transport)
{
	struct ntb_transport *nt = transport;
	struct pci_dev *pdev;
	int i;

	nt->transport_link = NTB_LINK_DOWN;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);

	ntb_bus_remove(nt);

	cancel_delayed_work_sync(&nt->link_work);

	debugfs_remove_recursive(nt->debugfs_dir);

	ntb_unregister_event_callback(nt->ndev);

	pdev = ntb_query_pdev(nt->ndev);

	for (i = 0; i < NTB_NUM_MW; i++)
		if (nt->mw[i].virt_addr)
			dma_free_coherent(&pdev->dev, nt->mw[i].size,
					  nt->mw[i].virt_addr,
					  nt->mw[i].dma_addr);

	kfree(nt->qps);
	ntb_unregister_transport(nt->ndev);
	kfree(nt);
}

static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
			     struct ntb_queue_entry *entry, void *offset)
{
	struct ntb_payload_header *hdr;

	BUG_ON(offset < qp->rx_buff_begin ||
	       offset + transport_mtu >= qp->rx_buff_end);

	hdr = offset + transport_mtu - sizeof(struct ntb_payload_header);
	entry->len = hdr->len;

	memcpy(entry->buf, offset, entry->len);

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;

	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
		qp->rx_handler(qp, qp->cb_data, entry->cb_data, entry->len);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_offset;
	hdr = offset + transport_mtu - sizeof(struct ntb_payload_header);

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"no buffer - HDR ver %llu, len %d, flags %x\n",
			hdr->ver, hdr->len, hdr->flags);
		qp->rx_err_no_buf++;
		return -ENOMEM;
	}

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->ver != qp->rx_pkts) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"qp %d: version mismatch, expected %llu - got %llu\n",
			qp->qp_num, qp->rx_pkts, hdr->ver);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_err_ver++;
		return -EIO;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		ntb_qp_link_down(qp);

		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);

		/* Ensure that the data is fully copied out before clearing the
		 * done flag
		 */
		wmb();
		hdr->flags = 0;
		goto out;
	}

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
		"rx offset %p, ver %llu - %d payload received, buf size %d\n",
		qp->rx_offset, hdr->ver, hdr->len, entry->len);

	if (hdr->len <= entry->len)
		ntb_rx_copy_task(qp, entry, offset);
	else {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);

		/* Ensure that the data is fully copied out before clearing the
		 * done flag
		 */
		wmb();
		hdr->flags = 0;
		qp->rx_err_oflow++;
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"RX overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
	}

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

out:
	qp->rx_offset += transport_mtu;
	if (qp->rx_offset + transport_mtu >= qp->rx_buff_end)
		qp->rx_offset = qp->rx_buff_begin;

	return 0;
}

static void ntb_transport_rx(unsigned long data)
{
	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
	int rc;

	do {
		rc = ntb_process_rxc(qp);
	} while (!rc);
}

static void ntb_transport_rxc_db(void *data, int db_num)
{
	struct ntb_transport_qp *qp = data;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
		__func__, db_num);

	tasklet_schedule(&qp->rx_work);
}

static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
			     struct ntb_queue_entry *entry,
			     void *offset)
{
	struct ntb_payload_header *hdr;

	BUG_ON(offset < qp->tx_mw_begin ||
	       offset + transport_mtu >= qp->tx_mw_end);

	memcpy_toio(offset, entry->buf, entry->len);

	hdr = offset + transport_mtu - sizeof(struct ntb_payload_header);
	hdr->len = entry->len;
	hdr->ver = qp->tx_pkts;

	/* Ensure that the data is fully copied out before setting the flag */
	mmiowb();
	hdr->flags = entry->flags | DESC_DONE_FLAG;

	ntb_ring_sdb(qp->ndev, qp->qp_num);

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	struct ntb_payload_header *hdr;
	void *offset;

	offset = qp->tx_offset;
	hdr = offset + transport_mtu - sizeof(struct ntb_payload_header);

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %p, entry len %d flags %x buff %p\n",
		qp->tx_pkts, offset, qp->tx_offset, entry->len, entry->flags,
		entry->buf);
	if (hdr->flags) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > transport_mtu - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_tx_copy_task(qp, entry, offset);

	qp->tx_offset += transport_mtu;
	if (qp->tx_offset + transport_mtu >= qp->tx_mw_end)
		qp->tx_offset = qp->tx_mw_begin;

	qp->tx_pkts++;

	return 0;
}

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_queue_entry *entry;
	int i, rc;

	if (qp->qp_link == NTB_LINK_DOWN)
		return;

	qp->qp_link = NTB_LINK_DOWN;
	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @pdev: PCI device to which the NTB hardware is attached
 * @handlers: receive, transmit, and event callback functions
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive. The receive callback routine will be
 * used to pass up data when the transport has received it on the queue. The
 * transmit callback routine will be called when the transport has completed
 * the transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct pci_dev *pdev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	struct ntb_transport *nt;
	unsigned int free_queue;
	int rc, i;

	nt = ntb_find_transport(pdev);
	if (!nt)
		goto err;

	free_queue = ffs(nt->qp_bitmap);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	clear_bit(free_queue, &nt->qp_bitmap);

	qp = &nt->qps[free_queue];
	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err2;

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);

	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
				      ntb_transport_rxc_db);
	if (rc)
		goto err3;

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err3:
	tasklet_disable(&qp->rx_work);
err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);
	set_bit(free_queue, &nt->qp_bitmap);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
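
/*
 * Usage sketch (hypothetical client, not part of the original patch): the
 * callbacks below follow the signatures this file invokes through struct
 * ntb_queue_handlers (rx_handler, tx_handler, event_handler); all
 * example_* identifiers are illustrative only.
 */
static void example_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
			       void *data, int len)
{
	/* data is a buffer previously posted via ntb_transport_rx_enqueue */
}

static void example_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
			       void *data, int len)
{
	/* the tx buffer has been copied out and may be freed or reused */
}

static void example_event_handler(void *data, int status)
{
	/* status is NTB_LINK_UP or NTB_LINK_DOWN */
}

static const struct ntb_queue_handlers example_handlers = {
	.rx_handler = example_rx_handler,
	.tx_handler = example_tx_handler,
	.event_handler = example_event_handler,
};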

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);

	cancel_delayed_work_sync(&qp->link_work);

	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
	tasklet_disable(&qp->rx_work);

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	set_bit(qp->qp_num, &qp->transport->qp_bitmap);

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB transport layer queue from which to dequeue
 * @len: pointer to variable to write the enqueued buffer's length to
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: the per-buffer cb pointer passed to ntb_transport_rx_enqueue(),
 * or NULL on error.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready == NTB_LINK_UP)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue, into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;

	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
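
/*
 * Example rx posting loop (illustrative, not part of the original patch):
 * a client keeps the receive ring primed by posting one buffer per expected
 * packet, sized via ntb_transport_max_size().  The rx free ring holds
 * NTB_QP_DEF_NUM_ENTRIES entries, so at most that many posts can succeed.
 */
static int example_post_rx_bufs(struct ntb_transport_qp *qp)
{
	unsigned int len = ntb_transport_max_size(qp);
	int i, rc;

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		void *buf = kmalloc(len, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* the cb pointer is what ntb_transport_rx_remove() returns
		 * at shutdown, so pass the buffer itself here */
		rc = ntb_transport_rx_enqueue(qp, buf, buf, len);
		if (rc) {
			kfree(buf);
			return rc;
		}
	}

	return 0;
}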

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || qp->qp_link != NTB_LINK_UP || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
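
/*
 * Example transmit path (illustrative, not part of the original patch):
 * check the payload against ntb_transport_max_size() up front, and treat
 * -EAGAIN (remote ring slot busy) or -ENOMEM (no free tx entries) as
 * backpressure to retry later.
 */
static int example_send(struct ntb_transport_qp *qp, void *buf,
			unsigned int len)
{
	if (len > ntb_transport_max_size(qp))
		return -EMSGSIZE;

	/* buf must stay valid until the tx_handler callback fires */
	return ntb_transport_tx_enqueue(qp, buf, buf, len);
}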

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = NTB_LINK_UP;

	if (qp->transport->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int rc, val;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);
	qp->client_ready = NTB_LINK_DOWN;

	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
				   val & ~(1 << qp->qp_num));
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val & ~(1 << qp->qp_num), QP_LINKS);

	if (qp->qp_link == NTB_LINK_UP)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);
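
/*
 * Example shutdown sequence (illustrative, not part of the original patch):
 * stop the queue, drain any still-posted rx buffers with
 * ntb_transport_rx_remove() (valid once client_ready is down), then
 * release the queue.
 */
static void example_shutdown(struct ntb_transport_qp *qp)
{
	unsigned int len;
	void *buf;

	ntb_transport_link_down(qp);

	while ((buf = ntb_transport_rx_remove(qp, &len)))
		kfree(buf);

	ntb_transport_free_queue(qp);
}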

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int
ntb_transport_max_size(__attribute__((unused)) struct ntb_transport_qp *qp)
{
	return transport_mtu - sizeof(struct ntb_payload_header);
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
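
/*
 * Example arithmetic: with the default transport_mtu of 0x401E (16414)
 * bytes and a 16 byte struct ntb_payload_header (a u64 plus two unsigned
 * ints, on a typical 64-bit build), ntb_transport_max_size() reports
 * 16398 bytes of usable payload per packet.
 */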