path: root/drivers/ntb
author	Jon Mason <jon.mason@intel.com>	2013-01-19 04:02:18 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-01-20 18:45:50 -0500
commit	ef114ed5064d35982c16f5cbb338fb586ef48bf7 (patch)
tree	7fe60efcc1b81904fa09a9c43194013f35fe5a12 /drivers/ntb
parent	842c1ddea5f9949cb21e568408d2af9d986eee69 (diff)
NTB: separate transmit and receive windows
Since it is possible for the memory windows on the two NTB-connected systems to be different sizes, the divergent sizes must be accounted for when segmenting the MWs on each side. Create separate size variables and initialize them as necessary.

Signed-off-by: Jon Mason <jon.mason@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
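For illustration only (not part of the patch): a minimal userspace C sketch of the segmentation arithmetic the patch introduces. Each queue is mapped to a memory window, the window is split evenly among the queues that share it, and the per-queue frame size is capped at transport_mtu. The window sizes, queue count, and the QP_TO_MW() definition below are made-up example values, not taken from the driver.

/* Illustration only: per-queue window slice and frame size. */
#include <stdio.h>

#define NTB_NUM_MW	2
#define QP_TO_MW(qp)	((qp) % NTB_NUM_MW)	/* assumed mapping for this example */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int transport_mtu = 0x401E;
	unsigned int max_qps = 8;
	/* Hypothetical local window sizes in bytes; the two sides may differ. */
	unsigned int mw_size[NTB_NUM_MW] = { 0x100000, 0x80000 };
	unsigned int qp_num;

	for (qp_num = 0; qp_num < max_qps; qp_num++) {
		unsigned int mw_num = QP_TO_MW(qp_num);
		unsigned int num_qps_mw, rx_size, rx_max_frame;

		/* Same split rule as the patched driver code. */
		if (max_qps % NTB_NUM_MW && mw_num < max_qps % NTB_NUM_MW)
			num_qps_mw = max_qps / NTB_NUM_MW + 1;
		else
			num_qps_mw = max_qps / NTB_NUM_MW;

		rx_size = mw_size[mw_num] / num_qps_mw;
		rx_max_frame = min_u(transport_mtu, rx_size);

		printf("qp %u: mw %u, slice %#x bytes, frame %#x bytes\n",
		       qp_num, mw_num, rx_size, rx_max_frame);
	}
	return 0;
}

With a smaller window, a queue's slice can drop below transport_mtu and its frame size is capped accordingly, which is exactly why the patch keeps separate receive and transmit sizes per queue.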
Diffstat (limited to 'drivers/ntb')
-rw-r--r--	drivers/ntb/ntb_transport.c	79
1 file changed, 44 insertions(+), 35 deletions(-)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index e9666bd7ef41..2823087a1338 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -60,7 +60,7 @@
 
 #define NTB_TRANSPORT_VERSION	1
 
-static int transport_mtu = 0x401E;
+static unsigned int transport_mtu = 0x401E;
 module_param(transport_mtu, uint, 0644);
 MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
 
@@ -94,6 +94,7 @@ struct ntb_transport_qp {
 	void *tx_mw_begin;
 	void *tx_mw_end;
 	void *tx_offset;
+	unsigned int tx_max_frame;
 
 	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
 			    void *data, int len);
@@ -105,6 +106,7 @@ struct ntb_transport_qp {
 	void *rx_buff_begin;
 	void *rx_buff_end;
 	void *rx_offset;
+	unsigned int rx_max_frame;
 
 	void (*event_handler) (void *data, int status);
 	struct delayed_work link_work;
@@ -458,28 +460,29 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
 				      unsigned int qp_num)
 {
 	struct ntb_transport_qp *qp = &nt->qps[qp_num];
-	unsigned int size, num_qps_mw;
+	unsigned int rx_size, num_qps_mw;
 	u8 mw_num = QP_TO_MW(qp_num);
+	void *offset;
 
 	WARN_ON(nt->mw[mw_num].virt_addr == 0);
 
-	if (nt->max_qps % NTB_NUM_MW && !mw_num)
-		num_qps_mw = nt->max_qps / NTB_NUM_MW +
-			     (nt->max_qps % NTB_NUM_MW - mw_num);
+	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
+		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
 	else
 		num_qps_mw = nt->max_qps / NTB_NUM_MW;
 
-	size = nt->mw[mw_num].size / num_qps_mw;
-
+	rx_size = nt->mw[mw_num].size / num_qps_mw;
 	qp->rx_buff_begin = nt->mw[mw_num].virt_addr +
-			    (qp_num / NTB_NUM_MW * size);
-	qp->rx_buff_end = qp->rx_buff_begin + size;
+			    (qp_num / NTB_NUM_MW * rx_size);
+	qp->rx_buff_end = qp->rx_buff_begin + rx_size;
 	qp->rx_offset = qp->rx_buff_begin;
+	qp->rx_max_frame = min(transport_mtu, rx_size);
 
-	qp->tx_mw_begin = ntb_get_mw_vbase(nt->ndev, mw_num) +
-			  (qp_num / NTB_NUM_MW * size);
-	qp->tx_mw_end = qp->tx_mw_begin + size;
-	qp->tx_offset = qp->tx_mw_begin;
+	/* setup the hdr offsets with 0's */
+	for (offset = qp->rx_buff_begin + qp->rx_max_frame -
+		      sizeof(struct ntb_payload_header);
+	     offset < qp->rx_buff_end; offset += qp->rx_max_frame)
+		memset(offset, 0, sizeof(struct ntb_payload_header));
 
 	qp->rx_pkts = 0;
 	qp->tx_pkts = 0;
@@ -489,7 +492,6 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
 {
 	struct ntb_transport_mw *mw = &nt->mw[num_mw];
 	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
-	void *offset;
 
 	/* Alloc memory for receiving data. Must be 4k aligned */
 	mw->size = ALIGN(size, 4096);
@@ -502,12 +504,6 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
 		return -ENOMEM;
 	}
 
-	/* setup the hdr offsets with 0's */
-	for (offset = mw->virt_addr + transport_mtu -
-		      sizeof(struct ntb_payload_header);
-	     offset < mw->virt_addr + size; offset += transport_mtu)
-		memset(offset, 0, sizeof(struct ntb_payload_header));
-
 	/* Notify HW the memory location of the receive buffer */
 	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
 
@@ -737,6 +733,8 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
 				     unsigned int qp_num)
 {
 	struct ntb_transport_qp *qp;
+	unsigned int num_qps_mw, tx_size;
+	u8 mw_num = QP_TO_MW(qp_num);
 
 	qp = &nt->qps[qp_num];
 	qp->qp_num = qp_num;
@@ -746,6 +744,18 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
 	qp->client_ready = NTB_LINK_DOWN;
 	qp->event_handler = NULL;
 
+	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
+		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
+	else
+		num_qps_mw = nt->max_qps / NTB_NUM_MW;
+
+	tx_size = ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
+	qp->tx_mw_begin = ntb_get_mw_vbase(nt->ndev, mw_num) +
+			  (qp_num / NTB_NUM_MW * tx_size);
+	qp->tx_mw_end = qp->tx_mw_begin + tx_size;
+	qp->tx_offset = qp->tx_mw_begin;
+	qp->tx_max_frame = min(transport_mtu, tx_size);
+
 	if (nt->debugfs_dir) {
 		char debugfs_name[4];
 
@@ -873,9 +883,9 @@ static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
 	struct ntb_payload_header *hdr;
 
 	BUG_ON(offset < qp->rx_buff_begin ||
-	       offset + transport_mtu >= qp->rx_buff_end);
+	       offset + qp->rx_max_frame >= qp->rx_buff_end);
 
-	hdr = offset + transport_mtu - sizeof(struct ntb_payload_header);
+	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
 	entry->len = hdr->len;
 
 	memcpy(entry->buf, offset, entry->len);
@@ -898,7 +908,7 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 
 	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
 	if (!entry) {
-		hdr = offset + transport_mtu -
+		hdr = offset + qp->rx_max_frame -
 		      sizeof(struct ntb_payload_header);
 		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
 			"no buffer - HDR ver %llu, len %d, flags %x\n",
@@ -908,7 +918,7 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 	}
 
 	offset = qp->rx_offset;
-	hdr = offset + transport_mtu - sizeof(struct ntb_payload_header);
+	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
 
 	if (!(hdr->flags & DESC_DONE_FLAG)) {
 		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
@@ -966,8 +976,8 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 	qp->rx_pkts++;
 
 out:
-	qp->rx_offset += transport_mtu;
-	if (qp->rx_offset + transport_mtu >= qp->rx_buff_end)
+	qp->rx_offset += qp->rx_max_frame;
+	if (qp->rx_offset + qp->rx_max_frame >= qp->rx_buff_end)
 		qp->rx_offset = qp->rx_buff_begin;
 
 	return 0;
@@ -1000,11 +1010,11 @@ static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
 	struct ntb_payload_header *hdr;
 
 	BUG_ON(offset < qp->tx_mw_begin ||
-	       offset + transport_mtu >= qp->tx_mw_end);
+	       offset + qp->tx_max_frame >= qp->tx_mw_end);
 
 	memcpy_toio(offset, entry->buf, entry->len);
 
-	hdr = offset + transport_mtu - sizeof(struct ntb_payload_header);
+	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
 	hdr->len = entry->len;
 	hdr->ver = qp->tx_pkts;
 
@@ -1036,7 +1046,7 @@ static int ntb_process_tx(struct ntb_transport_qp *qp,
 	void *offset;
 
 	offset = qp->tx_offset;
-	hdr = offset + transport_mtu - sizeof(struct ntb_payload_header);
+	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
 
 	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %p, entry len %d flags %x buff %p\n",
 		qp->tx_pkts, offset, qp->tx_offset, entry->len, entry->flags,
@@ -1046,7 +1056,7 @@ static int ntb_process_tx(struct ntb_transport_qp *qp,
 		return -EAGAIN;
 	}
 
-	if (entry->len > transport_mtu - sizeof(struct ntb_payload_header)) {
+	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
 		if (qp->tx_handler)
 			qp->tx_handler(qp->cb_data, qp, NULL, -EIO);
 
@@ -1057,8 +1067,8 @@ static int ntb_process_tx(struct ntb_transport_qp *qp,
 
 	ntb_tx_copy_task(qp, entry, offset);
 
-	qp->tx_offset += transport_mtu;
-	if (qp->tx_offset + transport_mtu >= qp->tx_mw_end)
+	qp->tx_offset += qp->tx_max_frame;
+	if (qp->tx_offset + qp->tx_max_frame >= qp->tx_mw_end)
 		qp->tx_offset = qp->tx_mw_begin;
 
 	qp->tx_pkts++;
@@ -1425,9 +1435,8 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
  *
  * RETURNS: the max payload size of a qp
  */
-unsigned int
-ntb_transport_max_size(__attribute__((unused)) struct ntb_transport_qp *qp)
+unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
 {
-	return transport_mtu - sizeof(struct ntb_payload_header);
+	return qp->tx_max_frame - sizeof(struct ntb_payload_header);
 }
 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
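
For reference, also illustration only: a standalone sketch of the frame layout the patch relies on, where each rx_max_frame-sized slot in a queue's buffer ends with a struct ntb_payload_header, and the setup loop (now in ntb_transport_setup_qp_mw()) zeroes those trailing headers. The buffer size, frame size, and header field layout here are assumptions for the example, not copied from the driver.

/* Illustration only: header sits at the tail of every frame slot. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ntb_payload_header {	/* simplified stand-in for the driver's header */
	unsigned long long ver;
	unsigned int len;
	unsigned int flags;
};

int main(void)
{
	unsigned int rx_max_frame = 0x1000;		/* hypothetical frame size */
	unsigned int buff_size = 8 * rx_max_frame;	/* hypothetical queue slice */
	char *rx_buff_begin = malloc(buff_size);
	char *rx_buff_end = rx_buff_begin + buff_size;
	char *offset;

	/* Same walk as the patched setup code: zero the header at the
	 * tail of each frame slot so stale flags are never seen. */
	for (offset = rx_buff_begin + rx_max_frame -
		      sizeof(struct ntb_payload_header);
	     offset < rx_buff_end; offset += rx_max_frame)
		memset(offset, 0, sizeof(struct ntb_payload_header));

	printf("%u frames, header at tail offset %#zx of each frame\n",
	       buff_size / rx_max_frame,
	       rx_max_frame - sizeof(struct ntb_payload_header));

	free(rx_buff_begin);
	return 0;
}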