Diffstat (limited to 'drivers/ntb/ntb_transport.c')
 -rw-r--r--  drivers/ntb/ntb_transport.c  201
 1 file changed, 124 insertions(+), 77 deletions(-)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index efe3ad4122f2..1c6386d5f79c 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -142,10 +142,11 @@ struct ntb_transport_qp {
 
 	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
 			   void *data, int len);
+	struct list_head rx_post_q;
 	struct list_head rx_pend_q;
 	struct list_head rx_free_q;
-	spinlock_t ntb_rx_pend_q_lock;
-	spinlock_t ntb_rx_free_q_lock;
+	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+	spinlock_t ntb_rx_q_lock;
 	void *rx_buff;
 	unsigned int rx_index;
 	unsigned int rx_max_entry;
@@ -211,6 +212,8 @@ struct ntb_transport_ctx {
 	bool link_is_up;
 	struct delayed_work link_work;
 	struct work_struct link_cleanup;
+
+	struct dentry *debugfs_node_dir;
 };
 
 enum {
@@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 	char *buf;
 	ssize_t ret, out_offset, out_count;
 
+	qp = filp->private_data;
+
+	if (!qp || !qp->link_is_up)
+		return 0;
+
 	out_count = 1000;
 
 	buf = kmalloc(out_count, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
-	qp = filp->private_data;
 	out_offset = 0;
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "NTB QP stats\n");
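
The hunk above moves the filp->private_data lookup ahead of the allocation and returns early when the queue pair is not fully up, so a stats read can no longer touch a half-initialized qp. A minimal sketch of the resulting handler shape (the body is condensed; only the guard is the point here):

	static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *offp)
	{
		struct ntb_transport_qp *qp = filp->private_data;
		char *buf;
		ssize_t ret;

		if (!qp || !qp->link_is_up)
			return 0;	/* read as EOF while the qp is down */

		buf = kmalloc(1000, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* ... snprintf() the stats into buf, then ... */
		ret = simple_read_from_buffer(ubuf, count, offp, buf, strlen(buf));
		kfree(buf);
		return ret;
	}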
@@ -534,6 +541,27 @@ out:
 	return entry;
 }
 
+static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
+					   struct list_head *list,
+					   struct list_head *to_list)
+{
+	struct ntb_queue_entry *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+
+	if (list_empty(list)) {
+		entry = NULL;
+	} else {
+		entry = list_first_entry(list, struct ntb_queue_entry, entry);
+		list_move_tail(&entry->entry, to_list);
+	}
+
+	spin_unlock_irqrestore(lock, flags);
+
+	return entry;
+}
+
 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
 				     unsigned int qp_num)
 {
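
The new ntb_list_mv() dequeues the head of one list and appends it to another inside a single critical section, so an entry is never observable on neither (or both) lists. With the two rx locks collapsed into ntb_rx_q_lock, one lock now orders rx_free_q, rx_pend_q, and the new rx_post_q. The same pattern in a self-contained userspace sketch, with a pthread mutex standing in for the spinlock and a hand-rolled circular list standing in for list_head:

	#include <pthread.h>
	#include <stddef.h>

	struct node { struct node *next, *prev; };

	/* Remove the first node of 'list' and append it to 'to_list', all
	 * under one lock, mirroring spin_lock_irqsave() + list_move_tail(). */
	static struct node *list_mv(pthread_mutex_t *lock,
				    struct node *list, struct node *to_list)
	{
		struct node *n = NULL;

		pthread_mutex_lock(lock);
		if (list->next != list) {	/* i.e. !list_empty(list) */
			n = list->next;
			/* unlink n */
			n->prev->next = n->next;
			n->next->prev = n->prev;
			/* tail-insert n before to_list's sentinel */
			n->prev = to_list->prev;
			n->next = to_list;
			to_list->prev->next = n;
			to_list->prev = n;
		}
		pthread_mutex_unlock(lock);

		return n;
	}

	int main(void)
	{
		pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
		struct node a, from = { &a, &a }, to = { &to, &to };

		a.next = &from;
		a.prev = &from;		/* 'from' holds one node */
		return list_mv(&lock, &from, &to) == &a ? 0 : 1;
	}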
@@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
 }
 
 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
-		      unsigned int size)
+		      resource_size_t size)
 {
 	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
 	struct pci_dev *pdev = nt->ndev->pdev;
-	unsigned int xlat_size, buff_size;
+	size_t xlat_size, buff_size;
 	int rc;
 
+	if (!size)
+		return -EINVAL;
+
 	xlat_size = round_up(size, mw->xlat_align_size);
 	buff_size = round_up(size, mw->xlat_align);
 
@@ -627,7 +658,7 @@
 	if (!mw->virt_addr) {
 		mw->xlat_size = 0;
 		mw->buff_size = 0;
-		dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
 			buff_size);
 		return -ENOMEM;
 	}
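
Widening size to resource_size_t (and the derived sizes to size_t, with the matching %zu format fix) avoids truncating large memory windows through a 32-bit unsigned int, and the new !size check matters because round_up() maps zero to zero, which would otherwise reach the allocator. A hedged sketch of the arithmetic, assuming power-of-two alignments as the kernel's round_up() does:

	#include <stdio.h>
	#include <stddef.h>

	/* Same idea as the kernel's round_up() for power-of-two 'align'. */
	static size_t round_up_pow2(size_t n, size_t align)
	{
		return (n + align - 1) & ~(align - 1);
	}

	int main(void)
	{
		printf("%zu\n", round_up_pow2(5000, 4096)); /* 8192 */
		printf("%zu\n", round_up_pow2(0, 4096));    /* 0: why !size is -EINVAL */
		return 0;
	}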
@@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work)
 
 		if (qp->event_handler)
 			qp->event_handler(qp->cb_data, qp->link_is_up);
+
+		tasklet_schedule(&qp->rxc_db_work);
 	} else if (nt->link_is_up)
 		schedule_delayed_work(&qp->link_work,
 				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
 	qp->tx_max_entry = tx_size / qp->tx_max_frame;
 
-	if (nt_debugfs_dir) {
+	if (nt->debugfs_node_dir) {
 		char debugfs_name[4];
 
 		snprintf(debugfs_name, 4, "qp%d", qp_num);
 		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
-						     nt_debugfs_dir);
+						     nt->debugfs_node_dir);
 
 		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
 							qp->debugfs_dir, qp,
@@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
 	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
 
-	spin_lock_init(&qp->ntb_rx_pend_q_lock);
-	spin_lock_init(&qp->ntb_rx_free_q_lock);
+	spin_lock_init(&qp->ntb_rx_q_lock);
 	spin_lock_init(&qp->ntb_tx_free_q_lock);
 
+	INIT_LIST_HEAD(&qp->rx_post_q);
 	INIT_LIST_HEAD(&qp->rx_pend_q);
 	INIT_LIST_HEAD(&qp->rx_free_q);
 	INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 		goto err2;
 	}
 
+	if (nt_debugfs_dir) {
+		nt->debugfs_node_dir =
+			debugfs_create_dir(pci_name(ndev->pdev),
+					   nt_debugfs_dir);
+	}
+
 	for (i = 0; i < qp_count; i++) {
 		rc = ntb_transport_init_queue(nt, i);
 		if (rc)
@@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
 	kfree(nt);
 }
 
-static void ntb_rx_copy_callback(void *data)
+static void ntb_complete_rxc(struct ntb_transport_qp *qp)
 {
-	struct ntb_queue_entry *entry = data;
-	struct ntb_transport_qp *qp = entry->qp;
-	void *cb_data = entry->cb_data;
-	unsigned int len = entry->len;
-	struct ntb_payload_header *hdr = entry->rx_hdr;
+	struct ntb_queue_entry *entry;
+	void *cb_data;
+	unsigned int len;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+
+	while (!list_empty(&qp->rx_post_q)) {
+		entry = list_first_entry(&qp->rx_post_q,
+					 struct ntb_queue_entry, entry);
+		if (!(entry->flags & DESC_DONE_FLAG))
+			break;
+
+		entry->rx_hdr->flags = 0;
+		iowrite32(entry->index, &qp->rx_info->entry);
 
-	hdr->flags = 0;
+		cb_data = entry->cb_data;
+		len = entry->len;
 
-	iowrite32(entry->index, &qp->rx_info->entry);
+		list_move_tail(&entry->entry, &qp->rx_free_q);
 
-	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
 
-	if (qp->rx_handler && qp->client_ready)
-		qp->rx_handler(qp, qp->cb_data, cb_data, len);
+		if (qp->rx_handler && qp->client_ready)
+			qp->rx_handler(qp, qp->cb_data, cb_data, len);
+
+		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+	}
+
+	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+}
+
+static void ntb_rx_copy_callback(void *data)
+{
+	struct ntb_queue_entry *entry = data;
+
+	entry->flags |= DESC_DONE_FLAG;
+
+	ntb_complete_rxc(entry->qp);
 }
 
 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
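
DMA copy completions can fire out of order, but the peer's rx_info->entry index must only ever advance. ntb_complete_rxc() therefore retires entries strictly in rx_post_q order: each callback merely sets DESC_DONE_FLAG, and the drain loop stops at the first entry still in flight. The lock is also dropped around the client callback so rx_handler can re-enqueue buffers without deadlocking on ntb_rx_q_lock. A standalone sketch of the in-order retirement idea:

	#include <stdbool.h>
	#include <stdio.h>

	#define RING 8

	static bool done[RING];
	static unsigned int head;	/* oldest posted, not yet retired */
	static unsigned int tail;	/* next index to post */

	static void post(void)
	{
		done[tail % RING] = false;
		tail++;
	}

	/* Completions may arrive in any order; only the done prefix retires. */
	static void complete(unsigned int idx)
	{
		done[idx % RING] = true;
		while (head != tail && done[head % RING]) {
			printf("retire %u\n", head); /* cf. iowrite32(entry->index) */
			head++;
		}
	}

	int main(void)
	{
		post(); post(); post();
		complete(1);	/* out of order: nothing retires yet */
		complete(0);	/* now 0 and 1 retire, in order */
		complete(2);	/* 2 retires */
		return 0;
	}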
@@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
 	ntb_rx_copy_callback(entry);
 }
 
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
-			 size_t len)
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 {
 	struct dma_async_tx_descriptor *txd;
 	struct ntb_transport_qp *qp = entry->qp;
 	struct dma_chan *chan = qp->dma_chan;
 	struct dma_device *device;
-	size_t pay_off, buff_off;
+	size_t pay_off, buff_off, len;
 	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	void *buf = entry->buf;
 
-	entry->len = len;
+	len = entry->len;
 
 	if (!chan)
 		goto err;
@@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 	struct ntb_payload_header *hdr;
 	struct ntb_queue_entry *entry;
 	void *offset;
-	int rc;
 
 	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
 	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,65 +1317,43 @@
 		return -EIO;
 	}
 
-	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
 	if (!entry) {
 		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
 		qp->rx_err_no_buf++;
-
-		rc = -ENOMEM;
-		goto err;
+		return -EAGAIN;
 	}
 
+	entry->rx_hdr = hdr;
+	entry->index = qp->rx_index;
+
 	if (hdr->len > entry->len) {
 		dev_dbg(&qp->ndev->pdev->dev,
 			"receive buffer overflow! Wanted %d got %d\n",
 			hdr->len, entry->len);
 		qp->rx_err_oflow++;
 
-		rc = -EIO;
-		goto err;
-	}
+		entry->len = -EIO;
+		entry->flags |= DESC_DONE_FLAG;
 
-	dev_dbg(&qp->ndev->pdev->dev,
-		"RX OK index %u ver %u size %d into buf size %d\n",
-		qp->rx_index, hdr->ver, hdr->len, entry->len);
+		ntb_complete_rxc(qp);
+	} else {
+		dev_dbg(&qp->ndev->pdev->dev,
+			"RX OK index %u ver %u size %d into buf size %d\n",
+			qp->rx_index, hdr->ver, hdr->len, entry->len);
 
-	qp->rx_bytes += hdr->len;
-	qp->rx_pkts++;
+		qp->rx_bytes += hdr->len;
+		qp->rx_pkts++;
 
-	entry->index = qp->rx_index;
-	entry->rx_hdr = hdr;
+		entry->len = hdr->len;
 
-	ntb_async_rx(entry, offset, hdr->len);
+		ntb_async_rx(entry, offset);
+	}
 
 	qp->rx_index++;
 	qp->rx_index %= qp->rx_max_entry;
 
 	return 0;
-
-err:
-	/* FIXME: if this syncrhonous update of the rx_index gets ahead of
-	 * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
-	 * scenarios:
-	 *
-	 * 1) The peer might miss this update, but observe the update
-	 * from the memcpy completion callback.  In this case, the buffer will
-	 * not be freed on the peer to be reused for a different packet.  The
-	 * successful rx of a later packet would clear the condition, but the
-	 * condition could persist if several rx fail in a row.
-	 *
-	 * 2) The peer may observe this update before the asyncrhonous copy of
-	 * prior packets is completed.  The peer may overwrite the buffers of
-	 * the prior packets before they are copied.
-	 *
-	 * 3) Both: the peer may observe the update, and then observe the index
-	 * decrement by the asynchronous completion callback.  Who knows what
-	 * badness that will cause.
-	 */
-	hdr->flags = 0;
-	iowrite32(qp->rx_index, &qp->rx_info->entry);
-
-	return rc;
 }
 
 static void ntb_transport_rxc_db(unsigned long data)
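
With the err: rewind path gone, rx_index only ever moves forward: an oversized frame is completed in place with entry->len = -EIO and DESC_DONE_FLAG, so the error reaches the client as a negative length instead of racing the index backwards (the scenarios the deleted FIXME comment describes). A hypothetical client handler, using the rx_handler signature from this file (RX_BUF_SIZE and the recycling policy are illustrative, not from the source):

	/* Illustrative only: a client rx_handler that tolerates the -EIO
	 * completion introduced above. */
	static void client_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
				      void *data, int len)
	{
		if (len < 0) {
			/* oversized frame was dropped; recycle the buffer */
			ntb_transport_rx_enqueue(qp, data, data, RX_BUF_SIZE);
			return;
		}

		/* ... hand 'len' bytes at 'data' to the upper layer ... */
	}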
@@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data)
 			break;
 	}
 
-	if (qp->dma_chan)
+	if (i && qp->dma_chan)
 		dma_async_issue_pending(qp->dma_chan);
 
 	if (i == qp->rx_max_entry) {
@@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 			goto err1;
 
 		entry->qp = qp;
-		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
 			     &qp->rx_free_q);
 	}
 
@@ -1634,7 +1674,7 @@ err2:
 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
 		kfree(entry);
 err1:
-	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
 	if (qp->dma_chan)
 		dma_release_channel(qp->dma_chan);
@@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
  */
 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 {
-	struct ntb_transport_ctx *nt = qp->transport;
 	struct pci_dev *pdev;
 	struct ntb_queue_entry *entry;
 	u64 qp_bit;
@@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 	qp->tx_handler = NULL;
 	qp->event_handler = NULL;
 
-	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
 
-	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
-		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
+		kfree(entry);
+	}
+
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
 		kfree(entry);
 	}
 
 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
 		kfree(entry);
 
-	nt->qp_bitmap_free |= qp_bit;
+	qp->transport->qp_bitmap_free |= qp_bit;
 
 	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
 }
@@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
 	if (!qp || qp->client_ready)
 		return NULL;
 
-	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
 	if (!entry)
 		return NULL;
 
 	buf = entry->cb_data;
 	*len = entry->len;
 
-	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
 
 	return buf;
 }
@@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
 	if (!qp)
 		return -EINVAL;
 
-	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
 	if (!entry)
 		return -ENOMEM;
 
 	entry->cb_data = cb;
 	entry->buf = data;
 	entry->len = len;
+	entry->flags = 0;
+
+	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
 
-	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+	tasklet_schedule(&qp->rxc_db_work);
 
 	return 0;
 }
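
Because ntb_transport_rx_enqueue() now ends with tasklet_schedule(&qp->rxc_db_work), a buffer posted after the doorbell has already fired is still consumed, which is also why ntb_qp_link_work() kicks the same tasklet on link-up. A hedged usage sketch for priming the receive side of a queue pair (the function, buffer count, and error handling are illustrative; ntb_transport_max_size() and ntb_transport_rx_enqueue() are the driver's exported API):

	/* Illustrative: post a batch of receive buffers on a new qp. */
	static int client_fill_rx(struct ntb_transport_qp *qp)
	{
		unsigned int i, mtu = ntb_transport_max_size(qp);
		int rc;

		for (i = 0; i < 100; i++) {
			void *buf = kmalloc(mtu, GFP_KERNEL);

			if (!buf)
				return -ENOMEM;

			/* cb_data doubles as the buffer pointer here */
			rc = ntb_transport_rx_enqueue(qp, buf, buf, mtu);
			if (rc) {
				kfree(buf);
				return rc;
			}
		}

		return 0;
	}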
