 drivers/infiniband/hw/qib/qib_user_sdma.c | 136 ++++++++++++++++++++++++++----
 1 file changed, 123 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 165aee2ca8a0..d2806cae234c 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -52,6 +52,17 @@
 /* attempt to drain the queue for 5secs */
 #define QIB_USER_SDMA_DRAIN_TIMEOUT 500
 
+/*
+ * track how many times a process open this driver.
+ */
+static struct rb_root qib_user_sdma_rb_root = RB_ROOT;
+
+struct qib_user_sdma_rb_node {
+	struct rb_node node;
+	int refcount;
+	pid_t pid;
+};
+
 struct qib_user_sdma_pkt {
 	struct list_head list; /* list element */
 
@@ -120,15 +131,60 @@ struct qib_user_sdma_queue {
 	/* dma page table */
 	struct rb_root dma_pages_root;
 
+	struct qib_user_sdma_rb_node *sdma_rb_node;
+
 	/* protect everything above... */
 	struct mutex lock;
 };
 
+static struct qib_user_sdma_rb_node *
+qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
+{
+	struct qib_user_sdma_rb_node *sdma_rb_node;
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		sdma_rb_node = container_of(node,
+			struct qib_user_sdma_rb_node, node);
+		if (pid < sdma_rb_node->pid)
+			node = node->rb_left;
+		else if (pid > sdma_rb_node->pid)
+			node = node->rb_right;
+		else
+			return sdma_rb_node;
+	}
+	return NULL;
+}
+
+static int
+qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
+{
+	struct rb_node **node = &(root->rb_node);
+	struct rb_node *parent = NULL;
+	struct qib_user_sdma_rb_node *got;
+
+	while (*node) {
+		got = container_of(*node, struct qib_user_sdma_rb_node, node);
+		parent = *node;
+		if (new->pid < got->pid)
+			node = &((*node)->rb_left);
+		else if (new->pid > got->pid)
+			node = &((*node)->rb_right);
+		else
+			return 0;
+	}
+
+	rb_link_node(&new->node, parent, node);
+	rb_insert_color(&new->node, root);
+	return 1;
+}
+
 struct qib_user_sdma_queue *
 qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
 {
 	struct qib_user_sdma_queue *pq =
 		kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
+	struct qib_user_sdma_rb_node *sdma_rb_node;
 
 	if (!pq)
 		goto done;
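
The two helpers added above are an open-coded red-black tree lookup and insert keyed by pid. For illustration only, here is a self-contained userspace sketch of the same get-or-create pattern; a plain (unbalanced) BST stands in for the kernel's <linux/rbtree.h>, and every name below is hypothetical rather than driver code:

/*
 * Userspace sketch of a pid-keyed refcount table. Hypothetical names;
 * an unbalanced BST stands in for the kernel red-black tree.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

struct pid_node {
	struct pid_node *left, *right;
	pid_t pid;
	int refcount;
};

/* Find the node for pid, creating it (refcount 0) if absent. */
static struct pid_node *pid_node_get(struct pid_node **root, pid_t pid)
{
	while (*root) {
		if (pid < (*root)->pid)
			root = &(*root)->left;
		else if (pid > (*root)->pid)
			root = &(*root)->right;
		else
			return *root;
	}
	*root = calloc(1, sizeof(**root));
	if (!*root)
		abort();	/* toy program: treat OOM as fatal */
	(*root)->pid = pid;
	return *root;
}

int main(void)
{
	struct pid_node *root = NULL;

	/* Two opens by the same pid share one node, as in queue_create(). */
	pid_node_get(&root, 42)->refcount++;
	pid_node_get(&root, 42)->refcount++;
	printf("pid 42 refcount: %d\n", pid_node_get(&root, 42)->refcount);
	return 0;
}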
@@ -138,6 +194,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
 	pq->num_pending = 0;
 	pq->num_sending = 0;
 	pq->added = 0;
+	pq->sdma_rb_node = NULL;
 
 	INIT_LIST_HEAD(&pq->sent);
 	spin_lock_init(&pq->sent_lock);
@@ -163,8 +220,30 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
 
 	pq->dma_pages_root = RB_ROOT;
 
+	sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root,
+					current->pid);
+	if (sdma_rb_node) {
+		sdma_rb_node->refcount++;
+	} else {
+		int ret;
+		sdma_rb_node = kmalloc(sizeof(
+			struct qib_user_sdma_rb_node), GFP_KERNEL);
+		if (!sdma_rb_node)
+			goto err_rb;
+
+		sdma_rb_node->refcount = 1;
+		sdma_rb_node->pid = current->pid;
+
+		ret = qib_user_sdma_rb_insert(&qib_user_sdma_rb_root,
+					sdma_rb_node);
+		BUG_ON(ret == 0);
+	}
+	pq->sdma_rb_node = sdma_rb_node;
+
 	goto done;
 
+err_rb:
+	dma_pool_destroy(pq->header_cache);
 err_slab:
 	kmem_cache_destroy(pq->pkt_slab);
 err_kfree:
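
The new err_rb label extends the function's existing goto unwind ladder: each failure point jumps to the label that releases everything allocated before it, in reverse order of acquisition. A minimal sketch of the idiom, with hypothetical names:

/* Minimal sketch of the goto-unwind idiom; names are hypothetical. */
#include <stdlib.h>

struct ctx { void *a, *b, *c; };

static struct ctx *ctx_create(void)
{
	struct ctx *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		return NULL;
	ctx->a = malloc(16);
	if (!ctx->a)
		goto err_ctx;
	ctx->b = malloc(16);
	if (!ctx->b)
		goto err_a;	/* frees a, then ctx */
	ctx->c = malloc(16);
	if (!ctx->c)
		goto err_b;	/* frees b, then a, then ctx */
	return ctx;

err_b:
	free(ctx->b);
err_a:
	free(ctx->a);
err_ctx:
	free(ctx);
	return NULL;
}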
@@ -1020,8 +1099,13 @@ void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
 	if (!pq)
 		return;
 
-	kmem_cache_destroy(pq->pkt_slab);
+	pq->sdma_rb_node->refcount--;
+	if (pq->sdma_rb_node->refcount == 0) {
+		rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
+		kfree(pq->sdma_rb_node);
+	}
 	dma_pool_destroy(pq->header_cache);
+	kmem_cache_destroy(pq->pkt_slab);
 	kfree(pq);
 }
 
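
Teardown mirrors create: drop the per-pid reference and unlink the shared node only when the last queue for that pid is destroyed. Continuing the earlier userspace sketch, a hypothetical put-side counterpart (the driver itself uses rb_erase() and kfree()):

/* Drop a reference; unlink and free the node when it hits zero. */
#include <stdlib.h>
#include <sys/types.h>

struct pid_node {	/* same node as in the earlier sketch */
	struct pid_node *left, *right;
	pid_t pid;
	int refcount;
};

static void pid_node_put(struct pid_node **root, pid_t pid)
{
	struct pid_node **link = root, *node, *victim;

	while (*link && (*link)->pid != pid)
		link = pid < (*link)->pid ? &(*link)->left : &(*link)->right;
	node = *link;
	if (!node || --node->refcount > 0)
		return;

	/* Standard BST removal: splice out, or swap in the successor. */
	if (!node->left) {
		*link = node->right;
	} else if (!node->right) {
		*link = node->left;
	} else {
		struct pid_node **slink = &node->right;

		while ((*slink)->left)
			slink = &(*slink)->left;
		victim = *slink;
		*slink = victim->right;	/* unlink successor */
		victim->left = node->left;
		victim->right = node->right;
		*link = victim;
	}
	free(node);
}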
@@ -1241,26 +1325,52 @@ static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
 				struct qib_user_sdma_queue *pq,
 				struct list_head *pktlist, int count)
 {
-	int ret = 0;
 	unsigned long flags;
 
 	if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
 		return -ECOMM;
 
-	spin_lock_irqsave(&ppd->sdma_lock, flags);
-
-	if (unlikely(!__qib_sdma_running(ppd))) {
-		ret = -ECOMM;
-		goto unlock;
+	/* non-blocking mode */
+	if (pq->sdma_rb_node->refcount > 1) {
+		spin_lock_irqsave(&ppd->sdma_lock, flags);
+		if (unlikely(!__qib_sdma_running(ppd))) {
+			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+			return -ECOMM;
+		}
+		pq->num_pending += count;
+		list_splice_tail_init(pktlist, &ppd->sdma_userpending);
+		qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
+		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+		return 0;
 	}
 
+	/* In this case, descriptors from this process are not
+	 * linked to ppd pending queue, interrupt handler
+	 * won't update this process, it is OK to directly
+	 * modify without sdma lock.
+	 */
+
+
 	pq->num_pending += count;
-	list_splice_tail_init(pktlist, &ppd->sdma_userpending);
-	qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
+	/*
+	 * Blocking mode for single rail process, we must
+	 * release/regain sdma_lock to give other process
+	 * chance to make progress. This is important for
+	 * performance.
+	 */
+	do {
+		spin_lock_irqsave(&ppd->sdma_lock, flags);
+		if (unlikely(!__qib_sdma_running(ppd))) {
+			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+			return -ECOMM;
+		}
+		qib_user_sdma_send_desc(ppd, pktlist);
+		if (!list_empty(pktlist))
+			qib_sdma_make_progress(ppd);
+		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+	} while (!list_empty(pktlist));
 
-unlock:
-	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-	return ret;
+	return 0;
 }
 
 int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
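
The blocking path above deliberately releases and reacquires sdma_lock on each pass of the do/while loop so that concurrent processes can claim descriptors between batches. The shape of that loop, as a self-contained pthreads sketch with hypothetical names:

/*
 * Sketch of a blocking push loop that drops the lock between batches.
 * Hypothetical names; not driver code.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pretend the hardware ring accepts at most 4 items per pass. */
static int send_batch(int *remaining)
{
	int n = *remaining < 4 ? *remaining : 4;

	*remaining -= n;
	return n;
}

static void push_blocking(int count)
{
	while (count > 0) {
		pthread_mutex_lock(&hw_lock);
		int sent = send_batch(&count);
		pthread_mutex_unlock(&hw_lock);	/* let others make progress */
		printf("sent %d, %d left\n", sent, count);
	}
}

int main(void)
{
	push_blocking(10);
	return 0;
}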
@@ -1290,7 +1400,7 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
 	qib_user_sdma_queue_clean(ppd, pq);
 
 	while (dim) {
-		int mxp = 8;
+		int mxp = 1;
 		int ndesc = 0;
 
 		ret = qib_user_sdma_queue_pkts(dd, ppd, pq,