author     John Gregor <john.gregor@qlogic.com>  2008-04-17 00:09:31 -0400
committer  Roland Dreier <rolandd@cisco.com>     2008-04-17 00:09:31 -0400
commit     f7a60d71af49d7d23d8b827833e4258eba00479d (patch)
tree       215553e30b15612cddafb213f7667544b0deafb3 /drivers/infiniband
parent     2c19643563aed5593c62525e9941a47e56273ccf (diff)
IB/ipath: Add code for IBA7220 send DMA
The IBA7220 HCA has a new feature to DMA data to the on-chip send buffers
instead of, or in addition to, the host CPU doing the data transfer.  This
patch adds code to support the send DMA queue.

Signed-off-by: John Gregor <john.gregor@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c  743
1 file changed, 743 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
new file mode 100644
index 000000000000..5918cafb880b
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -0,0 +1,743 @@
/*
 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */

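/*
 * VL15 watchdog helpers.  VL15 is the InfiniBand management virtual lane
 * (subnet management traffic), so VL15 requests get their own stall
 * watchdog: while any VL15 packet is outstanding on the send DMA queue a
 * roughly 50 ms ((HZ + 19) / 20 jiffies) timer stays armed, and if it
 * fires before the request completes the timeout handler forces recovery
 * via ipath_cancel_sends() and ipath_hol_down().
 */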
static void vl15_watchdog_enq(struct ipath_devdata *dd)
{
	/* ipath_sdma_lock must already be held */
	if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
		unsigned long interval = (HZ + 19) / 20;
		dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
		add_timer(&dd->ipath_sdma_vl15_timer);
	}
}

static void vl15_watchdog_deq(struct ipath_devdata *dd)
{
	/* ipath_sdma_lock must already be held */
	if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
		unsigned long interval = (HZ + 19) / 20;
		mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
	} else {
		del_timer(&dd->ipath_sdma_vl15_timer);
	}
}

static void vl15_watchdog_timeout(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
		ipath_dbg("vl15 watchdog timeout - clearing\n");
		ipath_cancel_sends(dd, 1);
		ipath_hol_down(dd);
	} else {
		ipath_dbg("vl15 watchdog timeout - "
			"condition already cleared\n");
	}
}

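/*
 * Decode a descriptor that was built by make_sdma_desc() (below) well
 * enough to undo its dma_map_single(): the 48-bit buffer address is
 * reassembled from the two descriptor qwords, and the 11-bit dword count
 * is shifted down into a byte length for dma_unmap_single().
 */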
static void unmap_desc(struct ipath_devdata *dd, unsigned head)
{
	__le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}

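/*
 * Retire descriptors the DMA engine has finished with: catch the software
 * head index up to the chip's head, unmap per-descriptor buffers for
 * requests that set IPATH_SDMA_TXREQ_F_FREEDESC, and move fully completed
 * txreqs from the active list to the notify list (their callbacks run from
 * the notify tasklet).  Returns nonzero if anything was retired.
 */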
/*
 * ipath_sdma_lock should be locked before calling this.
 */
int ipath_sdma_make_progress(struct ipath_devdata *dd)
{
	struct list_head *lp = NULL;
	struct ipath_sdma_txreq *txp = NULL;
	u16 dmahead;
	u16 start_idx = 0;
	int progress = 0;

	if (!list_empty(&dd->ipath_sdma_activelist)) {
		lp = dd->ipath_sdma_activelist.next;
		txp = list_entry(lp, struct ipath_sdma_txreq, list);
		start_idx = txp->start_idx;
	}

	/*
	 * Read the SDMA head register in order to know that the
	 * interrupt clear has been written to the chip.
	 * Otherwise, we may not get an interrupt for the last
	 * descriptor in the queue.
	 */
	dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
	/* sanity check return value for error handling (chip reset, etc.) */
	if (dmahead >= dd->ipath_sdma_descq_cnt)
		goto done;

	while (dd->ipath_sdma_descq_head != dmahead) {
		if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
		    dd->ipath_sdma_descq_head == start_idx) {
			unmap_desc(dd, dd->ipath_sdma_descq_head);
			start_idx++;
			if (start_idx == dd->ipath_sdma_descq_cnt)
				start_idx = 0;
		}

		/* increment free count and head */
		dd->ipath_sdma_descq_removed++;
		if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
			dd->ipath_sdma_descq_head = 0;

		if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
			/* move to notify list */
			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
				vl15_watchdog_deq(dd);
			list_move_tail(lp, &dd->ipath_sdma_notifylist);
			if (!list_empty(&dd->ipath_sdma_activelist)) {
				lp = dd->ipath_sdma_activelist.next;
				txp = list_entry(lp, struct ipath_sdma_txreq,
						 list);
				start_idx = txp->start_idx;
			} else {
				lp = NULL;
				txp = NULL;
			}
		}
		progress = 1;
	}

	if (progress)
		tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

done:
	return progress;
}

static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
{
	struct ipath_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, list, list) {
		list_del_init(&txp->list);

		if (txp->callback)
			(*txp->callback)(txp->callback_cookie,
					 txp->callback_status);
	}
}

static void sdma_notify_taskbody(struct ipath_devdata *dd)
{
	unsigned long flags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	list_splice_init(&dd->ipath_sdma_notifylist, &list);

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	ipath_sdma_notify(dd, &list);

	/*
	 * The IB verbs layer needs to see the callback before getting
	 * the call to ipath_ib_piobufavail() because the callback
	 * handles releasing resources the next send will need.
	 * Otherwise, we could do these calls in
	 * ipath_sdma_make_progress().
	 */
	ipath_ib_piobufavail(dd->verbs_dev);
}

static void sdma_notify_task(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		sdma_notify_taskbody(dd);
}

static void dump_sdma_state(struct ipath_devdata *dd)
{
	unsigned long reg;

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
	ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
	ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
	ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
	ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
}

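/*
 * Recovery state machine, driven as a self-rescheduling tasklet.  After an
 * abort has been requested it waits (ABORT_DISARMED) for the SDMADISABLED
 * interrupt, or gives up after a timeout; once the engine is stopped
 * (ABORT_ABORTED) it flushes the active list with status
 * IPATH_SDMA_TXREQ_S_ABORTED, resets the software head/tail/generation,
 * reprograms SendDmaLenGen, toggles INFINIPATH_S_SDMAENABLE to restart the
 * engine, and clears the abort status bits.
 */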
static void sdma_abort_task(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
	int kick = 0;
	u64 status;
	unsigned long flags;

	if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		return;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;

	/* nothing to do */
	if (status == IPATH_SDMA_ABORT_NONE)
		goto unlock;

	/* ipath_sdma_abort() is done, waiting for interrupt */
	if (status == IPATH_SDMA_ABORT_DISARMED) {
		if (jiffies < dd->ipath_sdma_abort_intr_timeout)
			goto resched_noprint;
		/* give up, intr got lost somewhere */
		ipath_dbg("give up waiting for SDMADISABLED intr\n");
		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
		status = IPATH_SDMA_ABORT_ABORTED;
	}

	/* everything is stopped, time to clean up and restart */
	if (status == IPATH_SDMA_ABORT_ABORTED) {
		struct ipath_sdma_txreq *txp, *txpnext;
		u64 hwstatus;
		int notify = 0;

		hwstatus = ipath_read_kreg64(dd,
				dd->ipath_kregs->kr_senddmastatus);

		if (/* ScoreBoardDrainInProg */
		    test_bit(63, &hwstatus) ||
		    /* AbortInProg */
		    test_bit(62, &hwstatus) ||
		    /* InternalSDmaEnable */
		    test_bit(61, &hwstatus) ||
		    /* ScbEmpty */
		    !test_bit(30, &hwstatus)) {
			if (dd->ipath_sdma_reset_wait > 0) {
				/* not done shutting down sdma */
				--dd->ipath_sdma_reset_wait;
				goto resched;
			}
			ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
				"status after SDMA reset, continuing\n");
			dump_sdma_state(dd);
		}

		/* dequeue all "sent" requests */
		list_for_each_entry_safe(txp, txpnext,
					 &dd->ipath_sdma_activelist, list) {
			txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
				vl15_watchdog_deq(dd);
			list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
			notify = 1;
		}
		if (notify)
			tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

		/* reset our notion of head and tail */
		dd->ipath_sdma_descq_tail = 0;
		dd->ipath_sdma_descq_head = 0;
		dd->ipath_sdma_head_dma[0] = 0;
		dd->ipath_sdma_generation = 0;
		dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;

		/* Reset SendDmaLenGen */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
			(u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));

		/* done with sdma state for a bit */
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

		/* restart sdma engine */
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
		kick = 1;
		ipath_dbg("sdma restarted from abort\n");

		/* now clear status bits */
		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
		__clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
		__clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
		__clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);

		/* make sure I see next message */
		dd->ipath_sdma_abort_jiffies = 0;

		goto unlock;
	}

resched:
	/*
	 * for now, keep spinning
	 * JAG - this is bad to just have default be a loop without
	 * state change
	 */
	if (jiffies > dd->ipath_sdma_abort_jiffies) {
		ipath_dbg("looping with status 0x%016llx\n",
			  dd->ipath_sdma_status);
		dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
	}
resched_noprint:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
	return;

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	/* kick upper layers */
	if (kick)
		ipath_ib_piobufavail(dd->verbs_dev);
}

/*
 * This is called from interrupt context.
 */
void ipath_sdma_intr(struct ipath_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	(void) ipath_sdma_make_progress(dd);

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
}

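/*
 * Allocate the coherent memory the engine needs: one page for the
 * descriptor ring (each descriptor is two 64-bit words, hence the 256
 * entries per 4KB page noted above) and one page into which the chip
 * DMAs its current head index, plus set up the VL15 watchdog timer.
 */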
static int alloc_sdma(struct ipath_devdata *dd)
{
	int ret = 0;

	/* Allocate memory for SendDMA descriptor FIFO */
	dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
		SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);

	if (!dd->ipath_sdma_descq) {
		ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
			"FIFO memory\n");
		ret = -ENOMEM;
		goto done;
	}

	dd->ipath_sdma_descq_cnt =
		SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);

	/* Allocate memory for DMA of head register to memory */
	dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
		PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
	if (!dd->ipath_sdma_head_dma) {
		ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
		ret = -ENOMEM;
		goto cleanup_descq;
	}
	dd->ipath_sdma_head_dma[0] = 0;

	init_timer(&dd->ipath_sdma_vl15_timer);
	dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout;
	dd->ipath_sdma_vl15_timer.data = (unsigned long)dd;
	atomic_set(&dd->ipath_sdma_vl15_count, 0);

	goto done;

cleanup_descq:
	dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
		(void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
	dd->ipath_sdma_descq = NULL;
	dd->ipath_sdma_descq_phys = 0;
done:
	return ret;
}

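/*
 * One-time SDMA bring-up: program the descriptor ring base, length (with
 * generation checking enabled), tail index, and head-writeback address
 * into the chip, hand the former "kernel" PIO send buffers over to the
 * DMA engine via the three SendDmaBufMask registers, create the notify
 * and abort tasklets, and finally enable the engine and its interrupt.
 */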
int setup_sdma(struct ipath_devdata *dd)
{
	int ret = 0;
	unsigned i, n;
	u64 tmp64;
	u64 senddmabufmask[3] = { 0 };
	unsigned long flags;

	ret = alloc_sdma(dd);
	if (ret)
		goto done;

	if (!dd->ipath_sdma_descq) {
		ipath_dev_err(dd, "SendDMA memory not allocated\n");
		goto done;
	}

	dd->ipath_sdma_status = 0;
	dd->ipath_sdma_abort_jiffies = 0;
	dd->ipath_sdma_generation = 0;
	dd->ipath_sdma_descq_tail = 0;
	dd->ipath_sdma_descq_head = 0;
	dd->ipath_sdma_descq_removed = 0;
	dd->ipath_sdma_descq_added = 0;

	/* Set SendDmaBase */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
			 dd->ipath_sdma_descq_phys);
	/* Set SendDmaLenGen */
	tmp64 = dd->ipath_sdma_descq_cnt;
	tmp64 |= 1<<18; /* enable generation checking */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
	/* Set SendDmaTail */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
			 dd->ipath_sdma_descq_tail);
	/* Set SendDmaHeadAddr */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
			 dd->ipath_sdma_head_phys);

	/* Reserve all the former "kernel" piobufs */
	n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - dd->ipath_pioreserved;
	for (i = dd->ipath_lastport_piobuf; i < n; ++i) {
		unsigned word = i / 64;
		unsigned bit = i & 63;
		BUG_ON(word >= 3);
		senddmabufmask[word] |= 1ULL << bit;
	}
	ipath_chg_pioavailkernel(dd, dd->ipath_lastport_piobuf,
		n - dd->ipath_lastport_piobuf, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
			 senddmabufmask[0]);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
			 senddmabufmask[1]);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
			 senddmabufmask[2]);

	INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
	INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);

	tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
		     (unsigned long) dd);
	tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
		     (unsigned long) dd);

	/* Turn on SDMA */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE |
		INFINIPATH_S_SDMAINTENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	__set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

done:
	return ret;
}

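/*
 * Shutdown path: mark the engine as shutting down so the tasklets become
 * no-ops, kill them, disable SDMA in sendctrl, flush any still-active
 * requests with status IPATH_SDMA_TXREQ_S_SHUTDOWN and run their callbacks
 * synchronously, stop the VL15 watchdog, zero the SDMA chip registers, and
 * free the coherent memory outside the lock.
 */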
void teardown_sdma(struct ipath_devdata *dd)
{
	struct ipath_sdma_txreq *txp, *txpnext;
	unsigned long flags;
	dma_addr_t sdma_head_phys = 0;
	dma_addr_t sdma_descq_phys = 0;
	void *sdma_descq = NULL;
	void *sdma_head_dma = NULL;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	__clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
	__set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
	__set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	tasklet_kill(&dd->ipath_sdma_abort_task);
	tasklet_kill(&dd->ipath_sdma_notify_task);

	/* turn off sdma */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
		dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	/* dequeue all "sent" requests */
	list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
				 list) {
		txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
		if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
			vl15_watchdog_deq(dd);
		list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
	}
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	sdma_notify_taskbody(dd);

	del_timer_sync(&dd->ipath_sdma_vl15_timer);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	dd->ipath_sdma_abort_jiffies = 0;

	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);

	if (dd->ipath_sdma_head_dma) {
		sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
		sdma_head_phys = dd->ipath_sdma_head_phys;
		dd->ipath_sdma_head_dma = NULL;
		dd->ipath_sdma_head_phys = 0;
	}

	if (dd->ipath_sdma_descq) {
		sdma_descq = dd->ipath_sdma_descq;
		sdma_descq_phys = dd->ipath_sdma_descq_phys;
		dd->ipath_sdma_descq = NULL;
		dd->ipath_sdma_descq_phys = 0;
	}

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	if (sdma_head_dma)
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  sdma_head_dma, sdma_head_phys);

	if (sdma_descq)
		dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
				  sdma_descq, sdma_descq_phys);
}

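/*
 * Build the two 64-bit words of a send DMA descriptor.  The layout, as
 * encoded below and in the callers, is:
 *
 *   qword 0, bits 63:32  SDmaPhyAddr[31:0] (dword-aligned buffer address)
 *   qword 0, bits 31:30  SDmaGeneration[1:0]
 *   qword 0, bits 26:16  SDmaDwordCount[10:0]
 *   qword 0, bit  15     SDmaIntReq        (set by the caller)
 *   qword 0, bit  14     SDmaUseLargeBuf   (set by the caller)
 *   qword 0, bit  12     SDmaFirstDesc     (set by the caller)
 *   qword 0, bit  11     SDmaLastDesc      (set by the caller)
 *   qword 0, bits 10:0   SDmaBufOffset[12:2] (dword offset into the buffer)
 *   qword 1, bits 15:0   SDmaPhyAddr[47:32]
 */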
static inline void make_sdma_desc(struct ipath_devdata *dd,
	u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
{
	WARN_ON(addr & 3);
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}

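/*
 * Queueing works as follows: the packet header at tx->txreq.map_addr
 * (tx->map_len bytes) is mapped and becomes the first descriptor, each
 * SGE chunk is dma_map_single()d into a following descriptor, the last
 * descriptor gets SDmaLastDesc (and SDmaIntReq if requested), and writing
 * the new tail to kr_senddmatail hands the chain to the engine.  Returns
 * 0 on success, -EMSGSIZE for an oversized packet, -EBUSY while an abort
 * is in progress, -ENOBUFS when the descriptor ring is full, or -EIO if
 * the header mapping fails.
 */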
/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
int ipath_sdma_verbs_send(struct ipath_devdata *dd,
	struct ipath_sge_state *ss, u32 dwords,
	struct ipath_verbs_txreq *tx)
{

	unsigned long flags;
	struct ipath_sge *sge;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;

	if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
		ipath_dbg("packet size %X > ibmax %X, fail\n",
			tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
		ret = -EMSGSIZE;
		goto fail;
	}

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

retry:
	if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
		ret = -EBUSY;
		goto unlock;
	}

	if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
		if (ipath_sdma_make_progress(dd))
			goto retry;
		ret = -ENOBUFS;
		goto unlock;
	}

	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
			      tx->map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(addr)) {
		ret = -EIO;
		goto unlock;
	}

	dwoffset = tx->map_len >> 2;
	make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);

	/* SDmaFirstDesc */
	sdmadesc[0] |= 1ULL << 12;
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= 1ULL << 14;	/* SDmaUseLargeBuf */

	/* write to the descq */
	tail = dd->ipath_sdma_descq_tail;
	descqp = &dd->ipath_sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
		tx->txreq.start_idx = tail;

	/* increment the tail */
	if (++tail == dd->ipath_sdma_descq_cnt) {
		tail = 0;
		descqp = &dd->ipath_sdma_descq[0].qw[0];
		++dd->ipath_sdma_generation;
	}

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;

		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		dw = (len + 3) >> 2;
		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
				      DMA_TO_DEVICE);
		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= 1ULL << 14;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail */
		if (++tail == dd->ipath_sdma_descq_cnt) {
			tail = 0;
			descqp = &dd->ipath_sdma_descq[0].qw[0];
			++dd->ipath_sdma_generation;
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}

		dwoffset += dw;
		dwords -= dw;
	}

	if (!tail)
		descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
	descqp -= 2;
	/* SDmaLastDesc */
	descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
		/* SDmaIntReq */
		descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
	}

	/* Commit writes to memory and advance the tail on the chip */
	wmb();
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);

	tx->txreq.next_descq_idx = tail;
	tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
	dd->ipath_sdma_descq_tail = tail;
	dd->ipath_sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
		vl15_watchdog_enq(dd);

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
fail:
	return ret;
}