author     Chris Leech <christopher.leech@intel.com>      2006-05-23 20:35:34 -0400
committer  David S. Miller <davem@sunset.davemloft.net>   2006-06-18 00:18:46 -0400
commit     0bbd5f4e97ff9c057b385a1886b4aed1fb0300f1 (patch)
tree       0c3d8528c31e8291fb78c2e7a287910987ed2888 /drivers
parent     c13c8260da3155f2cefb63b0d1b7dcdcb405c644 (diff)
[I/OAT]: Driver for the Intel(R) I/OAT DMA engine
Adds a new ioatdma driver.

Signed-off-by: Chris Leech <christopher.leech@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
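
For orientation, not part of the patch: a minimal dmaengine client of the operations this driver registers (see the device->common setup in ioat_probe() below) might look like the following sketch. It mirrors the polling sequence the driver's own ioat_self_test() uses; the helper name example_sync_copy is illustrative only, and a real client would obtain its channel through the dmaengine client API rather than taking one as an argument.

    #include <linux/dmaengine.h>

    /* editorial sketch: synchronously copy len bytes over an I/OAT channel */
    static int example_sync_copy(struct dma_chan *chan, void *dst, void *src,
                                 size_t len)
    {
            dma_cookie_t cookie;

            /* queue the copy, then kick the channel to start work */
            cookie = chan->device->device_memcpy_buf_to_buf(chan, dst, src, len);
            chan->device->device_memcpy_issue_pending(chan);

            /* busy-poll for completion, as the driver's self-test does */
            while (chan->device->device_memcpy_complete(chan, cookie, NULL, NULL)
                            != DMA_SUCCESS)
                    cpu_relax();

            return 0;
    }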
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/dma/Kconfig                 9
-rw-r--r--   drivers/dma/Makefile                1
-rw-r--r--   drivers/dma/ioatdma.c             839
-rw-r--r--   drivers/dma/ioatdma.h             126
-rw-r--r--   drivers/dma/ioatdma_hw.h           52
-rw-r--r--   drivers/dma/ioatdma_io.h          118
-rw-r--r--   drivers/dma/ioatdma_registers.h   126
7 files changed, 1271 insertions, 0 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f9ac4bcf8652..0f15e769c6bc 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -10,4 +10,13 @@ config DMA_ENGINE
           DMA engines offload copy operations from the CPU to dedicated
           hardware, allowing the copies to happen asynchronously.
 
+comment "DMA Devices"
+
+config INTEL_IOATDMA
+        tristate "Intel I/OAT DMA support"
+        depends on DMA_ENGINE && PCI
+        default m
+        ---help---
+          Enable support for the Intel(R) I/OAT DMA engine.
+
 endmenu
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 10b739138c93..c8a5f5677313 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1 +1,2 @@
 obj-y += dmaengine.o
+obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
new file mode 100644
index 000000000000..11d48b97b36e
--- /dev/null
+++ b/drivers/dma/ioatdma.c
@@ -0,0 +1,839 @@
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include "ioatdma.h"
#include "ioatdma_io.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)

/* internal functions */
static int __devinit ioat_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit ioat_remove(struct pci_dev *pdev);

static int enumerate_dma_channels(struct ioat_device *device)
{
        u8 xfercap_scale;
        u32 xfercap;
        int i;
        struct ioat_dma_chan *ioat_chan;

        device->common.chancnt = ioatdma_read8(device, IOAT_CHANCNT_OFFSET);
        xfercap_scale = ioatdma_read8(device, IOAT_XFERCAP_OFFSET);
        xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

        for (i = 0; i < device->common.chancnt; i++) {
                ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
                if (!ioat_chan) {
                        device->common.chancnt = i;
                        break;
                }

                ioat_chan->device = device;
                ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
                ioat_chan->xfercap = xfercap;
                spin_lock_init(&ioat_chan->cleanup_lock);
                spin_lock_init(&ioat_chan->desc_lock);
                INIT_LIST_HEAD(&ioat_chan->free_desc);
                INIT_LIST_HEAD(&ioat_chan->used_desc);
                /* This should be made common somewhere in dmaengine.c */
                ioat_chan->common.device = &device->common;
                ioat_chan->common.client = NULL;
                list_add_tail(&ioat_chan->common.device_node,
                              &device->common.channels);
        }
        return device->common.chancnt;
}

static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
        struct ioat_dma_chan *ioat_chan,
        int flags)
{
        struct ioat_dma_descriptor *desc;
        struct ioat_desc_sw *desc_sw;
        struct ioat_device *ioat_device;
        dma_addr_t phys;

        ioat_device = to_ioat_device(ioat_chan->common.device);
        desc = pci_pool_alloc(ioat_device->dma_pool, flags, &phys);
        if (unlikely(!desc))
                return NULL;

        desc_sw = kzalloc(sizeof(*desc_sw), flags);
        if (unlikely(!desc_sw)) {
                pci_pool_free(ioat_device->dma_pool, desc, phys);
                return NULL;
        }

        memset(desc, 0, sizeof(*desc));
        desc_sw->hw = desc;
        desc_sw->phys = phys;

        return desc_sw;
}

#define INITIAL_IOAT_DESC_COUNT 128

static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan);

/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *desc = NULL;
        u16 chanctrl;
        u32 chanerr;
        int i;
        LIST_HEAD(tmp_list);

        /*
         * In-use bit automatically set by reading chanctrl
         * If 0, we got it, if 1, someone else did
         */
        chanctrl = ioatdma_chan_read16(ioat_chan, IOAT_CHANCTRL_OFFSET);
        if (chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE)
                return -EBUSY;

        /* Setup register to interrupt and write completion status on error */
        chanctrl = IOAT_CHANCTRL_CHANNEL_IN_USE |
                IOAT_CHANCTRL_ERR_INT_EN |
                IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
                IOAT_CHANCTRL_ERR_COMPLETION_EN;
        ioatdma_chan_write16(ioat_chan, IOAT_CHANCTRL_OFFSET, chanctrl);

        chanerr = ioatdma_chan_read32(ioat_chan, IOAT_CHANERR_OFFSET);
        if (chanerr) {
                printk(KERN_ERR "IOAT: CHANERR = %x, clearing\n", chanerr);
                ioatdma_chan_write32(ioat_chan, IOAT_CHANERR_OFFSET, chanerr);
        }

        /* Allocate descriptors */
        for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
                desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
                if (!desc) {
                        printk(KERN_ERR "IOAT: Only %d initial descriptors\n", i);
                        break;
                }
                list_add_tail(&desc->node, &tmp_list);
        }
        spin_lock_bh(&ioat_chan->desc_lock);
        list_splice(&tmp_list, &ioat_chan->free_desc);
        spin_unlock_bh(&ioat_chan->desc_lock);

        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        ioat_chan->completion_virt =
                pci_pool_alloc(ioat_chan->device->completion_pool,
                               GFP_KERNEL,
                               &ioat_chan->completion_addr);
        memset(ioat_chan->completion_virt, 0,
               sizeof(*ioat_chan->completion_virt));
        ioatdma_chan_write32(ioat_chan, IOAT_CHANCMP_OFFSET_LOW,
                ((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF);
        ioatdma_chan_write32(ioat_chan, IOAT_CHANCMP_OFFSET_HIGH,
                ((u64) ioat_chan->completion_addr) >> 32);

        ioat_start_null_desc(ioat_chan);
        return i;
}

static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_device *ioat_device = to_ioat_device(chan->device);
        struct ioat_desc_sw *desc, *_desc;
        u16 chanctrl;
        int in_use_descs = 0;

        ioat_dma_memcpy_cleanup(ioat_chan);

        ioatdma_chan_write8(ioat_chan, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_RESET);

        spin_lock_bh(&ioat_chan->desc_lock);
        list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
                in_use_descs++;
                list_del(&desc->node);
                pci_pool_free(ioat_device->dma_pool, desc->hw, desc->phys);
                kfree(desc);
        }
        list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
                list_del(&desc->node);
                pci_pool_free(ioat_device->dma_pool, desc->hw, desc->phys);
                kfree(desc);
        }
        spin_unlock_bh(&ioat_chan->desc_lock);

        pci_pool_free(ioat_device->completion_pool,
                      ioat_chan->completion_virt,
                      ioat_chan->completion_addr);

        /* one is ok since we left it on there on purpose */
        if (in_use_descs > 1)
                printk(KERN_ERR "IOAT: Freeing %d in use descriptors!\n",
                       in_use_descs - 1);

        ioat_chan->last_completion = ioat_chan->completion_addr = 0;

        /* Tell hw the chan is free */
        chanctrl = ioatdma_chan_read16(ioat_chan, IOAT_CHANCTRL_OFFSET);
        chanctrl &= ~IOAT_CHANCTRL_CHANNEL_IN_USE;
        ioatdma_chan_write16(ioat_chan, IOAT_CHANCTRL_OFFSET, chanctrl);
}

/**
 * do_ioat_dma_memcpy - actual function that initiates an IOAT DMA transaction
 * @ioat_chan: IOAT DMA channel handle
 * @dest: DMA destination address
 * @src: DMA source address
 * @len: transaction length in bytes
 */
static dma_cookie_t do_ioat_dma_memcpy(struct ioat_dma_chan *ioat_chan,
                                       dma_addr_t dest,
                                       dma_addr_t src,
                                       size_t len)
{
        struct ioat_desc_sw *first;
        struct ioat_desc_sw *prev;
        struct ioat_desc_sw *new;
        dma_cookie_t cookie;
        LIST_HEAD(new_chain);
        u32 copy;
        size_t orig_len;
        dma_addr_t orig_src, orig_dst;
        unsigned int desc_count = 0;
        unsigned int append = 0;

        if (!ioat_chan || !dest || !src)
                return -EFAULT;

        if (!len)
                return ioat_chan->common.cookie;

        orig_len = len;
        orig_src = src;
        orig_dst = dest;

        first = NULL;
        prev = NULL;

        spin_lock_bh(&ioat_chan->desc_lock);

        while (len) {
                if (!list_empty(&ioat_chan->free_desc)) {
                        new = to_ioat_desc(ioat_chan->free_desc.next);
                        list_del(&new->node);
                } else {
                        /* try to get another desc */
                        new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
                        /* will this ever happen? */
                        /* TODO add upper limit on these */
                        BUG_ON(!new);
                }

                copy = min((u32) len, ioat_chan->xfercap);

                new->hw->size = copy;
                new->hw->ctl = 0;
                new->hw->src_addr = src;
                new->hw->dst_addr = dest;
                new->cookie = 0;

                /* chain together the physical address list for the HW */
                if (!first)
                        first = new;
                else
                        prev->hw->next = (u64) new->phys;

                prev = new;

                len -= copy;
                dest += copy;
                src += copy;

                list_add_tail(&new->node, &new_chain);
                desc_count++;
        }
        new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
        new->hw->next = 0;

        /* cookie incr and addition to used_list must be atomic */

        cookie = ioat_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
        ioat_chan->common.cookie = new->cookie = cookie;

        pci_unmap_addr_set(new, src, orig_src);
        pci_unmap_addr_set(new, dst, orig_dst);
        pci_unmap_len_set(new, src_len, orig_len);
        pci_unmap_len_set(new, dst_len, orig_len);

        /* write address into NextDescriptor field of last desc in chain */
        to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = first->phys;
        list_splice_init(&new_chain, ioat_chan->used_desc.prev);

        ioat_chan->pending += desc_count;
        if (ioat_chan->pending >= 20) {
                append = 1;
                ioat_chan->pending = 0;
        }

        spin_unlock_bh(&ioat_chan->desc_lock);

        if (append)
                ioatdma_chan_write8(ioat_chan,
                                    IOAT_CHANCMD_OFFSET,
                                    IOAT_CHANCMD_APPEND);
        return cookie;
}

/**
 * ioat_dma_memcpy_buf_to_buf - wrapper that takes src & dest bufs
 * @chan: IOAT DMA channel handle
 * @dest: DMA destination address
 * @src: DMA source address
 * @len: transaction length in bytes
 */
static dma_cookie_t ioat_dma_memcpy_buf_to_buf(struct dma_chan *chan,
                                               void *dest,
                                               void *src,
                                               size_t len)
{
        dma_addr_t dest_addr;
        dma_addr_t src_addr;
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

        dest_addr = pci_map_single(ioat_chan->device->pdev,
                                   dest, len, PCI_DMA_FROMDEVICE);
        src_addr = pci_map_single(ioat_chan->device->pdev,
                                  src, len, PCI_DMA_TODEVICE);

        return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);
}

/**
 * ioat_dma_memcpy_buf_to_pg - wrapper, copying from a buf to a page
 * @chan: IOAT DMA channel handle
 * @page: pointer to the page to copy to
 * @offset: offset into that page
 * @src: DMA source address
 * @len: transaction length in bytes
 */
static dma_cookie_t ioat_dma_memcpy_buf_to_pg(struct dma_chan *chan,
                                              struct page *page,
                                              unsigned int offset,
                                              void *src,
                                              size_t len)
{
        dma_addr_t dest_addr;
        dma_addr_t src_addr;
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

        dest_addr = pci_map_page(ioat_chan->device->pdev,
                                 page, offset, len, PCI_DMA_FROMDEVICE);
        src_addr = pci_map_single(ioat_chan->device->pdev,
                                  src, len, PCI_DMA_TODEVICE);

        return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);
}

/**
 * ioat_dma_memcpy_pg_to_pg - wrapper, copying between two pages
 * @chan: IOAT DMA channel handle
 * @dest_pg: pointer to the page to copy to
 * @dest_off: offset into that page
 * @src_pg: pointer to the page to copy from
 * @src_off: offset into that page
 * @len: transaction length in bytes. This is guaranteed not to make a copy
 *       across a page boundary.
 */
static dma_cookie_t ioat_dma_memcpy_pg_to_pg(struct dma_chan *chan,
                                             struct page *dest_pg,
                                             unsigned int dest_off,
                                             struct page *src_pg,
                                             unsigned int src_off,
                                             size_t len)
{
        dma_addr_t dest_addr;
        dma_addr_t src_addr;
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

        dest_addr = pci_map_page(ioat_chan->device->pdev,
                                 dest_pg, dest_off, len, PCI_DMA_FROMDEVICE);
        src_addr = pci_map_page(ioat_chan->device->pdev,
                                 src_pg, src_off, len, PCI_DMA_TODEVICE);

        return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended descriptors to hw
 * @chan: DMA channel handle
 */
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

        if (ioat_chan->pending != 0) {
                ioat_chan->pending = 0;
                ioatdma_chan_write8(ioat_chan,
                                    IOAT_CHANCMD_OFFSET,
                                    IOAT_CHANCMD_APPEND);
        }
}

static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
{
        unsigned long phys_complete;
        struct ioat_desc_sw *desc, *_desc;
        dma_cookie_t cookie = 0;

        prefetch(chan->completion_virt);

        if (!spin_trylock(&chan->cleanup_lock))
                return;

        /*
         * The completion writeback can happen at any time, so reads by the
         * driver need to be atomic operations. The descriptor physical
         * addresses are limited to 32 bits when the CPU can only do a
         * 32-bit mov.
         */

#if (BITS_PER_LONG == 64)
        phys_complete =
                chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
        phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

        if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
                IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
                printk(KERN_ERR "IOAT: Channel halted, chanerr = %x\n",
                       ioatdma_chan_read32(chan, IOAT_CHANERR_OFFSET));

                /* TODO do something to salvage the situation */
        }

        if (phys_complete == chan->last_completion) {
                spin_unlock(&chan->cleanup_lock);
                return;
        }

        spin_lock_bh(&chan->desc_lock);
        list_for_each_entry_safe(desc, _desc, &chan->used_desc, node) {

                /*
                 * Incoming DMA requests may use multiple descriptors, due to
                 * exceeding xfercap, perhaps. If so, only the last one will
                 * have a cookie, and require unmapping.
                 */
                if (desc->cookie) {
                        cookie = desc->cookie;

                        /*
                         * yes we are unmapping both _page and _single alloc'd
                         * regions with unmap_page. Is this *really* that bad?
                         */
                        pci_unmap_page(chan->device->pdev,
                                       pci_unmap_addr(desc, dst),
                                       pci_unmap_len(desc, dst_len),
                                       PCI_DMA_FROMDEVICE);
                        pci_unmap_page(chan->device->pdev,
                                       pci_unmap_addr(desc, src),
                                       pci_unmap_len(desc, src_len),
                                       PCI_DMA_TODEVICE);
                }

                if (desc->phys != phys_complete) {
                        /* a completed entry, but not the last, so cleanup */
                        list_del(&desc->node);
                        list_add_tail(&desc->node, &chan->free_desc);
                } else {
                        /*
                         * last used desc. Do not remove, so we can append from
                         * it, but don't look at it next time, either
                         */
                        desc->cookie = 0;

                        /* TODO check status bits? */
                        break;
                }
        }

        spin_unlock_bh(&chan->desc_lock);

        chan->last_completion = phys_complete;
        if (cookie != 0)
                chan->completed_cookie = cookie;

        spin_unlock(&chan->cleanup_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with the last completed cookie
 * @used: if not %NULL, updated with the last issued cookie
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
                                            dma_cookie_t cookie,
                                            dma_cookie_t *done,
                                            dma_cookie_t *used)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status ret;

        last_used = chan->cookie;
        last_complete = ioat_chan->completed_cookie;

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret == DMA_SUCCESS)
                return ret;

        ioat_dma_memcpy_cleanup(ioat_chan);

        last_used = chan->cookie;
        last_complete = ioat_chan->completed_cookie;

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        return dma_async_is_complete(cookie, last_complete, last_used);
}

/* PCI API */

static struct pci_device_id ioat_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
        { 0, }
};

static struct pci_driver ioat_pci_drv = {
        .name     = "ioatdma",
        .id_table = ioat_pci_tbl,
        .probe    = ioat_probe,
        .remove   = __devexit_p(ioat_remove),
};

static irqreturn_t ioat_do_interrupt(int irq, void *data, struct pt_regs *regs)
{
        struct ioat_device *instance = data;
        unsigned long attnstatus;
        u8 intrctrl;

        intrctrl = ioatdma_read8(instance, IOAT_INTRCTRL_OFFSET);

        if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
                return IRQ_NONE;

        if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
                ioatdma_write8(instance, IOAT_INTRCTRL_OFFSET, intrctrl);
                return IRQ_NONE;
        }

        attnstatus = ioatdma_read32(instance, IOAT_ATTNSTATUS_OFFSET);

        printk(KERN_ERR "ioatdma error: interrupt! status %lx\n", attnstatus);

        ioatdma_write8(instance, IOAT_INTRCTRL_OFFSET, intrctrl);
        return IRQ_HANDLED;
}

static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *desc;

        spin_lock_bh(&ioat_chan->desc_lock);

        if (!list_empty(&ioat_chan->free_desc)) {
                desc = to_ioat_desc(ioat_chan->free_desc.next);
                list_del(&desc->node);
        } else {
                /* try to get another desc */
                spin_unlock_bh(&ioat_chan->desc_lock);
                desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
                spin_lock_bh(&ioat_chan->desc_lock);
                /* will this ever happen? */
                BUG_ON(!desc);
        }

        desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
        desc->hw->next = 0;

        list_add_tail(&desc->node, &ioat_chan->used_desc);
        spin_unlock_bh(&ioat_chan->desc_lock);

#if (BITS_PER_LONG == 64)
        ioatdma_chan_write64(ioat_chan, IOAT_CHAINADDR_OFFSET, desc->phys);
#else
        ioatdma_chan_write32(ioat_chan,
                             IOAT_CHAINADDR_OFFSET_LOW,
                             (u32) desc->phys);
        ioatdma_chan_write32(ioat_chan, IOAT_CHAINADDR_OFFSET_HIGH, 0);
#endif
        ioatdma_chan_write8(ioat_chan, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_START);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static int ioat_self_test(struct ioat_device *device)
{
        int i;
        u8 *src;
        u8 *dest;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        int err = 0;

        src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
        if (!src)
                return -ENOMEM;
        dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < IOAT_TEST_SIZE; i++)
                src[i] = (u8)i;

        /* Start copy, using first DMA channel */
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
        if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        cookie = ioat_dma_memcpy_buf_to_buf(dma_chan, dest, src, IOAT_TEST_SIZE);
        ioat_dma_memcpy_issue_pending(dma_chan);
        msleep(1);

        if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
                printk(KERN_ERR "ioatdma: Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }
        if (memcmp(src, dest, IOAT_TEST_SIZE)) {
                printk(KERN_ERR "ioatdma: Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

free_resources:
        ioat_dma_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}

static int __devinit ioat_probe(struct pci_dev *pdev,
                                const struct pci_device_id *ent)
{
        int err;
        unsigned long mmio_start, mmio_len;
        void *reg_base;
        struct ioat_device *device;

        err = pci_enable_device(pdev);
        if (err)
                goto err_enable_device;

        err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
        if (err)
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
        if (err)
                goto err_set_dma_mask;

        err = pci_request_regions(pdev, ioat_pci_drv.name);
        if (err)
                goto err_request_regions;

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);

        reg_base = ioremap(mmio_start, mmio_len);
        if (!reg_base) {
                err = -ENOMEM;
                goto err_ioremap;
        }

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device) {
                err = -ENOMEM;
                goto err_kzalloc;
        }

        /* DMA coherent memory pool for DMA descriptor allocations */
        device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
                sizeof(struct ioat_dma_descriptor), 64, 0);
        if (!device->dma_pool) {
                err = -ENOMEM;
                goto err_dma_pool;
        }

        device->completion_pool = pci_pool_create("completion_pool", pdev,
                sizeof(u64), SMP_CACHE_BYTES, SMP_CACHE_BYTES);
        if (!device->completion_pool) {
                err = -ENOMEM;
                goto err_completion_pool;
        }

        device->pdev = pdev;
        pci_set_drvdata(pdev, device);
#ifdef CONFIG_PCI_MSI
        if (pci_enable_msi(pdev) == 0) {
                device->msi = 1;
        } else {
                device->msi = 0;
        }
#endif
        err = request_irq(pdev->irq, &ioat_do_interrupt, SA_SHIRQ, "ioat",
                device);
        if (err)
                goto err_irq;

        device->reg_base = reg_base;

        ioatdma_write8(device, IOAT_INTRCTRL_OFFSET, IOAT_INTRCTRL_MASTER_INT_EN);
        pci_set_master(pdev);

        INIT_LIST_HEAD(&device->common.channels);
        enumerate_dma_channels(device);

        device->common.device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
        device->common.device_free_chan_resources = ioat_dma_free_chan_resources;
        device->common.device_memcpy_buf_to_buf = ioat_dma_memcpy_buf_to_buf;
        device->common.device_memcpy_buf_to_pg = ioat_dma_memcpy_buf_to_pg;
        device->common.device_memcpy_pg_to_pg = ioat_dma_memcpy_pg_to_pg;
        device->common.device_memcpy_complete = ioat_dma_is_complete;
        device->common.device_memcpy_issue_pending = ioat_dma_memcpy_issue_pending;
        printk(KERN_INFO "Intel(R) I/OAT DMA Engine found, %d channels\n",
               device->common.chancnt);

        err = ioat_self_test(device);
        if (err)
                goto err_self_test;

        dma_async_device_register(&device->common);

        return 0;

err_self_test:
err_irq:
        pci_pool_destroy(device->completion_pool);
err_completion_pool:
        pci_pool_destroy(device->dma_pool);
err_dma_pool:
        kfree(device);
err_kzalloc:
        iounmap(reg_base);
err_ioremap:
        pci_release_regions(pdev);
err_request_regions:
err_set_dma_mask:
        pci_disable_device(pdev);
err_enable_device:
        return err;
}

static void __devexit ioat_remove(struct pci_dev *pdev)
{
        struct ioat_device *device;
        struct dma_chan *chan, *_chan;
        struct ioat_dma_chan *ioat_chan;

        device = pci_get_drvdata(pdev);
        dma_async_device_unregister(&device->common);

        free_irq(device->pdev->irq, device);
#ifdef CONFIG_PCI_MSI
        if (device->msi)
                pci_disable_msi(device->pdev);
#endif
        pci_pool_destroy(device->dma_pool);
        pci_pool_destroy(device->completion_pool);
        iounmap(device->reg_base);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        list_for_each_entry_safe(chan, _chan, &device->common.channels, device_node) {
                ioat_chan = to_ioat_chan(chan);
                list_del(&chan->device_node);
                kfree(ioat_chan);
        }
        kfree(device);
}

/* MODULE API */
MODULE_VERSION("1.7");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static int __init ioat_init_module(void)
{
        /* it's currently unsafe to unload this module */
        /* if forced, worst case is that rmmod hangs */
        if (THIS_MODULE != NULL)
                THIS_MODULE->unsafe = 1;

        return pci_module_init(&ioat_pci_drv);
}

module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
        pci_unregister_driver(&ioat_pci_drv);
}

module_exit(ioat_exit_module);
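
A note on sizing, since it is easy to read past the splitting loop in do_ioat_dma_memcpy() above: each hardware descriptor moves at most xfercap bytes, so a single client request can consume several descriptors, which is why only the last descriptor of a chain carries a cookie and the unmap information. An editorial sketch of the arithmetic, with a hypothetical helper name:

    /* editorial sketch: how many descriptors one copy request consumes */
    static unsigned int ioat_descs_for_copy(size_t len, u32 xfercap)
    {
            /*
             * e.g. with a 4KB XFERCAP a 10000-byte request splits into
             * three descriptors: 4096 + 4096 + 1808 bytes
             */
            return (len + xfercap - 1) / xfercap;
    }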
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
new file mode 100644
index 000000000000..312353d12af2
--- /dev/null
+++ b/drivers/dma/ioatdma.h
@@ -0,0 +1,126 @@
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include "ioatdma_hw.h"
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>

#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38

#define IOAT_LOW_COMPLETION_MASK 0xffffffc0

extern struct list_head dma_device_list;
extern struct list_head dma_client_list;

/**
 * struct ioat_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @dma_pool: for allocating DMA descriptors
 * @completion_pool: for allocating completion writeback areas
 * @common: embedded struct dma_device
 * @msi: set if Message Signaled Interrupts are enabled
 */
struct ioat_device {
        struct pci_dev *pdev;
        void *reg_base;
        struct pci_pool *dma_pool;
        struct pci_pool *completion_pool;

        struct dma_device common;
        u8 msi;
};

/**
 * struct ioat_dma_chan - internal representation of a DMA channel
 * @reg_base: per-channel MMIO register space base address
 * @completed_cookie: last cookie seen completed on cleanup
 * @last_completion: physical address of the last completed descriptor
 * @xfercap: XFERCAP register value expanded out
 * @cleanup_lock: protects the completion-side bookkeeping
 * @desc_lock: protects the descriptor lists
 * @free_desc: unused hardware descriptors
 * @used_desc: descriptors handed off to the hardware
 * @pending: descriptors queued since the channel was last appended
 * @device: parent struct ioat_device
 * @common: embedded struct dma_chan
 * @completion_addr: physical address of the completion writeback area
 * @completion_virt: virtual address of the completion writeback area
 */
struct ioat_dma_chan {

        void *reg_base;

        dma_cookie_t completed_cookie;
        unsigned long last_completion;

        u32 xfercap;    /* XFERCAP register value expanded out */

        spinlock_t cleanup_lock;
        spinlock_t desc_lock;
        struct list_head free_desc;
        struct list_head used_desc;

        int pending;

        struct ioat_device *device;
        struct dma_chan common;

        dma_addr_t completion_addr;
        union {
                u64 full; /* HW completion writeback */
                struct {
                        u32 low;
                        u32 high;
                };
        } *completion_virt;
};

/* wrapper around hardware descriptor format + additional software fields */

/**
 * struct ioat_desc_sw - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor
 * @node: entry in the channel's descriptor lists
 * @cookie: nonzero only on the last descriptor of a transaction
 * @phys: physical (bus) address of the hardware descriptor
 */
struct ioat_desc_sw {
        struct ioat_dma_descriptor *hw;
        struct list_head node;
        dma_cookie_t cookie;
        dma_addr_t phys;
        DECLARE_PCI_UNMAP_ADDR(src)
        DECLARE_PCI_UNMAP_LEN(src_len)
        DECLARE_PCI_UNMAP_ADDR(dst)
        DECLARE_PCI_UNMAP_LEN(dst_len)
};

#endif /* IOATDMA_H */
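
The completion_virt union above is the 64-bit status word the channel writes back to host memory. Recovering the address of the last completed descriptor from it, as ioat_dma_memcpy_cleanup() does, is a masking operation; an editorial sketch with a hypothetical helper name:

    /* editorial sketch: extract the last-completed descriptor address */
    static unsigned long ioat_last_completed_phys(struct ioat_dma_chan *chan)
    {
    #if (BITS_PER_LONG == 64)
            /* bits 63:6 hold the completed descriptor address */
            return chan->completion_virt->full &
                    IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
    #else
            /*
             * a 32-bit CPU can only read the low word atomically, which is
             * why descriptor addresses are kept to 32 bits on such systems
             */
            return chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
    #endif
    }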
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h
new file mode 100644
index 000000000000..4d7a12880be3
--- /dev/null
+++ b/drivers/dma/ioatdma_hw.h
@@ -0,0 +1,52 @@
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef _IOAT_HW_H_
#define _IOAT_HW_H_

/* PCI Configuration Space Values */
#define IOAT_PCI_VID    0x8086
#define IOAT_PCI_DID    0x1A38
#define IOAT_PCI_RID    0x00
#define IOAT_PCI_SVID   0x8086
#define IOAT_PCI_SID    0x8086
#define IOAT_VER        0x12    /* Version 1.2 */

struct ioat_dma_descriptor {
        uint32_t size;
        uint32_t ctl;
        uint64_t src_addr;
        uint64_t dst_addr;
        uint64_t next;
        uint64_t rsv1;
        uint64_t rsv2;
        uint64_t user1;
        uint64_t user2;
};

#define IOAT_DMA_DESCRIPTOR_CTL_INT_GN  0x00000001
#define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN  0x00000002
#define IOAT_DMA_DESCRIPTOR_CTL_DST_SN  0x00000004
#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS  0x00000008
#define IOAT_DMA_DESCRIPTOR_CTL_FRAME   0x00000010
#define IOAT_DMA_DESCRIPTOR_NUL         0x00000020
#define IOAT_DMA_DESCRIPTOR_OPCODE      0xFF000000

#endif
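
Tying this layout back to the driver: do_ioat_dma_memcpy() fills one of these descriptors per xfercap-sized piece of a copy, and only the final descriptor of a chain requests a completion-status writeback. An editorial sketch of that fill, with a hypothetical helper name:

    /* editorial sketch: fill one hardware copy descriptor */
    static void ioat_fill_copy_desc(struct ioat_dma_descriptor *hw,
                                    dma_addr_t dst, dma_addr_t src, u32 size,
                                    u64 next_phys, int is_last)
    {
            hw->size = size;
            /* only the last descriptor asks for completion status writeback */
            hw->ctl = is_last ? IOAT_DMA_DESCRIPTOR_CTL_CP_STS : 0;
            hw->src_addr = src;
            hw->dst_addr = dst;
            /* chain to the next descriptor's physical address; 0 terminates */
            hw->next = is_last ? 0 : next_phys;
    }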
diff --git a/drivers/dma/ioatdma_io.h b/drivers/dma/ioatdma_io.h
new file mode 100644
index 000000000000..c0b4bf66c920
--- /dev/null
+++ b/drivers/dma/ioatdma_io.h
@@ -0,0 +1,118 @@
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_IO_H
#define IOATDMA_IO_H

#include <asm/io.h>

/*
 * device and per-channel MMIO register read and write functions
 * this is a lot of annoying inline functions, but it's typesafe
 */

static inline u8 ioatdma_read8(struct ioat_device *device,
                               unsigned int offset)
{
        return readb(device->reg_base + offset);
}

static inline u16 ioatdma_read16(struct ioat_device *device,
                                 unsigned int offset)
{
        return readw(device->reg_base + offset);
}

static inline u32 ioatdma_read32(struct ioat_device *device,
                                 unsigned int offset)
{
        return readl(device->reg_base + offset);
}

static inline void ioatdma_write8(struct ioat_device *device,
                                  unsigned int offset, u8 value)
{
        writeb(value, device->reg_base + offset);
}

static inline void ioatdma_write16(struct ioat_device *device,
                                   unsigned int offset, u16 value)
{
        writew(value, device->reg_base + offset);
}

static inline void ioatdma_write32(struct ioat_device *device,
                                   unsigned int offset, u32 value)
{
        writel(value, device->reg_base + offset);
}

static inline u8 ioatdma_chan_read8(struct ioat_dma_chan *chan,
                                    unsigned int offset)
{
        return readb(chan->reg_base + offset);
}

static inline u16 ioatdma_chan_read16(struct ioat_dma_chan *chan,
                                      unsigned int offset)
{
        return readw(chan->reg_base + offset);
}

static inline u32 ioatdma_chan_read32(struct ioat_dma_chan *chan,
                                      unsigned int offset)
{
        return readl(chan->reg_base + offset);
}

static inline void ioatdma_chan_write8(struct ioat_dma_chan *chan,
                                       unsigned int offset, u8 value)
{
        writeb(value, chan->reg_base + offset);
}

static inline void ioatdma_chan_write16(struct ioat_dma_chan *chan,
                                        unsigned int offset, u16 value)
{
        writew(value, chan->reg_base + offset);
}

static inline void ioatdma_chan_write32(struct ioat_dma_chan *chan,
                                        unsigned int offset, u32 value)
{
        writel(value, chan->reg_base + offset);
}

#if (BITS_PER_LONG == 64)
static inline u64 ioatdma_chan_read64(struct ioat_dma_chan *chan,
                                      unsigned int offset)
{
        return readq(chan->reg_base + offset);
}

static inline void ioatdma_chan_write64(struct ioat_dma_chan *chan,
                                        unsigned int offset, u64 value)
{
        writeq(value, chan->reg_base + offset);
}
#endif

#endif /* IOATDMA_IO_H */
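
readq()/writeq() exist only on 64-bit platforms, hence the BITS_PER_LONG guard around the 64-bit accessors above. On 32-bit kernels the driver instead writes 64-bit registers as two 32-bit halves (see the CHANCMP setup in ioat_dma_alloc_chan_resources() and the CHAINADDR write in ioat_start_null_desc()). An editorial sketch of that pattern, with a hypothetical helper name:

    /* editorial sketch: write a 64-bit register as two 32-bit halves */
    static inline void ioatdma_chan_write_addr(struct ioat_dma_chan *chan,
                                               unsigned int offset_low,
                                               unsigned int offset_high,
                                               u64 addr)
    {
            /* two 32-bit MMIO writes: low half first, then the high half */
            ioatdma_chan_write32(chan, offset_low, (u32) addr);
            ioatdma_chan_write32(chan, offset_high, (u32) (addr >> 32));
    }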
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h
new file mode 100644
index 000000000000..41a21ab2b000
--- /dev/null
+++ b/drivers/dma/ioatdma_registers.h
@@ -0,0 +1,126 @@
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef _IOAT_REGISTERS_H_
#define _IOAT_REGISTERS_H_


/* MMIO Device Registers */
#define IOAT_CHANCNT_OFFSET             0x00    /*  8-bit */

#define IOAT_XFERCAP_OFFSET             0x01    /*  8-bit */
#define IOAT_XFERCAP_4KB                12
#define IOAT_XFERCAP_8KB                13
#define IOAT_XFERCAP_16KB               14
#define IOAT_XFERCAP_32KB               15
#define IOAT_XFERCAP_32GB               0

#define IOAT_GENCTRL_OFFSET             0x02    /*  8-bit */
#define IOAT_GENCTRL_DEBUG_EN           0x01

#define IOAT_INTRCTRL_OFFSET            0x03    /*  8-bit */
#define IOAT_INTRCTRL_MASTER_INT_EN     0x01    /* Master Interrupt Enable */
#define IOAT_INTRCTRL_INT_STATUS        0x02    /* ATTNSTATUS -or- Channel Int */
#define IOAT_INTRCTRL_INT               0x04    /* INT_STATUS -and- MASTER_INT_EN */

#define IOAT_ATTNSTATUS_OFFSET          0x04    /* Each bit is a channel */

#define IOAT_VER_OFFSET                 0x08    /*  8-bit */
#define IOAT_VER_MAJOR_MASK             0xF0
#define IOAT_VER_MINOR_MASK             0x0F
#define GET_IOAT_VER_MAJOR(x)           ((x) & IOAT_VER_MAJOR_MASK)
#define GET_IOAT_VER_MINOR(x)           ((x) & IOAT_VER_MINOR_MASK)

#define IOAT_PERPORTOFFSET_OFFSET       0x0A    /* 16-bit */

#define IOAT_INTRDELAY_OFFSET           0x0C    /* 16-bit */
#define IOAT_INTRDELAY_INT_DELAY_MASK   0x3FFF  /* Interrupt Delay Time */
#define IOAT_INTRDELAY_COALESE_SUPPORT  0x8000  /* Interrupt Coalescing Supported */

#define IOAT_DEVICE_STATUS_OFFSET       0x0E    /* 16-bit */
#define IOAT_DEVICE_STATUS_DEGRADED_MODE        0x0001


#define IOAT_CHANNEL_MMIO_SIZE          0x80    /* Each Channel MMIO space is this size */

/* DMA Channel Registers */
#define IOAT_CHANCTRL_OFFSET            0x00    /* 16-bit Channel Control Register */
#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK     0xF000
#define IOAT_CHANCTRL_CHANNEL_IN_USE            0x0100
#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL     0x0020
#define IOAT_CHANCTRL_ERR_INT_EN                0x0010
#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN          0x0008
#define IOAT_CHANCTRL_ERR_COMPLETION_EN         0x0004
#define IOAT_CHANCTRL_INT_DISABLE               0x0001

#define IOAT_DMA_COMP_OFFSET            0x02    /* 16-bit DMA channel compatibility */
#define IOAT_DMA_COMP_V1                0x0001  /* Compatibility with DMA version 1 */

#define IOAT_CHANSTS_OFFSET             0x04    /* 64-bit Channel Status Register */
#define IOAT_CHANSTS_OFFSET_LOW         0x04
#define IOAT_CHANSTS_OFFSET_HIGH        0x08
#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR  0xFFFFFFFFFFFFFFC0
#define IOAT_CHANSTS_SOFT_ERR                   0x0000000000000010
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS        0x0000000000000007
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE         0x0
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE           0x1
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED      0x2
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED         0x3

#define IOAT_CHAINADDR_OFFSET           0x0C    /* 64-bit Descriptor Chain Address Register */
#define IOAT_CHAINADDR_OFFSET_LOW       0x0C
#define IOAT_CHAINADDR_OFFSET_HIGH      0x10

#define IOAT_CHANCMD_OFFSET             0x14    /*  8-bit DMA Channel Command Register */
#define IOAT_CHANCMD_RESET              0x20
#define IOAT_CHANCMD_RESUME             0x10
#define IOAT_CHANCMD_ABORT              0x08
#define IOAT_CHANCMD_SUSPEND            0x04
#define IOAT_CHANCMD_APPEND             0x02
#define IOAT_CHANCMD_START              0x01

#define IOAT_CHANCMP_OFFSET             0x18    /* 64-bit Channel Completion Address Register */
#define IOAT_CHANCMP_OFFSET_LOW         0x18
#define IOAT_CHANCMP_OFFSET_HIGH        0x1C

#define IOAT_CDAR_OFFSET                0x20    /* 64-bit Current Descriptor Address Register */
#define IOAT_CDAR_OFFSET_LOW            0x20
#define IOAT_CDAR_OFFSET_HIGH           0x24

#define IOAT_CHANERR_OFFSET             0x28    /* 32-bit Channel Error Register */
#define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR          0x0001
#define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR         0x0002
#define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR           0x0004
#define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR      0x0008
#define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR               0x0010
#define IOAT_CHANERR_CHANCMD_ERR                        0x0020
#define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR  0x0040
#define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR      0x0080
#define IOAT_CHANERR_READ_DATA_ERR                      0x0100
#define IOAT_CHANERR_WRITE_DATA_ERR                     0x0200
#define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR             0x0400
#define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR              0x0800
#define IOAT_CHANERR_COMPLETION_ADDR_ERR                0x1000
#define IOAT_CHANERR_INT_CONFIGURATION_ERR              0x2000
#define IOAT_CHANERR_SOFT_ERR                           0x4000

#define IOAT_CHANERR_MASK_OFFSET        0x2C    /* 32-bit Channel Error Mask Register */

#endif /* _IOAT_REGISTERS_H_ */
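
When ioat_dma_memcpy_cleanup() finds a halted channel it currently just prints the raw CHANERR value; the bit definitions above make it straightforward to log something human-readable instead. An editorial sketch, not part of the patch, covering a few of the bits:

    /* editorial sketch: decode a few CHANERR bits into log messages */
    static void ioat_decode_chanerr(u32 chanerr)
    {
            if (chanerr & IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR)
                    printk(KERN_ERR "ioatdma: source address error\n");
            if (chanerr & IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR)
                    printk(KERN_ERR "ioatdma: destination address error\n");
            if (chanerr & IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR)
                    printk(KERN_ERR "ioatdma: bad next-descriptor address\n");
            if (chanerr & IOAT_CHANERR_SOFT_ERR)
                    printk(KERN_ERR "ioatdma: soft error\n");
    }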