author:    Dave Jiang <dave.jiang@intel.com>    2015-08-11 11:48:43 -0400
committer: Vinod Koul <vinod.koul@intel.com>    2015-08-17 04:07:30 -0400
commit:    c0f28ce66ecfd9fa0ae662a2c7f3e68e537e77f4 (patch)
tree:      29b72d67fd006b880c44882fbe412204850657a9 /drivers/dma/ioat
parent:    80b1973659949fbdcbfe9e086e2370313a9f1288 (diff)
dmaengine: ioatdma: move all the init routines
Move all the init routines to init.c and fix up anything broken during
the move.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
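
The change is mechanical: each routine that used to be file-local to dma.c or dma_v3.c drops its static qualifier, gains a prototype in dma.h, and the relocated probe/setup code in the new init.c calls it through that header (with the ioat3_* names renamed to ioat_* along the way). A minimal sketch of the pattern follows; the init.c fragment is a hypothetical excerpt, since init.c itself does not appear in the diff below:

/* dma.c: the handler loses "static" so init.o can link against it */
-static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+irqreturn_t ioat_dma_do_interrupt(int irq, void *data)

/* dma.h: matching prototype exported to the rest of the driver */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);

/* init.c (hypothetical excerpt): the interrupt setup moved out of dma.c,
 * mirroring the ioat_dma_setup_interrupts() body removed below, still
 * requests the same handler */
err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
		       "ioat-msi", ioat_dma);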
Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r--  drivers/dma/ioat/Makefile |    2
-rw-r--r--  drivers/dma/ioat/dma.c    |  509
-rw-r--r--  drivers/dma/ioat/dma.h    |   39
-rw-r--r--  drivers/dma/ioat/dma_v3.c |  626
-rw-r--r--  drivers/dma/ioat/init.c   | 1293
-rw-r--r--  drivers/dma/ioat/pci.c    |  278
6 files changed, 1375 insertions(+), 1372 deletions(-)
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile
index d2555f4881d7..f785f8f42f7d 100644
--- a/drivers/dma/ioat/Makefile
+++ b/drivers/dma/ioat/Makefile
@@ -1,2 +1,2 @@ | |||
1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o |
2 | ioatdma-y := pci.o dma.o dma_v3.o dca.o sysfs.o | 2 | ioatdma-y := init.o dma.o dma_v3.o dca.o sysfs.o |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1746f7b4c3b4..5d78cafdd3f2 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -37,30 +37,12 @@ | |||
37 | 37 | ||
38 | #include "../dmaengine.h" | 38 | #include "../dmaengine.h" |
39 | 39 | ||
40 | int ioat_pending_level = 4; | ||
41 | module_param(ioat_pending_level, int, 0644); | ||
42 | MODULE_PARM_DESC(ioat_pending_level, | ||
43 | "high-water mark for pushing ioat descriptors (default: 4)"); | ||
44 | int ioat_ring_alloc_order = 8; | ||
45 | module_param(ioat_ring_alloc_order, int, 0644); | ||
46 | MODULE_PARM_DESC(ioat_ring_alloc_order, | ||
47 | "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)"); | ||
48 | static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER; | ||
49 | module_param(ioat_ring_max_alloc_order, int, 0644); | ||
50 | MODULE_PARM_DESC(ioat_ring_max_alloc_order, | ||
51 | "ioat+: upper limit for ring size (default: 16)"); | ||
52 | static char ioat_interrupt_style[32] = "msix"; | ||
53 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, | ||
54 | sizeof(ioat_interrupt_style), 0644); | ||
55 | MODULE_PARM_DESC(ioat_interrupt_style, | ||
56 | "set ioat interrupt style: msix (default), msi, intx"); | ||
57 | |||
58 | /** | 40 | /** |
59 | * ioat_dma_do_interrupt - handler used for single vector interrupt mode | 41 | * ioat_dma_do_interrupt - handler used for single vector interrupt mode |
60 | * @irq: interrupt id | 42 | * @irq: interrupt id |
61 | * @data: interrupt data | 43 | * @data: interrupt data |
62 | */ | 44 | */ |
63 | static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) | 45 | irqreturn_t ioat_dma_do_interrupt(int irq, void *data) |
64 | { | 46 | { |
65 | struct ioatdma_device *instance = data; | 47 | struct ioatdma_device *instance = data; |
66 | struct ioatdma_chan *ioat_chan; | 48 | struct ioatdma_chan *ioat_chan; |
@@ -94,7 +76,7 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) | |||
94 | * @irq: interrupt id | 76 | * @irq: interrupt id |
95 | * @data: interrupt data | 77 | * @data: interrupt data |
96 | */ | 78 | */ |
97 | static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) | 79 | irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) |
98 | { | 80 | { |
99 | struct ioatdma_chan *ioat_chan = data; | 81 | struct ioatdma_chan *ioat_chan = data; |
100 | 82 | ||
@@ -104,28 +86,6 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) | |||
104 | return IRQ_HANDLED; | 86 | return IRQ_HANDLED; |
105 | } | 87 | } |
106 | 88 | ||
107 | /* common channel initialization */ | ||
108 | void | ||
109 | ioat_init_channel(struct ioatdma_device *ioat_dma, | ||
110 | struct ioatdma_chan *ioat_chan, int idx) | ||
111 | { | ||
112 | struct dma_device *dma = &ioat_dma->dma_dev; | ||
113 | struct dma_chan *c = &ioat_chan->dma_chan; | ||
114 | unsigned long data = (unsigned long) c; | ||
115 | |||
116 | ioat_chan->ioat_dma = ioat_dma; | ||
117 | ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1)); | ||
118 | spin_lock_init(&ioat_chan->cleanup_lock); | ||
119 | ioat_chan->dma_chan.device = dma; | ||
120 | dma_cookie_init(&ioat_chan->dma_chan); | ||
121 | list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels); | ||
122 | ioat_dma->idx[idx] = ioat_chan; | ||
123 | init_timer(&ioat_chan->timer); | ||
124 | ioat_chan->timer.function = ioat_dma->timer_fn; | ||
125 | ioat_chan->timer.data = data; | ||
126 | tasklet_init(&ioat_chan->cleanup_task, ioat_dma->cleanup_fn, data); | ||
127 | } | ||
128 | |||
129 | void ioat_stop(struct ioatdma_chan *ioat_chan) | 89 | void ioat_stop(struct ioatdma_chan *ioat_chan) |
130 | { | 90 | { |
131 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | 91 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; |
@@ -214,299 +174,6 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
214 | return dma_cookie_status(c, cookie, txstate); | 174 | return dma_cookie_status(c, cookie, txstate); |
215 | } | 175 | } |
216 | 176 | ||
217 | /* | ||
218 | * Perform a IOAT transaction to verify the HW works. | ||
219 | */ | ||
220 | #define IOAT_TEST_SIZE 2000 | ||
221 | |||
222 | static void ioat_dma_test_callback(void *dma_async_param) | ||
223 | { | ||
224 | struct completion *cmp = dma_async_param; | ||
225 | |||
226 | complete(cmp); | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. | ||
231 | * @ioat_dma: dma device to be tested | ||
232 | */ | ||
233 | int ioat_dma_self_test(struct ioatdma_device *ioat_dma) | ||
234 | { | ||
235 | int i; | ||
236 | u8 *src; | ||
237 | u8 *dest; | ||
238 | struct dma_device *dma = &ioat_dma->dma_dev; | ||
239 | struct device *dev = &ioat_dma->pdev->dev; | ||
240 | struct dma_chan *dma_chan; | ||
241 | struct dma_async_tx_descriptor *tx; | ||
242 | dma_addr_t dma_dest, dma_src; | ||
243 | dma_cookie_t cookie; | ||
244 | int err = 0; | ||
245 | struct completion cmp; | ||
246 | unsigned long tmo; | ||
247 | unsigned long flags; | ||
248 | |||
249 | src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | ||
250 | if (!src) | ||
251 | return -ENOMEM; | ||
252 | dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | ||
253 | if (!dest) { | ||
254 | kfree(src); | ||
255 | return -ENOMEM; | ||
256 | } | ||
257 | |||
258 | /* Fill in src buffer */ | ||
259 | for (i = 0; i < IOAT_TEST_SIZE; i++) | ||
260 | src[i] = (u8)i; | ||
261 | |||
262 | /* Start copy, using first DMA channel */ | ||
263 | dma_chan = container_of(dma->channels.next, struct dma_chan, | ||
264 | device_node); | ||
265 | if (dma->device_alloc_chan_resources(dma_chan) < 1) { | ||
266 | dev_err(dev, "selftest cannot allocate chan resource\n"); | ||
267 | err = -ENODEV; | ||
268 | goto out; | ||
269 | } | ||
270 | |||
271 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | ||
272 | if (dma_mapping_error(dev, dma_src)) { | ||
273 | dev_err(dev, "mapping src buffer failed\n"); | ||
274 | goto free_resources; | ||
275 | } | ||
276 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | ||
277 | if (dma_mapping_error(dev, dma_dest)) { | ||
278 | dev_err(dev, "mapping dest buffer failed\n"); | ||
279 | goto unmap_src; | ||
280 | } | ||
281 | flags = DMA_PREP_INTERRUPT; | ||
282 | tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest, | ||
283 | dma_src, IOAT_TEST_SIZE, | ||
284 | flags); | ||
285 | if (!tx) { | ||
286 | dev_err(dev, "Self-test prep failed, disabling\n"); | ||
287 | err = -ENODEV; | ||
288 | goto unmap_dma; | ||
289 | } | ||
290 | |||
291 | async_tx_ack(tx); | ||
292 | init_completion(&cmp); | ||
293 | tx->callback = ioat_dma_test_callback; | ||
294 | tx->callback_param = &cmp; | ||
295 | cookie = tx->tx_submit(tx); | ||
296 | if (cookie < 0) { | ||
297 | dev_err(dev, "Self-test setup failed, disabling\n"); | ||
298 | err = -ENODEV; | ||
299 | goto unmap_dma; | ||
300 | } | ||
301 | dma->device_issue_pending(dma_chan); | ||
302 | |||
303 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
304 | |||
305 | if (tmo == 0 || | ||
306 | dma->device_tx_status(dma_chan, cookie, NULL) | ||
307 | != DMA_COMPLETE) { | ||
308 | dev_err(dev, "Self-test copy timed out, disabling\n"); | ||
309 | err = -ENODEV; | ||
310 | goto unmap_dma; | ||
311 | } | ||
312 | if (memcmp(src, dest, IOAT_TEST_SIZE)) { | ||
313 | dev_err(dev, "Self-test copy failed compare, disabling\n"); | ||
314 | err = -ENODEV; | ||
315 | goto free_resources; | ||
316 | } | ||
317 | |||
318 | unmap_dma: | ||
319 | dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | ||
320 | unmap_src: | ||
321 | dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | ||
322 | free_resources: | ||
323 | dma->device_free_chan_resources(dma_chan); | ||
324 | out: | ||
325 | kfree(src); | ||
326 | kfree(dest); | ||
327 | return err; | ||
328 | } | ||
329 | |||
330 | /** | ||
331 | * ioat_dma_setup_interrupts - setup interrupt handler | ||
332 | * @ioat_dma: ioat dma device | ||
333 | */ | ||
334 | int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma) | ||
335 | { | ||
336 | struct ioatdma_chan *ioat_chan; | ||
337 | struct pci_dev *pdev = ioat_dma->pdev; | ||
338 | struct device *dev = &pdev->dev; | ||
339 | struct msix_entry *msix; | ||
340 | int i, j, msixcnt; | ||
341 | int err = -EINVAL; | ||
342 | u8 intrctrl = 0; | ||
343 | |||
344 | if (!strcmp(ioat_interrupt_style, "msix")) | ||
345 | goto msix; | ||
346 | if (!strcmp(ioat_interrupt_style, "msi")) | ||
347 | goto msi; | ||
348 | if (!strcmp(ioat_interrupt_style, "intx")) | ||
349 | goto intx; | ||
350 | dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style); | ||
351 | goto err_no_irq; | ||
352 | |||
353 | msix: | ||
354 | /* The number of MSI-X vectors should equal the number of channels */ | ||
355 | msixcnt = ioat_dma->dma_dev.chancnt; | ||
356 | for (i = 0; i < msixcnt; i++) | ||
357 | ioat_dma->msix_entries[i].entry = i; | ||
358 | |||
359 | err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt); | ||
360 | if (err) | ||
361 | goto msi; | ||
362 | |||
363 | for (i = 0; i < msixcnt; i++) { | ||
364 | msix = &ioat_dma->msix_entries[i]; | ||
365 | ioat_chan = ioat_chan_by_index(ioat_dma, i); | ||
366 | err = devm_request_irq(dev, msix->vector, | ||
367 | ioat_dma_do_interrupt_msix, 0, | ||
368 | "ioat-msix", ioat_chan); | ||
369 | if (err) { | ||
370 | for (j = 0; j < i; j++) { | ||
371 | msix = &ioat_dma->msix_entries[j]; | ||
372 | ioat_chan = ioat_chan_by_index(ioat_dma, j); | ||
373 | devm_free_irq(dev, msix->vector, ioat_chan); | ||
374 | } | ||
375 | goto msi; | ||
376 | } | ||
377 | } | ||
378 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; | ||
379 | ioat_dma->irq_mode = IOAT_MSIX; | ||
380 | goto done; | ||
381 | |||
382 | msi: | ||
383 | err = pci_enable_msi(pdev); | ||
384 | if (err) | ||
385 | goto intx; | ||
386 | |||
387 | err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0, | ||
388 | "ioat-msi", ioat_dma); | ||
389 | if (err) { | ||
390 | pci_disable_msi(pdev); | ||
391 | goto intx; | ||
392 | } | ||
393 | ioat_dma->irq_mode = IOAT_MSI; | ||
394 | goto done; | ||
395 | |||
396 | intx: | ||
397 | err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, | ||
398 | IRQF_SHARED, "ioat-intx", ioat_dma); | ||
399 | if (err) | ||
400 | goto err_no_irq; | ||
401 | |||
402 | ioat_dma->irq_mode = IOAT_INTX; | ||
403 | done: | ||
404 | if (ioat_dma->intr_quirk) | ||
405 | ioat_dma->intr_quirk(ioat_dma); | ||
406 | intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; | ||
407 | writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); | ||
408 | return 0; | ||
409 | |||
410 | err_no_irq: | ||
411 | /* Disable all interrupt generation */ | ||
412 | writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); | ||
413 | ioat_dma->irq_mode = IOAT_NOIRQ; | ||
414 | dev_err(dev, "no usable interrupts\n"); | ||
415 | return err; | ||
416 | } | ||
417 | EXPORT_SYMBOL(ioat_dma_setup_interrupts); | ||
418 | |||
419 | static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma) | ||
420 | { | ||
421 | /* Disable all interrupt generation */ | ||
422 | writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); | ||
423 | } | ||
424 | |||
425 | int ioat_probe(struct ioatdma_device *ioat_dma) | ||
426 | { | ||
427 | int err = -ENODEV; | ||
428 | struct dma_device *dma = &ioat_dma->dma_dev; | ||
429 | struct pci_dev *pdev = ioat_dma->pdev; | ||
430 | struct device *dev = &pdev->dev; | ||
431 | |||
432 | /* DMA coherent memory pool for DMA descriptor allocations */ | ||
433 | ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev, | ||
434 | sizeof(struct ioat_dma_descriptor), | ||
435 | 64, 0); | ||
436 | if (!ioat_dma->dma_pool) { | ||
437 | err = -ENOMEM; | ||
438 | goto err_dma_pool; | ||
439 | } | ||
440 | |||
441 | ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev, | ||
442 | sizeof(u64), | ||
443 | SMP_CACHE_BYTES, | ||
444 | SMP_CACHE_BYTES); | ||
445 | |||
446 | if (!ioat_dma->completion_pool) { | ||
447 | err = -ENOMEM; | ||
448 | goto err_completion_pool; | ||
449 | } | ||
450 | |||
451 | ioat_dma->enumerate_channels(ioat_dma); | ||
452 | |||
453 | dma_cap_set(DMA_MEMCPY, dma->cap_mask); | ||
454 | dma->dev = &pdev->dev; | ||
455 | |||
456 | if (!dma->chancnt) { | ||
457 | dev_err(dev, "channel enumeration error\n"); | ||
458 | goto err_setup_interrupts; | ||
459 | } | ||
460 | |||
461 | err = ioat_dma_setup_interrupts(ioat_dma); | ||
462 | if (err) | ||
463 | goto err_setup_interrupts; | ||
464 | |||
465 | err = ioat_dma->self_test(ioat_dma); | ||
466 | if (err) | ||
467 | goto err_self_test; | ||
468 | |||
469 | return 0; | ||
470 | |||
471 | err_self_test: | ||
472 | ioat_disable_interrupts(ioat_dma); | ||
473 | err_setup_interrupts: | ||
474 | pci_pool_destroy(ioat_dma->completion_pool); | ||
475 | err_completion_pool: | ||
476 | pci_pool_destroy(ioat_dma->dma_pool); | ||
477 | err_dma_pool: | ||
478 | return err; | ||
479 | } | ||
480 | |||
481 | int ioat_register(struct ioatdma_device *ioat_dma) | ||
482 | { | ||
483 | int err = dma_async_device_register(&ioat_dma->dma_dev); | ||
484 | |||
485 | if (err) { | ||
486 | ioat_disable_interrupts(ioat_dma); | ||
487 | pci_pool_destroy(ioat_dma->completion_pool); | ||
488 | pci_pool_destroy(ioat_dma->dma_pool); | ||
489 | } | ||
490 | |||
491 | return err; | ||
492 | } | ||
493 | |||
494 | void ioat_dma_remove(struct ioatdma_device *ioat_dma) | ||
495 | { | ||
496 | struct dma_device *dma = &ioat_dma->dma_dev; | ||
497 | |||
498 | ioat_disable_interrupts(ioat_dma); | ||
499 | |||
500 | ioat_kobject_del(ioat_dma); | ||
501 | |||
502 | dma_async_device_unregister(dma); | ||
503 | |||
504 | pci_pool_destroy(ioat_dma->dma_pool); | ||
505 | pci_pool_destroy(ioat_dma->completion_pool); | ||
506 | |||
507 | INIT_LIST_HEAD(&dma->channels); | ||
508 | } | ||
509 | |||
510 | void __ioat_issue_pending(struct ioatdma_chan *ioat_chan) | 177 | void __ioat_issue_pending(struct ioatdma_chan *ioat_chan) |
511 | { | 178 | { |
512 | ioat_chan->dmacount += ioat_ring_pending(ioat_chan); | 179 | ioat_chan->dmacount += ioat_ring_pending(ioat_chan); |
@@ -577,7 +244,7 @@ static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan) | |||
577 | __ioat_issue_pending(ioat_chan); | 244 | __ioat_issue_pending(ioat_chan); |
578 | } | 245 | } |
579 | 246 | ||
580 | static void ioat_start_null_desc(struct ioatdma_chan *ioat_chan) | 247 | void ioat_start_null_desc(struct ioatdma_chan *ioat_chan) |
581 | { | 248 | { |
582 | spin_lock_bh(&ioat_chan->prep_lock); | 249 | spin_lock_bh(&ioat_chan->prep_lock); |
583 | __ioat_start_null_desc(ioat_chan); | 250 | __ioat_start_null_desc(ioat_chan); |
@@ -645,49 +312,6 @@ int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo) | |||
645 | return err; | 312 | return err; |
646 | } | 313 | } |
647 | 314 | ||
648 | /** | ||
649 | * ioat_enumerate_channels - find and initialize the device's channels | ||
650 | * @ioat_dma: the ioat dma device to be enumerated | ||
651 | */ | ||
652 | int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) | ||
653 | { | ||
654 | struct ioatdma_chan *ioat_chan; | ||
655 | struct device *dev = &ioat_dma->pdev->dev; | ||
656 | struct dma_device *dma = &ioat_dma->dma_dev; | ||
657 | u8 xfercap_log; | ||
658 | int i; | ||
659 | |||
660 | INIT_LIST_HEAD(&dma->channels); | ||
661 | dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET); | ||
662 | dma->chancnt &= 0x1f; /* bits [4:0] valid */ | ||
663 | if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) { | ||
664 | dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", | ||
665 | dma->chancnt, ARRAY_SIZE(ioat_dma->idx)); | ||
666 | dma->chancnt = ARRAY_SIZE(ioat_dma->idx); | ||
667 | } | ||
668 | xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET); | ||
669 | xfercap_log &= 0x1f; /* bits [4:0] valid */ | ||
670 | if (xfercap_log == 0) | ||
671 | return 0; | ||
672 | dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); | ||
673 | |||
674 | for (i = 0; i < dma->chancnt; i++) { | ||
675 | ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); | ||
676 | if (!ioat_chan) | ||
677 | break; | ||
678 | |||
679 | ioat_init_channel(ioat_dma, ioat_chan, i); | ||
680 | ioat_chan->xfercap_log = xfercap_log; | ||
681 | spin_lock_init(&ioat_chan->prep_lock); | ||
682 | if (ioat_dma->reset_hw(ioat_chan)) { | ||
683 | i = 0; | ||
684 | break; | ||
685 | } | ||
686 | } | ||
687 | dma->chancnt = i; | ||
688 | return i; | ||
689 | } | ||
690 | |||
691 | static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx) | 315 | static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx) |
692 | { | 316 | { |
693 | struct dma_chan *c = tx->chan; | 317 | struct dma_chan *c = tx->chan; |
@@ -741,8 +365,7 @@ ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags) | |||
741 | return desc; | 365 | return desc; |
742 | } | 366 | } |
743 | 367 | ||
744 | static void | 368 | void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) |
745 | ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) | ||
746 | { | 369 | { |
747 | struct ioatdma_device *ioat_dma; | 370 | struct ioatdma_device *ioat_dma; |
748 | 371 | ||
@@ -751,7 +374,7 @@ ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) | |||
751 | kmem_cache_free(ioat_cache, desc); | 374 | kmem_cache_free(ioat_cache, desc); |
752 | } | 375 | } |
753 | 376 | ||
754 | static struct ioat_ring_ent ** | 377 | struct ioat_ring_ent ** |
755 | ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) | 378 | ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) |
756 | { | 379 | { |
757 | struct ioat_ring_ent **ring; | 380 | struct ioat_ring_ent **ring; |
@@ -788,128 +411,6 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) | |||
788 | return ring; | 411 | return ring; |
789 | } | 412 | } |
790 | 413 | ||
791 | /** | ||
792 | * ioat_free_chan_resources - release all the descriptors | ||
793 | * @chan: the channel to be cleaned | ||
794 | */ | ||
795 | void ioat_free_chan_resources(struct dma_chan *c) | ||
796 | { | ||
797 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
798 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | ||
799 | struct ioat_ring_ent *desc; | ||
800 | const int total_descs = 1 << ioat_chan->alloc_order; | ||
801 | int descs; | ||
802 | int i; | ||
803 | |||
804 | /* Before freeing channel resources first check | ||
805 | * if they have been previously allocated for this channel. | ||
806 | */ | ||
807 | if (!ioat_chan->ring) | ||
808 | return; | ||
809 | |||
810 | ioat_stop(ioat_chan); | ||
811 | ioat_dma->reset_hw(ioat_chan); | ||
812 | |||
813 | spin_lock_bh(&ioat_chan->cleanup_lock); | ||
814 | spin_lock_bh(&ioat_chan->prep_lock); | ||
815 | descs = ioat_ring_space(ioat_chan); | ||
816 | dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs); | ||
817 | for (i = 0; i < descs; i++) { | ||
818 | desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i); | ||
819 | ioat_free_ring_ent(desc, c); | ||
820 | } | ||
821 | |||
822 | if (descs < total_descs) | ||
823 | dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n", | ||
824 | total_descs - descs); | ||
825 | |||
826 | for (i = 0; i < total_descs - descs; i++) { | ||
827 | desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i); | ||
828 | dump_desc_dbg(ioat_chan, desc); | ||
829 | ioat_free_ring_ent(desc, c); | ||
830 | } | ||
831 | |||
832 | kfree(ioat_chan->ring); | ||
833 | ioat_chan->ring = NULL; | ||
834 | ioat_chan->alloc_order = 0; | ||
835 | pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion, | ||
836 | ioat_chan->completion_dma); | ||
837 | spin_unlock_bh(&ioat_chan->prep_lock); | ||
838 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
839 | |||
840 | ioat_chan->last_completion = 0; | ||
841 | ioat_chan->completion_dma = 0; | ||
842 | ioat_chan->dmacount = 0; | ||
843 | } | ||
844 | |||
845 | /* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring | ||
846 | * @chan: channel to be initialized | ||
847 | */ | ||
848 | int ioat_alloc_chan_resources(struct dma_chan *c) | ||
849 | { | ||
850 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
851 | struct ioat_ring_ent **ring; | ||
852 | u64 status; | ||
853 | int order; | ||
854 | int i = 0; | ||
855 | u32 chanerr; | ||
856 | |||
857 | /* have we already been set up? */ | ||
858 | if (ioat_chan->ring) | ||
859 | return 1 << ioat_chan->alloc_order; | ||
860 | |||
861 | /* Setup register to interrupt and write completion status on error */ | ||
862 | writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); | ||
863 | |||
864 | /* allocate a completion writeback area */ | ||
865 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ | ||
866 | ioat_chan->completion = | ||
867 | pci_pool_alloc(ioat_chan->ioat_dma->completion_pool, | ||
868 | GFP_KERNEL, &ioat_chan->completion_dma); | ||
869 | if (!ioat_chan->completion) | ||
870 | return -ENOMEM; | ||
871 | |||
872 | memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion)); | ||
873 | writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF, | ||
874 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); | ||
875 | writel(((u64)ioat_chan->completion_dma) >> 32, | ||
876 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); | ||
877 | |||
878 | order = ioat_get_alloc_order(); | ||
879 | ring = ioat_alloc_ring(c, order, GFP_KERNEL); | ||
880 | if (!ring) | ||
881 | return -ENOMEM; | ||
882 | |||
883 | spin_lock_bh(&ioat_chan->cleanup_lock); | ||
884 | spin_lock_bh(&ioat_chan->prep_lock); | ||
885 | ioat_chan->ring = ring; | ||
886 | ioat_chan->head = 0; | ||
887 | ioat_chan->issued = 0; | ||
888 | ioat_chan->tail = 0; | ||
889 | ioat_chan->alloc_order = order; | ||
890 | set_bit(IOAT_RUN, &ioat_chan->state); | ||
891 | spin_unlock_bh(&ioat_chan->prep_lock); | ||
892 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
893 | |||
894 | ioat_start_null_desc(ioat_chan); | ||
895 | |||
896 | /* check that we got off the ground */ | ||
897 | do { | ||
898 | udelay(1); | ||
899 | status = ioat_chansts(ioat_chan); | ||
900 | } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); | ||
901 | |||
902 | if (is_ioat_active(status) || is_ioat_idle(status)) | ||
903 | return 1 << ioat_chan->alloc_order; | ||
904 | |||
905 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
906 | |||
907 | dev_WARN(to_dev(ioat_chan), | ||
908 | "failed to start channel chanerr: %#x\n", chanerr); | ||
909 | ioat_free_chan_resources(c); | ||
910 | return -EFAULT; | ||
911 | } | ||
912 | |||
913 | bool reshape_ring(struct ioatdma_chan *ioat_chan, int order) | 414 | bool reshape_ring(struct ioatdma_chan *ioat_chan, int order) |
914 | { | 415 | { |
915 | /* reshape differs from normal ring allocation in that we want | 416 | /* reshape differs from normal ring allocation in that we want |
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 2566ec6ae8a4..d2ffa5775d53 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -381,6 +381,43 @@ ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr) | |||
381 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | 381 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); |
382 | } | 382 | } |
383 | 383 | ||
384 | irqreturn_t ioat_dma_do_interrupt(int irq, void *data); | ||
385 | irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data); | ||
386 | struct ioat_ring_ent ** | ||
387 | ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags); | ||
388 | void ioat_start_null_desc(struct ioatdma_chan *ioat_chan); | ||
389 | void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan); | ||
390 | int ioat_reset_hw(struct ioatdma_chan *ioat_chan); | ||
391 | struct dma_async_tx_descriptor * | ||
392 | ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags); | ||
393 | struct dma_async_tx_descriptor * | ||
394 | ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | ||
395 | unsigned int src_cnt, size_t len, unsigned long flags); | ||
396 | struct dma_async_tx_descriptor * | ||
397 | ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, | ||
398 | unsigned int src_cnt, size_t len, | ||
399 | enum sum_check_flags *result, unsigned long flags); | ||
400 | struct dma_async_tx_descriptor * | ||
401 | ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | ||
402 | unsigned int src_cnt, const unsigned char *scf, size_t len, | ||
403 | unsigned long flags); | ||
404 | struct dma_async_tx_descriptor * | ||
405 | ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | ||
406 | unsigned int src_cnt, const unsigned char *scf, size_t len, | ||
407 | enum sum_check_flags *pqres, unsigned long flags); | ||
408 | struct dma_async_tx_descriptor * | ||
409 | ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | ||
410 | unsigned int src_cnt, size_t len, unsigned long flags); | ||
411 | struct dma_async_tx_descriptor * | ||
412 | ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | ||
413 | unsigned int src_cnt, size_t len, | ||
414 | enum sum_check_flags *result, unsigned long flags); | ||
415 | enum dma_status | ||
416 | ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, | ||
417 | struct dma_tx_state *txstate); | ||
418 | void ioat_cleanup_event(unsigned long data); | ||
419 | void ioat_timer_event(unsigned long data); | ||
420 | bool is_bwd_ioat(struct pci_dev *pdev); | ||
384 | int ioat_probe(struct ioatdma_device *ioat_dma); | 421 | int ioat_probe(struct ioatdma_device *ioat_dma); |
385 | int ioat_register(struct ioatdma_device *ioat_dma); | 422 | int ioat_register(struct ioatdma_device *ioat_dma); |
386 | int ioat_dma_self_test(struct ioatdma_device *ioat_dma); | 423 | int ioat_dma_self_test(struct ioatdma_device *ioat_dma); |
@@ -421,5 +458,7 @@ extern int ioat_pending_level; | |||
421 | extern int ioat_ring_alloc_order; | 458 | extern int ioat_ring_alloc_order; |
422 | extern struct kobj_type ioat_ktype; | 459 | extern struct kobj_type ioat_ktype; |
423 | extern struct kmem_cache *ioat_cache; | 460 | extern struct kmem_cache *ioat_cache; |
461 | extern int ioat_ring_max_alloc_order; | ||
462 | extern struct kmem_cache *ioat_sed_cache; | ||
424 | 463 | ||
425 | #endif /* IOATDMA_H */ | 464 | #endif /* IOATDMA_H */ |
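
With these prototypes exported, the dma_device wiring that ioat3_dma_probe() used to do in dma_v3.c (removed further down in this patch) can move to init.c. A hypothetical excerpt of how init.c is expected to hook up the renamed ioat_prep_* routines, mirroring the removed ioat3_dma_probe() body with the ioat3_ prefix dropped:

/* init.c (hypothetical excerpt): register the prep callbacks that are
 * now declared in dma.h instead of being static to dma_v3.c */
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;

if (ioat_dma->cap & IOAT_CAP_XOR) {
	dma->max_xor = 8;
	dma_cap_set(DMA_XOR, dma->cap_mask);
	dma->device_prep_dma_xor = ioat_prep_xor;
	dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
	dma->device_prep_dma_xor_val = ioat_prep_xor_val;
}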
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 2a47f3968ce4..f6a194a3a463 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -62,8 +62,6 @@ | |||
62 | #include "hw.h" | 62 | #include "hw.h" |
63 | #include "dma.h" | 63 | #include "dma.h" |
64 | 64 | ||
65 | extern struct kmem_cache *ioat3_sed_cache; | ||
66 | |||
67 | /* ioat hardware assumes at least two sources for raid operations */ | 65 | /* ioat hardware assumes at least two sources for raid operations */ |
68 | #define src_cnt_to_sw(x) ((x) + 2) | 66 | #define src_cnt_to_sw(x) ((x) + 2) |
69 | #define src_cnt_to_hw(x) ((x) - 2) | 67 | #define src_cnt_to_hw(x) ((x) - 2) |
@@ -118,124 +116,6 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2], | |||
118 | pq->coef[idx] = coef; | 116 | pq->coef[idx] = coef; |
119 | } | 117 | } |
120 | 118 | ||
121 | static bool is_jf_ioat(struct pci_dev *pdev) | ||
122 | { | ||
123 | switch (pdev->device) { | ||
124 | case PCI_DEVICE_ID_INTEL_IOAT_JSF0: | ||
125 | case PCI_DEVICE_ID_INTEL_IOAT_JSF1: | ||
126 | case PCI_DEVICE_ID_INTEL_IOAT_JSF2: | ||
127 | case PCI_DEVICE_ID_INTEL_IOAT_JSF3: | ||
128 | case PCI_DEVICE_ID_INTEL_IOAT_JSF4: | ||
129 | case PCI_DEVICE_ID_INTEL_IOAT_JSF5: | ||
130 | case PCI_DEVICE_ID_INTEL_IOAT_JSF6: | ||
131 | case PCI_DEVICE_ID_INTEL_IOAT_JSF7: | ||
132 | case PCI_DEVICE_ID_INTEL_IOAT_JSF8: | ||
133 | case PCI_DEVICE_ID_INTEL_IOAT_JSF9: | ||
134 | return true; | ||
135 | default: | ||
136 | return false; | ||
137 | } | ||
138 | } | ||
139 | |||
140 | static bool is_snb_ioat(struct pci_dev *pdev) | ||
141 | { | ||
142 | switch (pdev->device) { | ||
143 | case PCI_DEVICE_ID_INTEL_IOAT_SNB0: | ||
144 | case PCI_DEVICE_ID_INTEL_IOAT_SNB1: | ||
145 | case PCI_DEVICE_ID_INTEL_IOAT_SNB2: | ||
146 | case PCI_DEVICE_ID_INTEL_IOAT_SNB3: | ||
147 | case PCI_DEVICE_ID_INTEL_IOAT_SNB4: | ||
148 | case PCI_DEVICE_ID_INTEL_IOAT_SNB5: | ||
149 | case PCI_DEVICE_ID_INTEL_IOAT_SNB6: | ||
150 | case PCI_DEVICE_ID_INTEL_IOAT_SNB7: | ||
151 | case PCI_DEVICE_ID_INTEL_IOAT_SNB8: | ||
152 | case PCI_DEVICE_ID_INTEL_IOAT_SNB9: | ||
153 | return true; | ||
154 | default: | ||
155 | return false; | ||
156 | } | ||
157 | } | ||
158 | |||
159 | static bool is_ivb_ioat(struct pci_dev *pdev) | ||
160 | { | ||
161 | switch (pdev->device) { | ||
162 | case PCI_DEVICE_ID_INTEL_IOAT_IVB0: | ||
163 | case PCI_DEVICE_ID_INTEL_IOAT_IVB1: | ||
164 | case PCI_DEVICE_ID_INTEL_IOAT_IVB2: | ||
165 | case PCI_DEVICE_ID_INTEL_IOAT_IVB3: | ||
166 | case PCI_DEVICE_ID_INTEL_IOAT_IVB4: | ||
167 | case PCI_DEVICE_ID_INTEL_IOAT_IVB5: | ||
168 | case PCI_DEVICE_ID_INTEL_IOAT_IVB6: | ||
169 | case PCI_DEVICE_ID_INTEL_IOAT_IVB7: | ||
170 | case PCI_DEVICE_ID_INTEL_IOAT_IVB8: | ||
171 | case PCI_DEVICE_ID_INTEL_IOAT_IVB9: | ||
172 | return true; | ||
173 | default: | ||
174 | return false; | ||
175 | } | ||
176 | |||
177 | } | ||
178 | |||
179 | static bool is_hsw_ioat(struct pci_dev *pdev) | ||
180 | { | ||
181 | switch (pdev->device) { | ||
182 | case PCI_DEVICE_ID_INTEL_IOAT_HSW0: | ||
183 | case PCI_DEVICE_ID_INTEL_IOAT_HSW1: | ||
184 | case PCI_DEVICE_ID_INTEL_IOAT_HSW2: | ||
185 | case PCI_DEVICE_ID_INTEL_IOAT_HSW3: | ||
186 | case PCI_DEVICE_ID_INTEL_IOAT_HSW4: | ||
187 | case PCI_DEVICE_ID_INTEL_IOAT_HSW5: | ||
188 | case PCI_DEVICE_ID_INTEL_IOAT_HSW6: | ||
189 | case PCI_DEVICE_ID_INTEL_IOAT_HSW7: | ||
190 | case PCI_DEVICE_ID_INTEL_IOAT_HSW8: | ||
191 | case PCI_DEVICE_ID_INTEL_IOAT_HSW9: | ||
192 | return true; | ||
193 | default: | ||
194 | return false; | ||
195 | } | ||
196 | |||
197 | } | ||
198 | |||
199 | static bool is_xeon_cb32(struct pci_dev *pdev) | ||
200 | { | ||
201 | return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || | ||
202 | is_hsw_ioat(pdev); | ||
203 | } | ||
204 | |||
205 | static bool is_bwd_ioat(struct pci_dev *pdev) | ||
206 | { | ||
207 | switch (pdev->device) { | ||
208 | case PCI_DEVICE_ID_INTEL_IOAT_BWD0: | ||
209 | case PCI_DEVICE_ID_INTEL_IOAT_BWD1: | ||
210 | case PCI_DEVICE_ID_INTEL_IOAT_BWD2: | ||
211 | case PCI_DEVICE_ID_INTEL_IOAT_BWD3: | ||
212 | /* even though not Atom, BDX-DE has same DMA silicon */ | ||
213 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: | ||
214 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: | ||
215 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: | ||
216 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: | ||
217 | return true; | ||
218 | default: | ||
219 | return false; | ||
220 | } | ||
221 | } | ||
222 | |||
223 | static bool is_bwd_noraid(struct pci_dev *pdev) | ||
224 | { | ||
225 | switch (pdev->device) { | ||
226 | case PCI_DEVICE_ID_INTEL_IOAT_BWD2: | ||
227 | case PCI_DEVICE_ID_INTEL_IOAT_BWD3: | ||
228 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: | ||
229 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: | ||
230 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: | ||
231 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: | ||
232 | return true; | ||
233 | default: | ||
234 | return false; | ||
235 | } | ||
236 | |||
237 | } | ||
238 | |||
239 | static void pq16_set_src(struct ioat_raw_descriptor *desc[3], | 119 | static void pq16_set_src(struct ioat_raw_descriptor *desc[3], |
240 | dma_addr_t addr, u32 offset, u8 coef, unsigned idx) | 120 | dma_addr_t addr, u32 offset, u8 coef, unsigned idx) |
241 | { | 121 | { |
@@ -258,7 +138,7 @@ ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool) | |||
258 | struct ioat_sed_ent *sed; | 138 | struct ioat_sed_ent *sed; |
259 | gfp_t flags = __GFP_ZERO | GFP_ATOMIC; | 139 | gfp_t flags = __GFP_ZERO | GFP_ATOMIC; |
260 | 140 | ||
261 | sed = kmem_cache_alloc(ioat3_sed_cache, flags); | 141 | sed = kmem_cache_alloc(ioat_sed_cache, flags); |
262 | if (!sed) | 142 | if (!sed) |
263 | return NULL; | 143 | return NULL; |
264 | 144 | ||
@@ -266,7 +146,7 @@ ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool) | |||
266 | sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool], | 146 | sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool], |
267 | flags, &sed->dma); | 147 | flags, &sed->dma); |
268 | if (!sed->hw) { | 148 | if (!sed->hw) { |
269 | kmem_cache_free(ioat3_sed_cache, sed); | 149 | kmem_cache_free(ioat_sed_cache, sed); |
270 | return NULL; | 150 | return NULL; |
271 | } | 151 | } |
272 | 152 | ||
@@ -280,7 +160,7 @@ ioat3_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed) | |||
280 | return; | 160 | return; |
281 | 161 | ||
282 | dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); | 162 | dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); |
283 | kmem_cache_free(ioat3_sed_cache, sed); | 163 | kmem_cache_free(ioat_sed_cache, sed); |
284 | } | 164 | } |
285 | 165 | ||
286 | static bool desc_has_ext(struct ioat_ring_ent *desc) | 166 | static bool desc_has_ext(struct ioat_ring_ent *desc) |
@@ -464,7 +344,7 @@ static void ioat3_cleanup(struct ioatdma_chan *ioat_chan) | |||
464 | spin_unlock_bh(&ioat_chan->cleanup_lock); | 344 | spin_unlock_bh(&ioat_chan->cleanup_lock); |
465 | } | 345 | } |
466 | 346 | ||
467 | static void ioat3_cleanup_event(unsigned long data) | 347 | void ioat_cleanup_event(unsigned long data) |
468 | { | 348 | { |
469 | struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); | 349 | struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); |
470 | 350 | ||
@@ -582,7 +462,7 @@ static void check_active(struct ioatdma_chan *ioat_chan) | |||
582 | 462 | ||
583 | } | 463 | } |
584 | 464 | ||
585 | static void ioat3_timer_event(unsigned long data) | 465 | void ioat_timer_event(unsigned long data) |
586 | { | 466 | { |
587 | struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); | 467 | struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); |
588 | dma_addr_t phys_complete; | 468 | dma_addr_t phys_complete; |
@@ -634,8 +514,8 @@ static void ioat3_timer_event(unsigned long data) | |||
634 | spin_unlock_bh(&ioat_chan->cleanup_lock); | 514 | spin_unlock_bh(&ioat_chan->cleanup_lock); |
635 | } | 515 | } |
636 | 516 | ||
637 | static enum dma_status | 517 | enum dma_status |
638 | ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, | 518 | ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, |
639 | struct dma_tx_state *txstate) | 519 | struct dma_tx_state *txstate) |
640 | { | 520 | { |
641 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | 521 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); |
@@ -651,7 +531,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
651 | } | 531 | } |
652 | 532 | ||
653 | static struct dma_async_tx_descriptor * | 533 | static struct dma_async_tx_descriptor * |
654 | __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, | 534 | __ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, |
655 | dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, | 535 | dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, |
656 | size_t len, unsigned long flags) | 536 | size_t len, unsigned long flags) |
657 | { | 537 | { |
@@ -743,15 +623,15 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
743 | return &compl_desc->txd; | 623 | return &compl_desc->txd; |
744 | } | 624 | } |
745 | 625 | ||
746 | static struct dma_async_tx_descriptor * | 626 | struct dma_async_tx_descriptor * |
747 | ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | 627 | ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, |
748 | unsigned int src_cnt, size_t len, unsigned long flags) | 628 | unsigned int src_cnt, size_t len, unsigned long flags) |
749 | { | 629 | { |
750 | return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); | 630 | return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); |
751 | } | 631 | } |
752 | 632 | ||
753 | static struct dma_async_tx_descriptor * | 633 | struct dma_async_tx_descriptor * |
754 | ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, | 634 | ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, |
755 | unsigned int src_cnt, size_t len, | 635 | unsigned int src_cnt, size_t len, |
756 | enum sum_check_flags *result, unsigned long flags) | 636 | enum sum_check_flags *result, unsigned long flags) |
757 | { | 637 | { |
@@ -760,7 +640,7 @@ ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, | |||
760 | */ | 640 | */ |
761 | *result = 0; | 641 | *result = 0; |
762 | 642 | ||
763 | return __ioat3_prep_xor_lock(chan, result, src[0], &src[1], | 643 | return __ioat_prep_xor_lock(chan, result, src[0], &src[1], |
764 | src_cnt - 1, len, flags); | 644 | src_cnt - 1, len, flags); |
765 | } | 645 | } |
766 | 646 | ||
@@ -828,7 +708,7 @@ static void dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan, | |||
828 | } | 708 | } |
829 | 709 | ||
830 | static struct dma_async_tx_descriptor * | 710 | static struct dma_async_tx_descriptor * |
831 | __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | 711 | __ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, |
832 | const dma_addr_t *dst, const dma_addr_t *src, | 712 | const dma_addr_t *dst, const dma_addr_t *src, |
833 | unsigned int src_cnt, const unsigned char *scf, | 713 | unsigned int src_cnt, const unsigned char *scf, |
834 | size_t len, unsigned long flags) | 714 | size_t len, unsigned long flags) |
@@ -952,7 +832,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
952 | } | 832 | } |
953 | 833 | ||
954 | static struct dma_async_tx_descriptor * | 834 | static struct dma_async_tx_descriptor * |
955 | __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, | 835 | __ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, |
956 | const dma_addr_t *dst, const dma_addr_t *src, | 836 | const dma_addr_t *dst, const dma_addr_t *src, |
957 | unsigned int src_cnt, const unsigned char *scf, | 837 | unsigned int src_cnt, const unsigned char *scf, |
958 | size_t len, unsigned long flags) | 838 | size_t len, unsigned long flags) |
@@ -1062,8 +942,8 @@ static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) | |||
1062 | return src_cnt; | 942 | return src_cnt; |
1063 | } | 943 | } |
1064 | 944 | ||
1065 | static struct dma_async_tx_descriptor * | 945 | struct dma_async_tx_descriptor * |
1066 | ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | 946 | ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, |
1067 | unsigned int src_cnt, const unsigned char *scf, size_t len, | 947 | unsigned int src_cnt, const unsigned char *scf, size_t len, |
1068 | unsigned long flags) | 948 | unsigned long flags) |
1069 | { | 949 | { |
@@ -1087,23 +967,23 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | |||
1087 | single_source_coef[1] = 0; | 967 | single_source_coef[1] = 0; |
1088 | 968 | ||
1089 | return src_cnt_flags(src_cnt, flags) > 8 ? | 969 | return src_cnt_flags(src_cnt, flags) > 8 ? |
1090 | __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, | 970 | __ioat_prep_pq16_lock(chan, NULL, dst, single_source, |
1091 | 2, single_source_coef, len, | 971 | 2, single_source_coef, len, |
1092 | flags) : | 972 | flags) : |
1093 | __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, | 973 | __ioat_prep_pq_lock(chan, NULL, dst, single_source, 2, |
1094 | single_source_coef, len, flags); | 974 | single_source_coef, len, flags); |
1095 | 975 | ||
1096 | } else { | 976 | } else { |
1097 | return src_cnt_flags(src_cnt, flags) > 8 ? | 977 | return src_cnt_flags(src_cnt, flags) > 8 ? |
1098 | __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, | 978 | __ioat_prep_pq16_lock(chan, NULL, dst, src, src_cnt, |
1099 | scf, len, flags) : | 979 | scf, len, flags) : |
1100 | __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, | 980 | __ioat_prep_pq_lock(chan, NULL, dst, src, src_cnt, |
1101 | scf, len, flags); | 981 | scf, len, flags); |
1102 | } | 982 | } |
1103 | } | 983 | } |
1104 | 984 | ||
1105 | static struct dma_async_tx_descriptor * | 985 | struct dma_async_tx_descriptor * |
1106 | ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | 986 | ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, |
1107 | unsigned int src_cnt, const unsigned char *scf, size_t len, | 987 | unsigned int src_cnt, const unsigned char *scf, size_t len, |
1108 | enum sum_check_flags *pqres, unsigned long flags) | 988 | enum sum_check_flags *pqres, unsigned long flags) |
1109 | { | 989 | { |
@@ -1119,14 +999,14 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | |||
1119 | *pqres = 0; | 999 | *pqres = 0; |
1120 | 1000 | ||
1121 | return src_cnt_flags(src_cnt, flags) > 8 ? | 1001 | return src_cnt_flags(src_cnt, flags) > 8 ? |
1122 | __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, | 1002 | __ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, |
1123 | flags) : | 1003 | flags) : |
1124 | __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, | 1004 | __ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, |
1125 | flags); | 1005 | flags); |
1126 | } | 1006 | } |
1127 | 1007 | ||
1128 | static struct dma_async_tx_descriptor * | 1008 | struct dma_async_tx_descriptor * |
1129 | ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | 1009 | ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, |
1130 | unsigned int src_cnt, size_t len, unsigned long flags) | 1010 | unsigned int src_cnt, size_t len, unsigned long flags) |
1131 | { | 1011 | { |
1132 | unsigned char scf[src_cnt]; | 1012 | unsigned char scf[src_cnt]; |
@@ -1138,14 +1018,14 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | |||
1138 | pq[1] = dst; /* specify valid address for disabled result */ | 1018 | pq[1] = dst; /* specify valid address for disabled result */ |
1139 | 1019 | ||
1140 | return src_cnt_flags(src_cnt, flags) > 8 ? | 1020 | return src_cnt_flags(src_cnt, flags) > 8 ? |
1141 | __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, | 1021 | __ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, |
1142 | flags) : | 1022 | flags) : |
1143 | __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, | 1023 | __ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, |
1144 | flags); | 1024 | flags); |
1145 | } | 1025 | } |
1146 | 1026 | ||
1147 | static struct dma_async_tx_descriptor * | 1027 | struct dma_async_tx_descriptor * |
1148 | ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | 1028 | ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, |
1149 | unsigned int src_cnt, size_t len, | 1029 | unsigned int src_cnt, size_t len, |
1150 | enum sum_check_flags *result, unsigned long flags) | 1030 | enum sum_check_flags *result, unsigned long flags) |
1151 | { | 1031 | { |
@@ -1163,14 +1043,14 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | |||
1163 | pq[1] = pq[0]; /* specify valid address for disabled result */ | 1043 | pq[1] = pq[0]; /* specify valid address for disabled result */ |
1164 | 1044 | ||
1165 | return src_cnt_flags(src_cnt, flags) > 8 ? | 1045 | return src_cnt_flags(src_cnt, flags) > 8 ? |
1166 | __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, | 1046 | __ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, |
1167 | scf, len, flags) : | 1047 | scf, len, flags) : |
1168 | __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, | 1048 | __ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, |
1169 | scf, len, flags); | 1049 | scf, len, flags); |
1170 | } | 1050 | } |
1171 | 1051 | ||
1172 | static struct dma_async_tx_descriptor * | 1052 | struct dma_async_tx_descriptor * |
1173 | ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) | 1053 | ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) |
1174 | { | 1054 | { |
1175 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | 1055 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); |
1176 | struct ioat_ring_ent *desc; | 1056 | struct ioat_ring_ent *desc; |
@@ -1200,293 +1080,6 @@ ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) | |||
1200 | return &desc->txd; | 1080 | return &desc->txd; |
1201 | } | 1081 | } |
1202 | 1082 | ||
1203 | static void ioat3_dma_test_callback(void *dma_async_param) | ||
1204 | { | ||
1205 | struct completion *cmp = dma_async_param; | ||
1206 | |||
1207 | complete(cmp); | ||
1208 | } | ||
1209 | |||
1210 | #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ | ||
1211 | static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) | ||
1212 | { | ||
1213 | int i, src_idx; | ||
1214 | struct page *dest; | ||
1215 | struct page *xor_srcs[IOAT_NUM_SRC_TEST]; | ||
1216 | struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; | ||
1217 | dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; | ||
1218 | dma_addr_t dest_dma; | ||
1219 | struct dma_async_tx_descriptor *tx; | ||
1220 | struct dma_chan *dma_chan; | ||
1221 | dma_cookie_t cookie; | ||
1222 | u8 cmp_byte = 0; | ||
1223 | u32 cmp_word; | ||
1224 | u32 xor_val_result; | ||
1225 | int err = 0; | ||
1226 | struct completion cmp; | ||
1227 | unsigned long tmo; | ||
1228 | struct device *dev = &ioat_dma->pdev->dev; | ||
1229 | struct dma_device *dma = &ioat_dma->dma_dev; | ||
1230 | u8 op = 0; | ||
1231 | |||
1232 | dev_dbg(dev, "%s\n", __func__); | ||
1233 | |||
1234 | if (!dma_has_cap(DMA_XOR, dma->cap_mask)) | ||
1235 | return 0; | ||
1236 | |||
1237 | for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { | ||
1238 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); | ||
1239 | if (!xor_srcs[src_idx]) { | ||
1240 | while (src_idx--) | ||
1241 | __free_page(xor_srcs[src_idx]); | ||
1242 | return -ENOMEM; | ||
1243 | } | ||
1244 | } | ||
1245 | |||
1246 | dest = alloc_page(GFP_KERNEL); | ||
1247 | if (!dest) { | ||
1248 | while (src_idx--) | ||
1249 | __free_page(xor_srcs[src_idx]); | ||
1250 | return -ENOMEM; | ||
1251 | } | ||
1252 | |||
1253 | /* Fill in src buffers */ | ||
1254 | for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { | ||
1255 | u8 *ptr = page_address(xor_srcs[src_idx]); | ||
1256 | for (i = 0; i < PAGE_SIZE; i++) | ||
1257 | ptr[i] = (1 << src_idx); | ||
1258 | } | ||
1259 | |||
1260 | for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) | ||
1261 | cmp_byte ^= (u8) (1 << src_idx); | ||
1262 | |||
1263 | cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | | ||
1264 | (cmp_byte << 8) | cmp_byte; | ||
1265 | |||
1266 | memset(page_address(dest), 0, PAGE_SIZE); | ||
1267 | |||
1268 | dma_chan = container_of(dma->channels.next, struct dma_chan, | ||
1269 | device_node); | ||
1270 | if (dma->device_alloc_chan_resources(dma_chan) < 1) { | ||
1271 | err = -ENODEV; | ||
1272 | goto out; | ||
1273 | } | ||
1274 | |||
1275 | /* test xor */ | ||
1276 | op = IOAT_OP_XOR; | ||
1277 | |||
1278 | dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); | ||
1279 | if (dma_mapping_error(dev, dest_dma)) | ||
1280 | goto dma_unmap; | ||
1281 | |||
1282 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | ||
1283 | dma_srcs[i] = DMA_ERROR_CODE; | ||
1284 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) { | ||
1285 | dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, | ||
1286 | DMA_TO_DEVICE); | ||
1287 | if (dma_mapping_error(dev, dma_srcs[i])) | ||
1288 | goto dma_unmap; | ||
1289 | } | ||
1290 | tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | ||
1291 | IOAT_NUM_SRC_TEST, PAGE_SIZE, | ||
1292 | DMA_PREP_INTERRUPT); | ||
1293 | |||
1294 | if (!tx) { | ||
1295 | dev_err(dev, "Self-test xor prep failed\n"); | ||
1296 | err = -ENODEV; | ||
1297 | goto dma_unmap; | ||
1298 | } | ||
1299 | |||
1300 | async_tx_ack(tx); | ||
1301 | init_completion(&cmp); | ||
1302 | tx->callback = ioat3_dma_test_callback; | ||
1303 | tx->callback_param = &cmp; | ||
1304 | cookie = tx->tx_submit(tx); | ||
1305 | if (cookie < 0) { | ||
1306 | dev_err(dev, "Self-test xor setup failed\n"); | ||
1307 | err = -ENODEV; | ||
1308 | goto dma_unmap; | ||
1309 | } | ||
1310 | dma->device_issue_pending(dma_chan); | ||
1311 | |||
1312 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
1313 | |||
1314 | if (tmo == 0 || | ||
1315 | dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { | ||
1316 | dev_err(dev, "Self-test xor timed out\n"); | ||
1317 | err = -ENODEV; | ||
1318 | goto dma_unmap; | ||
1319 | } | ||
1320 | |||
1321 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | ||
1322 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); | ||
1323 | |||
1324 | dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); | ||
1325 | for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { | ||
1326 | u32 *ptr = page_address(dest); | ||
1327 | if (ptr[i] != cmp_word) { | ||
1328 | dev_err(dev, "Self-test xor failed compare\n"); | ||
1329 | err = -ENODEV; | ||
1330 | goto free_resources; | ||
1331 | } | ||
1332 | } | ||
1333 | dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); | ||
1334 | |||
1335 | dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); | ||
1336 | |||
1337 | /* skip validate if the capability is not present */ | ||
1338 | if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) | ||
1339 | goto free_resources; | ||
1340 | |||
1341 | op = IOAT_OP_XOR_VAL; | ||
1342 | |||
1343 | /* validate the sources with the destintation page */ | ||
1344 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | ||
1345 | xor_val_srcs[i] = xor_srcs[i]; | ||
1346 | xor_val_srcs[i] = dest; | ||
1347 | |||
1348 | xor_val_result = 1; | ||
1349 | |||
1350 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
1351 | dma_srcs[i] = DMA_ERROR_CODE; | ||
1352 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { | ||
1353 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | ||
1354 | DMA_TO_DEVICE); | ||
1355 | if (dma_mapping_error(dev, dma_srcs[i])) | ||
1356 | goto dma_unmap; | ||
1357 | } | ||
1358 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | ||
1359 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | ||
1360 | &xor_val_result, DMA_PREP_INTERRUPT); | ||
1361 | if (!tx) { | ||
1362 | dev_err(dev, "Self-test zero prep failed\n"); | ||
1363 | err = -ENODEV; | ||
1364 | goto dma_unmap; | ||
1365 | } | ||
1366 | |||
1367 | async_tx_ack(tx); | ||
1368 | init_completion(&cmp); | ||
1369 | tx->callback = ioat3_dma_test_callback; | ||
1370 | tx->callback_param = &cmp; | ||
1371 | cookie = tx->tx_submit(tx); | ||
1372 | if (cookie < 0) { | ||
1373 | dev_err(dev, "Self-test zero setup failed\n"); | ||
1374 | err = -ENODEV; | ||
1375 | goto dma_unmap; | ||
1376 | } | ||
1377 | dma->device_issue_pending(dma_chan); | ||
1378 | |||
1379 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
1380 | |||
1381 | if (tmo == 0 || | ||
1382 | dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { | ||
1383 | dev_err(dev, "Self-test validate timed out\n"); | ||
1384 | err = -ENODEV; | ||
1385 | goto dma_unmap; | ||
1386 | } | ||
1387 | |||
1388 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
1389 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); | ||
1390 | |||
1391 | if (xor_val_result != 0) { | ||
1392 | dev_err(dev, "Self-test validate failed compare\n"); | ||
1393 | err = -ENODEV; | ||
1394 | goto free_resources; | ||
1395 | } | ||
1396 | |||
1397 | memset(page_address(dest), 0, PAGE_SIZE); | ||
1398 | |||
1399 | /* test for non-zero parity sum */ | ||
1400 | op = IOAT_OP_XOR_VAL; | ||
1401 | |||
1402 | xor_val_result = 0; | ||
1403 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
1404 | dma_srcs[i] = DMA_ERROR_CODE; | ||
1405 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { | ||
1406 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | ||
1407 | DMA_TO_DEVICE); | ||
1408 | if (dma_mapping_error(dev, dma_srcs[i])) | ||
1409 | goto dma_unmap; | ||
1410 | } | ||
1411 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | ||
1412 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | ||
1413 | &xor_val_result, DMA_PREP_INTERRUPT); | ||
1414 | if (!tx) { | ||
1415 | dev_err(dev, "Self-test 2nd zero prep failed\n"); | ||
1416 | err = -ENODEV; | ||
1417 | goto dma_unmap; | ||
1418 | } | ||
1419 | |||
1420 | async_tx_ack(tx); | ||
1421 | init_completion(&cmp); | ||
1422 | tx->callback = ioat3_dma_test_callback; | ||
1423 | tx->callback_param = &cmp; | ||
1424 | cookie = tx->tx_submit(tx); | ||
1425 | if (cookie < 0) { | ||
1426 | dev_err(dev, "Self-test 2nd zero setup failed\n"); | ||
1427 | err = -ENODEV; | ||
1428 | goto dma_unmap; | ||
1429 | } | ||
1430 | dma->device_issue_pending(dma_chan); | ||
1431 | |||
1432 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
1433 | |||
1434 | if (tmo == 0 || | ||
1435 | dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { | ||
1436 | dev_err(dev, "Self-test 2nd validate timed out\n"); | ||
1437 | err = -ENODEV; | ||
1438 | goto dma_unmap; | ||
1439 | } | ||
1440 | |||
1441 | if (xor_val_result != SUM_CHECK_P_RESULT) { | ||
1442 | dev_err(dev, "Self-test validate failed compare\n"); | ||
1443 | err = -ENODEV; | ||
1444 | goto dma_unmap; | ||
1445 | } | ||
1446 | |||
1447 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
1448 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); | ||
1449 | |||
1450 | goto free_resources; | ||
1451 | dma_unmap: | ||
1452 | if (op == IOAT_OP_XOR) { | ||
1453 | if (dest_dma != DMA_ERROR_CODE) | ||
1454 | dma_unmap_page(dev, dest_dma, PAGE_SIZE, | ||
1455 | DMA_FROM_DEVICE); | ||
1456 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | ||
1457 | if (dma_srcs[i] != DMA_ERROR_CODE) | ||
1458 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, | ||
1459 | DMA_TO_DEVICE); | ||
1460 | } else if (op == IOAT_OP_XOR_VAL) { | ||
1461 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
1462 | if (dma_srcs[i] != DMA_ERROR_CODE) | ||
1463 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, | ||
1464 | DMA_TO_DEVICE); | ||
1465 | } | ||
1466 | free_resources: | ||
1467 | dma->device_free_chan_resources(dma_chan); | ||
1468 | out: | ||
1469 | src_idx = IOAT_NUM_SRC_TEST; | ||
1470 | while (src_idx--) | ||
1471 | __free_page(xor_srcs[src_idx]); | ||
1472 | __free_page(dest); | ||
1473 | return err; | ||
1474 | } | ||
1475 | |||
1476 | static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma) | ||
1477 | { | ||
1478 | int rc = ioat_dma_self_test(ioat_dma); | ||
1479 | |||
1480 | if (rc) | ||
1481 | return rc; | ||
1482 | |||
1483 | rc = ioat_xor_val_self_test(ioat_dma); | ||
1484 | if (rc) | ||
1485 | return rc; | ||
1486 | |||
1487 | return 0; | ||
1488 | } | ||
1489 | |||
1490 | static int ioat3_irq_reinit(struct ioatdma_device *ioat_dma) | 1083 | static int ioat3_irq_reinit(struct ioatdma_device *ioat_dma) |
1491 | { | 1084 | { |
1492 | struct pci_dev *pdev = ioat_dma->pdev; | 1085 | struct pci_dev *pdev = ioat_dma->pdev; |
@@ -1521,7 +1114,7 @@ static int ioat3_irq_reinit(struct ioatdma_device *ioat_dma) | |||
1521 | return ioat_dma_setup_interrupts(ioat_dma); | 1114 | return ioat_dma_setup_interrupts(ioat_dma); |
1522 | } | 1115 | } |
1523 | 1116 | ||
1524 | static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan) | 1117 | int ioat_reset_hw(struct ioatdma_chan *ioat_chan) |
1525 | { | 1118 | { |
1526 | /* throw away whatever the channel was doing and get it | 1119 | /* throw away whatever the channel was doing and get it |
1527 | * initialized, with ioat3 specific workarounds | 1120 | * initialized, with ioat3 specific workarounds |
@@ -1569,148 +1162,3 @@ static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan) | |||
1569 | 1162 | ||
1570 | return err; | 1163 | return err; |
1571 | } | 1164 | } |
1572 | |||
1573 | static void ioat3_intr_quirk(struct ioatdma_device *ioat_dma) | ||
1574 | { | ||
1575 | struct dma_device *dma; | ||
1576 | struct dma_chan *c; | ||
1577 | struct ioatdma_chan *ioat_chan; | ||
1578 | u32 errmask; | ||
1579 | |||
1580 | dma = &ioat_dma->dma_dev; | ||
1581 | |||
1582 | /* | ||
1583 | * if we have descriptor write back error status, we mask the | ||
1584 | * error interrupts | ||
1585 | */ | ||
1586 | if (ioat_dma->cap & IOAT_CAP_DWBES) { | ||
1587 | list_for_each_entry(c, &dma->channels, device_node) { | ||
1588 | ioat_chan = to_ioat_chan(c); | ||
1589 | errmask = readl(ioat_chan->reg_base + | ||
1590 | IOAT_CHANERR_MASK_OFFSET); | ||
1591 | errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR | | ||
1592 | IOAT_CHANERR_XOR_Q_ERR; | ||
1593 | writel(errmask, ioat_chan->reg_base + | ||
1594 | IOAT_CHANERR_MASK_OFFSET); | ||
1595 | } | ||
1596 | } | ||
1597 | } | ||
1598 | |||
1599 | int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | ||
1600 | { | ||
1601 | struct pci_dev *pdev = ioat_dma->pdev; | ||
1602 | int dca_en = system_has_dca_enabled(pdev); | ||
1603 | struct dma_device *dma; | ||
1604 | struct dma_chan *c; | ||
1605 | struct ioatdma_chan *ioat_chan; | ||
1606 | bool is_raid_device = false; | ||
1607 | int err; | ||
1608 | |||
1609 | ioat_dma->enumerate_channels = ioat_enumerate_channels; | ||
1610 | ioat_dma->reset_hw = ioat3_reset_hw; | ||
1611 | ioat_dma->self_test = ioat3_dma_self_test; | ||
1612 | ioat_dma->intr_quirk = ioat3_intr_quirk; | ||
1613 | dma = &ioat_dma->dma_dev; | ||
1614 | dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock; | ||
1615 | dma->device_issue_pending = ioat_issue_pending; | ||
1616 | dma->device_alloc_chan_resources = ioat_alloc_chan_resources; | ||
1617 | dma->device_free_chan_resources = ioat_free_chan_resources; | ||
1618 | |||
1619 | dma_cap_set(DMA_INTERRUPT, dma->cap_mask); | ||
1620 | dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; | ||
1621 | |||
1622 | ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET); | ||
1623 | |||
1624 | if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev)) | ||
1625 | ioat_dma->cap &= | ||
1626 | ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); | ||
1627 | |||
1628 | /* dca is incompatible with raid operations */ | ||
1629 | if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) | ||
1630 | ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); | ||
1631 | |||
1632 | if (ioat_dma->cap & IOAT_CAP_XOR) { | ||
1633 | is_raid_device = true; | ||
1634 | dma->max_xor = 8; | ||
1635 | |||
1636 | dma_cap_set(DMA_XOR, dma->cap_mask); | ||
1637 | dma->device_prep_dma_xor = ioat3_prep_xor; | ||
1638 | |||
1639 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | ||
1640 | dma->device_prep_dma_xor_val = ioat3_prep_xor_val; | ||
1641 | } | ||
1642 | |||
1643 | if (ioat_dma->cap & IOAT_CAP_PQ) { | ||
1644 | is_raid_device = true; | ||
1645 | |||
1646 | dma->device_prep_dma_pq = ioat3_prep_pq; | ||
1647 | dma->device_prep_dma_pq_val = ioat3_prep_pq_val; | ||
1648 | dma_cap_set(DMA_PQ, dma->cap_mask); | ||
1649 | dma_cap_set(DMA_PQ_VAL, dma->cap_mask); | ||
1650 | |||
1651 | if (ioat_dma->cap & IOAT_CAP_RAID16SS) { | ||
1652 | dma_set_maxpq(dma, 16, 0); | ||
1653 | } else { | ||
1654 | dma_set_maxpq(dma, 8, 0); | ||
1655 | } | ||
1656 | |||
1657 | if (!(ioat_dma->cap & IOAT_CAP_XOR)) { | ||
1658 | dma->device_prep_dma_xor = ioat3_prep_pqxor; | ||
1659 | dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; | ||
1660 | dma_cap_set(DMA_XOR, dma->cap_mask); | ||
1661 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | ||
1662 | |||
1663 | if (ioat_dma->cap & IOAT_CAP_RAID16SS) { | ||
1664 | dma->max_xor = 16; | ||
1665 | } else { | ||
1666 | dma->max_xor = 8; | ||
1667 | } | ||
1668 | } | ||
1669 | } | ||
1670 | |||
1671 | dma->device_tx_status = ioat3_tx_status; | ||
1672 | ioat_dma->cleanup_fn = ioat3_cleanup_event; | ||
1673 | ioat_dma->timer_fn = ioat3_timer_event; | ||
1674 | |||
1675 | /* starting with CB3.3 super extended descriptors are supported */ | ||
1676 | if (ioat_dma->cap & IOAT_CAP_RAID16SS) { | ||
1677 | char pool_name[14]; | ||
1678 | int i; | ||
1679 | |||
1680 | for (i = 0; i < MAX_SED_POOLS; i++) { | ||
1681 | snprintf(pool_name, 14, "ioat_hw%d_sed", i); | ||
1682 | |||
1683 | /* allocate SED DMA pool */ | ||
1684 | ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name, | ||
1685 | &pdev->dev, | ||
1686 | SED_SIZE * (i + 1), 64, 0); | ||
1687 | if (!ioat_dma->sed_hw_pool[i]) | ||
1688 | return -ENOMEM; | ||
1689 | |||
1690 | } | ||
1691 | } | ||
1692 | |||
1693 | if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ))) | ||
1694 | dma_cap_set(DMA_PRIVATE, dma->cap_mask); | ||
1695 | |||
1696 | err = ioat_probe(ioat_dma); | ||
1697 | if (err) | ||
1698 | return err; | ||
1699 | |||
1700 | list_for_each_entry(c, &dma->channels, device_node) { | ||
1701 | ioat_chan = to_ioat_chan(c); | ||
1702 | writel(IOAT_DMA_DCA_ANY_CPU, | ||
1703 | ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); | ||
1704 | } | ||
1705 | |||
1706 | err = ioat_register(ioat_dma); | ||
1707 | if (err) | ||
1708 | return err; | ||
1709 | |||
1710 | ioat_kobject_add(ioat_dma, &ioat_ktype); | ||
1711 | |||
1712 | if (dca) | ||
1713 | ioat_dma->dca = ioat3_dca_init(pdev, ioat_dma->reg_base); | ||
1714 | |||
1715 | return 0; | ||
1716 | } | ||
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c new file mode 100644 index 000000000000..de8141c7cd01 --- /dev/null +++ b/drivers/dma/ioat/init.c | |||
@@ -0,0 +1,1293 @@ | |||
1 | /* | ||
2 | * Intel I/OAT DMA Linux driver | ||
3 | * Copyright(c) 2004 - 2015 Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * The full GNU General Public License is included in this distribution in | ||
15 | * the file called "COPYING". | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/pci.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/dmaengine.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/dma-mapping.h> | ||
27 | #include <linux/workqueue.h> | ||
28 | #include <linux/prefetch.h> | ||
29 | #include <linux/dca.h> | ||
30 | #include "dma.h" | ||
31 | #include "registers.h" | ||
32 | #include "hw.h" | ||
33 | |||
34 | #include "../dmaengine.h" | ||
35 | |||
36 | MODULE_VERSION(IOAT_DMA_VERSION); | ||
37 | MODULE_LICENSE("Dual BSD/GPL"); | ||
38 | MODULE_AUTHOR("Intel Corporation"); | ||
39 | |||
40 | static struct pci_device_id ioat_pci_tbl[] = { | ||
41 | /* I/OAT v3 platforms */ | ||
42 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, | ||
43 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, | ||
44 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) }, | ||
45 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) }, | ||
46 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) }, | ||
47 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, | ||
48 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, | ||
49 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, | ||
50 | |||
51 | /* I/OAT v3.2 platforms */ | ||
52 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) }, | ||
53 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) }, | ||
54 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) }, | ||
55 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) }, | ||
56 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) }, | ||
57 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) }, | ||
58 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) }, | ||
59 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) }, | ||
60 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, | ||
61 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, | ||
62 | |||
63 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) }, | ||
64 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) }, | ||
65 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) }, | ||
66 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) }, | ||
67 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) }, | ||
68 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) }, | ||
69 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) }, | ||
70 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) }, | ||
71 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, | ||
72 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, | ||
73 | |||
74 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) }, | ||
75 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) }, | ||
76 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) }, | ||
77 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) }, | ||
78 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) }, | ||
79 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) }, | ||
80 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) }, | ||
81 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) }, | ||
82 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, | ||
83 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, | ||
84 | |||
85 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) }, | ||
86 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) }, | ||
87 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) }, | ||
88 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) }, | ||
89 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) }, | ||
90 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) }, | ||
91 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) }, | ||
92 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) }, | ||
93 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) }, | ||
94 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) }, | ||
95 | |||
96 | /* I/OAT v3.3 platforms */ | ||
97 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, | ||
98 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, | ||
99 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, | ||
100 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, | ||
101 | |||
102 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) }, | ||
103 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) }, | ||
104 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, | ||
105 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, | ||
106 | |||
107 | { 0, } | ||
108 | }; | ||
109 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); | ||
110 | |||
111 | static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); | ||
112 | static void ioat_remove(struct pci_dev *pdev); | ||
113 | |||
114 | static int ioat_dca_enabled = 1; | ||
115 | module_param(ioat_dca_enabled, int, 0644); | ||
116 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); | ||
117 | int ioat_pending_level = 4; | ||
118 | module_param(ioat_pending_level, int, 0644); | ||
119 | MODULE_PARM_DESC(ioat_pending_level, | ||
120 | "high-water mark for pushing ioat descriptors (default: 4)"); | ||
121 | int ioat_ring_alloc_order = 8; | ||
122 | module_param(ioat_ring_alloc_order, int, 0644); | ||
123 | MODULE_PARM_DESC(ioat_ring_alloc_order, | ||
124 | "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)"); | ||
125 | int ioat_ring_max_alloc_order = IOAT_MAX_ORDER; | ||
126 | module_param(ioat_ring_max_alloc_order, int, 0644); | ||
127 | MODULE_PARM_DESC(ioat_ring_max_alloc_order, | ||
128 | "ioat+: upper limit for ring size (default: 16)"); | ||
129 | static char ioat_interrupt_style[32] = "msix"; | ||
130 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, | ||
131 | sizeof(ioat_interrupt_style), 0644); | ||
132 | MODULE_PARM_DESC(ioat_interrupt_style, | ||
133 | "set ioat interrupt style: msix (default), msi, intx"); | ||
134 | |||
135 | struct kmem_cache *ioat_cache; | ||
136 | struct kmem_cache *ioat_sed_cache; | ||
137 | |||
138 | static bool is_jf_ioat(struct pci_dev *pdev) | ||
139 | { | ||
140 | switch (pdev->device) { | ||
141 | case PCI_DEVICE_ID_INTEL_IOAT_JSF0: | ||
142 | case PCI_DEVICE_ID_INTEL_IOAT_JSF1: | ||
143 | case PCI_DEVICE_ID_INTEL_IOAT_JSF2: | ||
144 | case PCI_DEVICE_ID_INTEL_IOAT_JSF3: | ||
145 | case PCI_DEVICE_ID_INTEL_IOAT_JSF4: | ||
146 | case PCI_DEVICE_ID_INTEL_IOAT_JSF5: | ||
147 | case PCI_DEVICE_ID_INTEL_IOAT_JSF6: | ||
148 | case PCI_DEVICE_ID_INTEL_IOAT_JSF7: | ||
149 | case PCI_DEVICE_ID_INTEL_IOAT_JSF8: | ||
150 | case PCI_DEVICE_ID_INTEL_IOAT_JSF9: | ||
151 | return true; | ||
152 | default: | ||
153 | return false; | ||
154 | } | ||
155 | } | ||
156 | |||
157 | static bool is_snb_ioat(struct pci_dev *pdev) | ||
158 | { | ||
159 | switch (pdev->device) { | ||
160 | case PCI_DEVICE_ID_INTEL_IOAT_SNB0: | ||
161 | case PCI_DEVICE_ID_INTEL_IOAT_SNB1: | ||
162 | case PCI_DEVICE_ID_INTEL_IOAT_SNB2: | ||
163 | case PCI_DEVICE_ID_INTEL_IOAT_SNB3: | ||
164 | case PCI_DEVICE_ID_INTEL_IOAT_SNB4: | ||
165 | case PCI_DEVICE_ID_INTEL_IOAT_SNB5: | ||
166 | case PCI_DEVICE_ID_INTEL_IOAT_SNB6: | ||
167 | case PCI_DEVICE_ID_INTEL_IOAT_SNB7: | ||
168 | case PCI_DEVICE_ID_INTEL_IOAT_SNB8: | ||
169 | case PCI_DEVICE_ID_INTEL_IOAT_SNB9: | ||
170 | return true; | ||
171 | default: | ||
172 | return false; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static bool is_ivb_ioat(struct pci_dev *pdev) | ||
177 | { | ||
178 | switch (pdev->device) { | ||
179 | case PCI_DEVICE_ID_INTEL_IOAT_IVB0: | ||
180 | case PCI_DEVICE_ID_INTEL_IOAT_IVB1: | ||
181 | case PCI_DEVICE_ID_INTEL_IOAT_IVB2: | ||
182 | case PCI_DEVICE_ID_INTEL_IOAT_IVB3: | ||
183 | case PCI_DEVICE_ID_INTEL_IOAT_IVB4: | ||
184 | case PCI_DEVICE_ID_INTEL_IOAT_IVB5: | ||
185 | case PCI_DEVICE_ID_INTEL_IOAT_IVB6: | ||
186 | case PCI_DEVICE_ID_INTEL_IOAT_IVB7: | ||
187 | case PCI_DEVICE_ID_INTEL_IOAT_IVB8: | ||
188 | case PCI_DEVICE_ID_INTEL_IOAT_IVB9: | ||
189 | return true; | ||
190 | default: | ||
191 | return false; | ||
192 | } | ||
193 | |||
194 | } | ||
195 | |||
196 | static bool is_hsw_ioat(struct pci_dev *pdev) | ||
197 | { | ||
198 | switch (pdev->device) { | ||
199 | case PCI_DEVICE_ID_INTEL_IOAT_HSW0: | ||
200 | case PCI_DEVICE_ID_INTEL_IOAT_HSW1: | ||
201 | case PCI_DEVICE_ID_INTEL_IOAT_HSW2: | ||
202 | case PCI_DEVICE_ID_INTEL_IOAT_HSW3: | ||
203 | case PCI_DEVICE_ID_INTEL_IOAT_HSW4: | ||
204 | case PCI_DEVICE_ID_INTEL_IOAT_HSW5: | ||
205 | case PCI_DEVICE_ID_INTEL_IOAT_HSW6: | ||
206 | case PCI_DEVICE_ID_INTEL_IOAT_HSW7: | ||
207 | case PCI_DEVICE_ID_INTEL_IOAT_HSW8: | ||
208 | case PCI_DEVICE_ID_INTEL_IOAT_HSW9: | ||
209 | return true; | ||
210 | default: | ||
211 | return false; | ||
212 | } | ||
213 | |||
214 | } | ||
215 | |||
216 | static bool is_xeon_cb32(struct pci_dev *pdev) | ||
217 | { | ||
218 | return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || | ||
219 | is_hsw_ioat(pdev); | ||
220 | } | ||
221 | |||
222 | bool is_bwd_ioat(struct pci_dev *pdev) | ||
223 | { | ||
224 | switch (pdev->device) { | ||
225 | case PCI_DEVICE_ID_INTEL_IOAT_BWD0: | ||
226 | case PCI_DEVICE_ID_INTEL_IOAT_BWD1: | ||
227 | case PCI_DEVICE_ID_INTEL_IOAT_BWD2: | ||
228 | case PCI_DEVICE_ID_INTEL_IOAT_BWD3: | ||
229 | /* even though not Atom, BDX-DE has same DMA silicon */ | ||
230 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: | ||
231 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: | ||
232 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: | ||
233 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: | ||
234 | return true; | ||
235 | default: | ||
236 | return false; | ||
237 | } | ||
238 | } | ||
239 | |||
240 | static bool is_bwd_noraid(struct pci_dev *pdev) | ||
241 | { | ||
242 | switch (pdev->device) { | ||
243 | case PCI_DEVICE_ID_INTEL_IOAT_BWD2: | ||
244 | case PCI_DEVICE_ID_INTEL_IOAT_BWD3: | ||
245 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: | ||
246 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: | ||
247 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: | ||
248 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: | ||
249 | return true; | ||
250 | default: | ||
251 | return false; | ||
252 | } | ||
253 | |||
254 | } | ||
255 | |||
256 | /* | ||
257 | * Perform an IOAT transaction to verify the HW works. | ||
258 | */ | ||
259 | #define IOAT_TEST_SIZE 2000 | ||
260 | |||
261 | static void ioat_dma_test_callback(void *dma_async_param) | ||
262 | { | ||
263 | struct completion *cmp = dma_async_param; | ||
264 | |||
265 | complete(cmp); | ||
266 | } | ||
267 | |||
268 | /** | ||
269 | * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works. | ||
270 | * @ioat_dma: dma device to be tested | ||
271 | */ | ||
272 | int ioat_dma_self_test(struct ioatdma_device *ioat_dma) | ||
273 | { | ||
274 | int i; | ||
275 | u8 *src; | ||
276 | u8 *dest; | ||
277 | struct dma_device *dma = &ioat_dma->dma_dev; | ||
278 | struct device *dev = &ioat_dma->pdev->dev; | ||
279 | struct dma_chan *dma_chan; | ||
280 | struct dma_async_tx_descriptor *tx; | ||
281 | dma_addr_t dma_dest, dma_src; | ||
282 | dma_cookie_t cookie; | ||
283 | int err = 0; | ||
284 | struct completion cmp; | ||
285 | unsigned long tmo; | ||
286 | unsigned long flags; | ||
287 | |||
288 | src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | ||
289 | if (!src) | ||
290 | return -ENOMEM; | ||
291 | dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | ||
292 | if (!dest) { | ||
293 | kfree(src); | ||
294 | return -ENOMEM; | ||
295 | } | ||
296 | |||
297 | /* Fill in src buffer */ | ||
298 | for (i = 0; i < IOAT_TEST_SIZE; i++) | ||
299 | src[i] = (u8)i; | ||
300 | |||
301 | /* Start copy, using first DMA channel */ | ||
302 | dma_chan = container_of(dma->channels.next, struct dma_chan, | ||
303 | device_node); | ||
304 | if (dma->device_alloc_chan_resources(dma_chan) < 1) { | ||
305 | dev_err(dev, "selftest cannot allocate chan resource\n"); | ||
306 | err = -ENODEV; | ||
307 | goto out; | ||
308 | } | ||
309 | |||
310 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | ||
311 | if (dma_mapping_error(dev, dma_src)) { | ||
312 | dev_err(dev, "mapping src buffer failed\n"); | ||
313 | goto free_resources; | ||
314 | } | ||
315 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | ||
316 | if (dma_mapping_error(dev, dma_dest)) { | ||
317 | dev_err(dev, "mapping dest buffer failed\n"); | ||
318 | goto unmap_src; | ||
319 | } | ||
320 | flags = DMA_PREP_INTERRUPT; | ||
321 | tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest, | ||
322 | dma_src, IOAT_TEST_SIZE, | ||
323 | flags); | ||
324 | if (!tx) { | ||
325 | dev_err(dev, "Self-test prep failed, disabling\n"); | ||
326 | err = -ENODEV; | ||
327 | goto unmap_dma; | ||
328 | } | ||
329 | |||
330 | async_tx_ack(tx); | ||
331 | init_completion(&cmp); | ||
332 | tx->callback = ioat_dma_test_callback; | ||
333 | tx->callback_param = &cmp; | ||
334 | cookie = tx->tx_submit(tx); | ||
335 | if (cookie < 0) { | ||
336 | dev_err(dev, "Self-test setup failed, disabling\n"); | ||
337 | err = -ENODEV; | ||
338 | goto unmap_dma; | ||
339 | } | ||
340 | dma->device_issue_pending(dma_chan); | ||
341 | |||
342 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
343 | |||
344 | if (tmo == 0 || | ||
345 | dma->device_tx_status(dma_chan, cookie, NULL) | ||
346 | != DMA_COMPLETE) { | ||
347 | dev_err(dev, "Self-test copy timed out, disabling\n"); | ||
348 | err = -ENODEV; | ||
349 | goto unmap_dma; | ||
350 | } | ||
351 | if (memcmp(src, dest, IOAT_TEST_SIZE)) { | ||
352 | dev_err(dev, "Self-test copy failed compare, disabling\n"); | ||
353 | err = -ENODEV; | ||
354 | goto free_resources; | ||
355 | } | ||
356 | |||
357 | unmap_dma: | ||
358 | dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | ||
359 | unmap_src: | ||
360 | dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | ||
361 | free_resources: | ||
362 | dma->device_free_chan_resources(dma_chan); | ||
363 | out: | ||
364 | kfree(src); | ||
365 | kfree(dest); | ||
366 | return err; | ||
367 | } | ||
368 | |||
369 | /** | ||
370 | * ioat_dma_setup_interrupts - setup interrupt handler | ||
371 | * @ioat_dma: ioat dma device | ||
372 | */ | ||
373 | int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma) | ||
374 | { | ||
375 | struct ioatdma_chan *ioat_chan; | ||
376 | struct pci_dev *pdev = ioat_dma->pdev; | ||
377 | struct device *dev = &pdev->dev; | ||
378 | struct msix_entry *msix; | ||
379 | int i, j, msixcnt; | ||
380 | int err = -EINVAL; | ||
381 | u8 intrctrl = 0; | ||
382 | |||
383 | if (!strcmp(ioat_interrupt_style, "msix")) | ||
384 | goto msix; | ||
385 | if (!strcmp(ioat_interrupt_style, "msi")) | ||
386 | goto msi; | ||
387 | if (!strcmp(ioat_interrupt_style, "intx")) | ||
388 | goto intx; | ||
389 | dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style); | ||
390 | goto err_no_irq; | ||
391 | |||
392 | msix: | ||
393 | /* The number of MSI-X vectors should equal the number of channels */ | ||
394 | msixcnt = ioat_dma->dma_dev.chancnt; | ||
395 | for (i = 0; i < msixcnt; i++) | ||
396 | ioat_dma->msix_entries[i].entry = i; | ||
397 | |||
398 | err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt); | ||
399 | if (err) | ||
400 | goto msi; | ||
401 | |||
402 | for (i = 0; i < msixcnt; i++) { | ||
403 | msix = &ioat_dma->msix_entries[i]; | ||
404 | ioat_chan = ioat_chan_by_index(ioat_dma, i); | ||
405 | err = devm_request_irq(dev, msix->vector, | ||
406 | ioat_dma_do_interrupt_msix, 0, | ||
407 | "ioat-msix", ioat_chan); | ||
408 | if (err) { | ||
409 | for (j = 0; j < i; j++) { | ||
410 | msix = &ioat_dma->msix_entries[j]; | ||
411 | ioat_chan = ioat_chan_by_index(ioat_dma, j); | ||
412 | devm_free_irq(dev, msix->vector, ioat_chan); | ||
413 | } | ||
414 | goto msi; | ||
415 | } | ||
416 | } | ||
417 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; | ||
418 | ioat_dma->irq_mode = IOAT_MSIX; | ||
419 | goto done; | ||
420 | |||
421 | msi: | ||
422 | err = pci_enable_msi(pdev); | ||
423 | if (err) | ||
424 | goto intx; | ||
425 | |||
426 | err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0, | ||
427 | "ioat-msi", ioat_dma); | ||
428 | if (err) { | ||
429 | pci_disable_msi(pdev); | ||
430 | goto intx; | ||
431 | } | ||
432 | ioat_dma->irq_mode = IOAT_MSI; | ||
433 | goto done; | ||
434 | |||
435 | intx: | ||
436 | err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, | ||
437 | IRQF_SHARED, "ioat-intx", ioat_dma); | ||
438 | if (err) | ||
439 | goto err_no_irq; | ||
440 | |||
441 | ioat_dma->irq_mode = IOAT_INTX; | ||
442 | done: | ||
443 | if (ioat_dma->intr_quirk) | ||
444 | ioat_dma->intr_quirk(ioat_dma); | ||
445 | intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; | ||
446 | writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); | ||
447 | return 0; | ||
448 | |||
449 | err_no_irq: | ||
450 | /* Disable all interrupt generation */ | ||
451 | writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); | ||
452 | ioat_dma->irq_mode = IOAT_NOIRQ; | ||
453 | dev_err(dev, "no usable interrupts\n"); | ||
454 | return err; | ||
455 | } | ||
456 | EXPORT_SYMBOL(ioat_dma_setup_interrupts); | ||
457 | |||
458 | static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma) | ||
459 | { | ||
460 | /* Disable all interrupt generation */ | ||
461 | writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); | ||
462 | } | ||
463 | |||
464 | int ioat_probe(struct ioatdma_device *ioat_dma) | ||
465 | { | ||
466 | int err = -ENODEV; | ||
467 | struct dma_device *dma = &ioat_dma->dma_dev; | ||
468 | struct pci_dev *pdev = ioat_dma->pdev; | ||
469 | struct device *dev = &pdev->dev; | ||
470 | |||
471 | /* DMA coherent memory pool for DMA descriptor allocations */ | ||
472 | ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev, | ||
473 | sizeof(struct ioat_dma_descriptor), | ||
474 | 64, 0); | ||
475 | if (!ioat_dma->dma_pool) { | ||
476 | err = -ENOMEM; | ||
477 | goto err_dma_pool; | ||
478 | } | ||
479 | |||
480 | ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev, | ||
481 | sizeof(u64), | ||
482 | SMP_CACHE_BYTES, | ||
483 | SMP_CACHE_BYTES); | ||
484 | |||
485 | if (!ioat_dma->completion_pool) { | ||
486 | err = -ENOMEM; | ||
487 | goto err_completion_pool; | ||
488 | } | ||
489 | |||
490 | ioat_dma->enumerate_channels(ioat_dma); | ||
491 | |||
492 | dma_cap_set(DMA_MEMCPY, dma->cap_mask); | ||
493 | dma->dev = &pdev->dev; | ||
494 | |||
495 | if (!dma->chancnt) { | ||
496 | dev_err(dev, "channel enumeration error\n"); | ||
497 | goto err_setup_interrupts; | ||
498 | } | ||
499 | |||
500 | err = ioat_dma_setup_interrupts(ioat_dma); | ||
501 | if (err) | ||
502 | goto err_setup_interrupts; | ||
503 | |||
504 | err = ioat_dma->self_test(ioat_dma); | ||
505 | if (err) | ||
506 | goto err_self_test; | ||
507 | |||
508 | return 0; | ||
509 | |||
510 | err_self_test: | ||
511 | ioat_disable_interrupts(ioat_dma); | ||
512 | err_setup_interrupts: | ||
513 | pci_pool_destroy(ioat_dma->completion_pool); | ||
514 | err_completion_pool: | ||
515 | pci_pool_destroy(ioat_dma->dma_pool); | ||
516 | err_dma_pool: | ||
517 | return err; | ||
518 | } | ||
519 | |||
520 | int ioat_register(struct ioatdma_device *ioat_dma) | ||
521 | { | ||
522 | int err = dma_async_device_register(&ioat_dma->dma_dev); | ||
523 | |||
524 | if (err) { | ||
525 | ioat_disable_interrupts(ioat_dma); | ||
526 | pci_pool_destroy(ioat_dma->completion_pool); | ||
527 | pci_pool_destroy(ioat_dma->dma_pool); | ||
528 | } | ||
529 | |||
530 | return err; | ||
531 | } | ||
532 | |||
533 | void ioat_dma_remove(struct ioatdma_device *ioat_dma) | ||
534 | { | ||
535 | struct dma_device *dma = &ioat_dma->dma_dev; | ||
536 | |||
537 | ioat_disable_interrupts(ioat_dma); | ||
538 | |||
539 | ioat_kobject_del(ioat_dma); | ||
540 | |||
541 | dma_async_device_unregister(dma); | ||
542 | |||
543 | pci_pool_destroy(ioat_dma->dma_pool); | ||
544 | pci_pool_destroy(ioat_dma->completion_pool); | ||
545 | |||
546 | INIT_LIST_HEAD(&dma->channels); | ||
547 | } | ||
548 | |||
549 | /** | ||
550 | * ioat_enumerate_channels - find and initialize the device's channels | ||
551 | * @ioat_dma: the ioat dma device to be enumerated | ||
552 | */ | ||
553 | int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) | ||
554 | { | ||
555 | struct ioatdma_chan *ioat_chan; | ||
556 | struct device *dev = &ioat_dma->pdev->dev; | ||
557 | struct dma_device *dma = &ioat_dma->dma_dev; | ||
558 | u8 xfercap_log; | ||
559 | int i; | ||
560 | |||
561 | INIT_LIST_HEAD(&dma->channels); | ||
562 | dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET); | ||
563 | dma->chancnt &= 0x1f; /* bits [4:0] valid */ | ||
564 | if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) { | ||
565 | dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", | ||
566 | dma->chancnt, ARRAY_SIZE(ioat_dma->idx)); | ||
567 | dma->chancnt = ARRAY_SIZE(ioat_dma->idx); | ||
568 | } | ||
569 | xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET); | ||
570 | xfercap_log &= 0x1f; /* bits [4:0] valid */ | ||
571 | if (xfercap_log == 0) | ||
572 | return 0; | ||
573 | dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); | ||
574 | |||
575 | for (i = 0; i < dma->chancnt; i++) { | ||
576 | ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); | ||
577 | if (!ioat_chan) | ||
578 | break; | ||
579 | |||
580 | ioat_init_channel(ioat_dma, ioat_chan, i); | ||
581 | ioat_chan->xfercap_log = xfercap_log; | ||
582 | spin_lock_init(&ioat_chan->prep_lock); | ||
583 | if (ioat_dma->reset_hw(ioat_chan)) { | ||
584 | i = 0; | ||
585 | break; | ||
586 | } | ||
587 | } | ||
588 | dma->chancnt = i; | ||
589 | return i; | ||
590 | } | ||
591 | |||
592 | /** | ||
593 | * ioat_free_chan_resources - release all the descriptors | ||
594 | * @chan: the channel to be cleaned | ||
595 | */ | ||
596 | void ioat_free_chan_resources(struct dma_chan *c) | ||
597 | { | ||
598 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
599 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | ||
600 | struct ioat_ring_ent *desc; | ||
601 | const int total_descs = 1 << ioat_chan->alloc_order; | ||
602 | int descs; | ||
603 | int i; | ||
604 | |||
605 | /* Before freeing channel resources first check | ||
606 | * if they have been previously allocated for this channel. | ||
607 | */ | ||
608 | if (!ioat_chan->ring) | ||
609 | return; | ||
610 | |||
611 | ioat_stop(ioat_chan); | ||
612 | ioat_dma->reset_hw(ioat_chan); | ||
613 | |||
614 | spin_lock_bh(&ioat_chan->cleanup_lock); | ||
615 | spin_lock_bh(&ioat_chan->prep_lock); | ||
616 | descs = ioat_ring_space(ioat_chan); | ||
617 | dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs); | ||
618 | for (i = 0; i < descs; i++) { | ||
619 | desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i); | ||
620 | ioat_free_ring_ent(desc, c); | ||
621 | } | ||
622 | |||
623 | if (descs < total_descs) | ||
624 | dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n", | ||
625 | total_descs - descs); | ||
626 | |||
627 | for (i = 0; i < total_descs - descs; i++) { | ||
628 | desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i); | ||
629 | dump_desc_dbg(ioat_chan, desc); | ||
630 | ioat_free_ring_ent(desc, c); | ||
631 | } | ||
632 | |||
633 | kfree(ioat_chan->ring); | ||
634 | ioat_chan->ring = NULL; | ||
635 | ioat_chan->alloc_order = 0; | ||
636 | pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion, | ||
637 | ioat_chan->completion_dma); | ||
638 | spin_unlock_bh(&ioat_chan->prep_lock); | ||
639 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
640 | |||
641 | ioat_chan->last_completion = 0; | ||
642 | ioat_chan->completion_dma = 0; | ||
643 | ioat_chan->dmacount = 0; | ||
644 | } | ||
645 | |||
646 | /* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring | ||
647 | * @chan: channel to be initialized | ||
648 | */ | ||
649 | int ioat_alloc_chan_resources(struct dma_chan *c) | ||
650 | { | ||
651 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | ||
652 | struct ioat_ring_ent **ring; | ||
653 | u64 status; | ||
654 | int order; | ||
655 | int i = 0; | ||
656 | u32 chanerr; | ||
657 | |||
658 | /* have we already been set up? */ | ||
659 | if (ioat_chan->ring) | ||
660 | return 1 << ioat_chan->alloc_order; | ||
661 | |||
662 | /* Setup register to interrupt and write completion status on error */ | ||
663 | writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); | ||
664 | |||
665 | /* allocate a completion writeback area */ | ||
666 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ | ||
667 | ioat_chan->completion = | ||
668 | pci_pool_alloc(ioat_chan->ioat_dma->completion_pool, | ||
669 | GFP_KERNEL, &ioat_chan->completion_dma); | ||
670 | if (!ioat_chan->completion) | ||
671 | return -ENOMEM; | ||
672 | |||
673 | memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion)); | ||
674 | writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF, | ||
675 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); | ||
676 | writel(((u64)ioat_chan->completion_dma) >> 32, | ||
677 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); | ||
678 | |||
679 | order = ioat_get_alloc_order(); | ||
680 | ring = ioat_alloc_ring(c, order, GFP_KERNEL); | ||
681 | if (!ring) | ||
682 | return -ENOMEM; | ||
683 | |||
684 | spin_lock_bh(&ioat_chan->cleanup_lock); | ||
685 | spin_lock_bh(&ioat_chan->prep_lock); | ||
686 | ioat_chan->ring = ring; | ||
687 | ioat_chan->head = 0; | ||
688 | ioat_chan->issued = 0; | ||
689 | ioat_chan->tail = 0; | ||
690 | ioat_chan->alloc_order = order; | ||
691 | set_bit(IOAT_RUN, &ioat_chan->state); | ||
692 | spin_unlock_bh(&ioat_chan->prep_lock); | ||
693 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
694 | |||
695 | ioat_start_null_desc(ioat_chan); | ||
696 | |||
697 | /* check that we got off the ground */ | ||
698 | do { | ||
699 | udelay(1); | ||
700 | status = ioat_chansts(ioat_chan); | ||
701 | } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); | ||
702 | |||
703 | if (is_ioat_active(status) || is_ioat_idle(status)) | ||
704 | return 1 << ioat_chan->alloc_order; | ||
705 | |||
706 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
707 | |||
708 | dev_WARN(to_dev(ioat_chan), | ||
709 | "failed to start channel chanerr: %#x\n", chanerr); | ||
710 | ioat_free_chan_resources(c); | ||
711 | return -EFAULT; | ||
712 | } | ||
713 | |||
714 | /* common channel initialization */ | ||
715 | void | ||
716 | ioat_init_channel(struct ioatdma_device *ioat_dma, | ||
717 | struct ioatdma_chan *ioat_chan, int idx) | ||
718 | { | ||
719 | struct dma_device *dma = &ioat_dma->dma_dev; | ||
720 | struct dma_chan *c = &ioat_chan->dma_chan; | ||
721 | unsigned long data = (unsigned long) c; | ||
722 | |||
723 | ioat_chan->ioat_dma = ioat_dma; | ||
724 | ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1)); | ||
725 | spin_lock_init(&ioat_chan->cleanup_lock); | ||
726 | ioat_chan->dma_chan.device = dma; | ||
727 | dma_cookie_init(&ioat_chan->dma_chan); | ||
728 | list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels); | ||
729 | ioat_dma->idx[idx] = ioat_chan; | ||
730 | init_timer(&ioat_chan->timer); | ||
731 | ioat_chan->timer.function = ioat_dma->timer_fn; | ||
732 | ioat_chan->timer.data = data; | ||
733 | tasklet_init(&ioat_chan->cleanup_task, ioat_dma->cleanup_fn, data); | ||
734 | } | ||
735 | |||
736 | static void ioat3_dma_test_callback(void *dma_async_param) | ||
737 | { | ||
738 | struct completion *cmp = dma_async_param; | ||
739 | |||
740 | complete(cmp); | ||
741 | } | ||
742 | |||
743 | #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ | ||
744 | static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) | ||
745 | { | ||
746 | int i, src_idx; | ||
747 | struct page *dest; | ||
748 | struct page *xor_srcs[IOAT_NUM_SRC_TEST]; | ||
749 | struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; | ||
750 | dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; | ||
751 | dma_addr_t dest_dma; | ||
752 | struct dma_async_tx_descriptor *tx; | ||
753 | struct dma_chan *dma_chan; | ||
754 | dma_cookie_t cookie; | ||
755 | u8 cmp_byte = 0; | ||
756 | u32 cmp_word; | ||
757 | u32 xor_val_result; | ||
758 | int err = 0; | ||
759 | struct completion cmp; | ||
760 | unsigned long tmo; | ||
761 | struct device *dev = &ioat_dma->pdev->dev; | ||
762 | struct dma_device *dma = &ioat_dma->dma_dev; | ||
763 | u8 op = 0; | ||
764 | |||
765 | dev_dbg(dev, "%s\n", __func__); | ||
766 | |||
767 | if (!dma_has_cap(DMA_XOR, dma->cap_mask)) | ||
768 | return 0; | ||
769 | |||
770 | for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { | ||
771 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); | ||
772 | if (!xor_srcs[src_idx]) { | ||
773 | while (src_idx--) | ||
774 | __free_page(xor_srcs[src_idx]); | ||
775 | return -ENOMEM; | ||
776 | } | ||
777 | } | ||
778 | |||
779 | dest = alloc_page(GFP_KERNEL); | ||
780 | if (!dest) { | ||
781 | while (src_idx--) | ||
782 | __free_page(xor_srcs[src_idx]); | ||
783 | return -ENOMEM; | ||
784 | } | ||
785 | |||
786 | /* Fill in src buffers */ | ||
787 | for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { | ||
788 | u8 *ptr = page_address(xor_srcs[src_idx]); | ||
789 | |||
790 | for (i = 0; i < PAGE_SIZE; i++) | ||
791 | ptr[i] = (1 << src_idx); | ||
792 | } | ||
793 | |||
794 | for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) | ||
795 | cmp_byte ^= (u8) (1 << src_idx); | ||
796 | |||
797 | cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | | ||
798 | (cmp_byte << 8) | cmp_byte; | ||
799 | |||
800 | memset(page_address(dest), 0, PAGE_SIZE); | ||
801 | |||
802 | dma_chan = container_of(dma->channels.next, struct dma_chan, | ||
803 | device_node); | ||
804 | if (dma->device_alloc_chan_resources(dma_chan) < 1) { | ||
805 | err = -ENODEV; | ||
806 | goto out; | ||
807 | } | ||
808 | |||
809 | /* test xor */ | ||
810 | op = IOAT_OP_XOR; | ||
811 | |||
812 | dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); | ||
813 | if (dma_mapping_error(dev, dest_dma)) | ||
814 | goto dma_unmap; | ||
815 | |||
816 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | ||
817 | dma_srcs[i] = DMA_ERROR_CODE; | ||
818 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) { | ||
819 | dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, | ||
820 | DMA_TO_DEVICE); | ||
821 | if (dma_mapping_error(dev, dma_srcs[i])) | ||
822 | goto dma_unmap; | ||
823 | } | ||
824 | tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | ||
825 | IOAT_NUM_SRC_TEST, PAGE_SIZE, | ||
826 | DMA_PREP_INTERRUPT); | ||
827 | |||
828 | if (!tx) { | ||
829 | dev_err(dev, "Self-test xor prep failed\n"); | ||
830 | err = -ENODEV; | ||
831 | goto dma_unmap; | ||
832 | } | ||
833 | |||
834 | async_tx_ack(tx); | ||
835 | init_completion(&cmp); | ||
836 | tx->callback = ioat3_dma_test_callback; | ||
837 | tx->callback_param = &cmp; | ||
838 | cookie = tx->tx_submit(tx); | ||
839 | if (cookie < 0) { | ||
840 | dev_err(dev, "Self-test xor setup failed\n"); | ||
841 | err = -ENODEV; | ||
842 | goto dma_unmap; | ||
843 | } | ||
844 | dma->device_issue_pending(dma_chan); | ||
845 | |||
846 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
847 | |||
848 | if (tmo == 0 || | ||
849 | dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { | ||
850 | dev_err(dev, "Self-test xor timed out\n"); | ||
851 | err = -ENODEV; | ||
852 | goto dma_unmap; | ||
853 | } | ||
854 | |||
855 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | ||
856 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); | ||
857 | |||
858 | dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); | ||
859 | for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { | ||
860 | u32 *ptr = page_address(dest); | ||
861 | |||
862 | if (ptr[i] != cmp_word) { | ||
863 | dev_err(dev, "Self-test xor failed compare\n"); | ||
864 | err = -ENODEV; | ||
865 | goto free_resources; | ||
866 | } | ||
867 | } | ||
868 | dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); | ||
869 | |||
870 | dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); | ||
871 | |||
872 | /* skip validate if the capability is not present */ | ||
873 | if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) | ||
874 | goto free_resources; | ||
875 | |||
876 | op = IOAT_OP_XOR_VAL; | ||
877 | |||
878 | /* validate the sources with the destination page */ | ||
879 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | ||
880 | xor_val_srcs[i] = xor_srcs[i]; | ||
881 | xor_val_srcs[i] = dest; | ||
882 | |||
883 | xor_val_result = 1; | ||
884 | |||
885 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
886 | dma_srcs[i] = DMA_ERROR_CODE; | ||
887 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { | ||
888 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | ||
889 | DMA_TO_DEVICE); | ||
890 | if (dma_mapping_error(dev, dma_srcs[i])) | ||
891 | goto dma_unmap; | ||
892 | } | ||
893 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | ||
894 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | ||
895 | &xor_val_result, DMA_PREP_INTERRUPT); | ||
896 | if (!tx) { | ||
897 | dev_err(dev, "Self-test zero prep failed\n"); | ||
898 | err = -ENODEV; | ||
899 | goto dma_unmap; | ||
900 | } | ||
901 | |||
902 | async_tx_ack(tx); | ||
903 | init_completion(&cmp); | ||
904 | tx->callback = ioat3_dma_test_callback; | ||
905 | tx->callback_param = &cmp; | ||
906 | cookie = tx->tx_submit(tx); | ||
907 | if (cookie < 0) { | ||
908 | dev_err(dev, "Self-test zero setup failed\n"); | ||
909 | err = -ENODEV; | ||
910 | goto dma_unmap; | ||
911 | } | ||
912 | dma->device_issue_pending(dma_chan); | ||
913 | |||
914 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
915 | |||
916 | if (tmo == 0 || | ||
917 | dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { | ||
918 | dev_err(dev, "Self-test validate timed out\n"); | ||
919 | err = -ENODEV; | ||
920 | goto dma_unmap; | ||
921 | } | ||
922 | |||
923 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
924 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); | ||
925 | |||
926 | if (xor_val_result != 0) { | ||
927 | dev_err(dev, "Self-test validate failed compare\n"); | ||
928 | err = -ENODEV; | ||
929 | goto free_resources; | ||
930 | } | ||
931 | |||
932 | memset(page_address(dest), 0, PAGE_SIZE); | ||
933 | |||
934 | /* test for non-zero parity sum */ | ||
935 | op = IOAT_OP_XOR_VAL; | ||
936 | |||
937 | xor_val_result = 0; | ||
938 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
939 | dma_srcs[i] = DMA_ERROR_CODE; | ||
940 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { | ||
941 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | ||
942 | DMA_TO_DEVICE); | ||
943 | if (dma_mapping_error(dev, dma_srcs[i])) | ||
944 | goto dma_unmap; | ||
945 | } | ||
946 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | ||
947 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | ||
948 | &xor_val_result, DMA_PREP_INTERRUPT); | ||
949 | if (!tx) { | ||
950 | dev_err(dev, "Self-test 2nd zero prep failed\n"); | ||
951 | err = -ENODEV; | ||
952 | goto dma_unmap; | ||
953 | } | ||
954 | |||
955 | async_tx_ack(tx); | ||
956 | init_completion(&cmp); | ||
957 | tx->callback = ioat3_dma_test_callback; | ||
958 | tx->callback_param = &cmp; | ||
959 | cookie = tx->tx_submit(tx); | ||
960 | if (cookie < 0) { | ||
961 | dev_err(dev, "Self-test 2nd zero setup failed\n"); | ||
962 | err = -ENODEV; | ||
963 | goto dma_unmap; | ||
964 | } | ||
965 | dma->device_issue_pending(dma_chan); | ||
966 | |||
967 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
968 | |||
969 | if (tmo == 0 || | ||
970 | dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { | ||
971 | dev_err(dev, "Self-test 2nd validate timed out\n"); | ||
972 | err = -ENODEV; | ||
973 | goto dma_unmap; | ||
974 | } | ||
975 | |||
976 | if (xor_val_result != SUM_CHECK_P_RESULT) { | ||
977 | dev_err(dev, "Self-test validate failed compare\n"); | ||
978 | err = -ENODEV; | ||
979 | goto dma_unmap; | ||
980 | } | ||
981 | |||
982 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
983 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); | ||
984 | |||
985 | goto free_resources; | ||
986 | dma_unmap: | ||
987 | if (op == IOAT_OP_XOR) { | ||
988 | if (dest_dma != DMA_ERROR_CODE) | ||
989 | dma_unmap_page(dev, dest_dma, PAGE_SIZE, | ||
990 | DMA_FROM_DEVICE); | ||
991 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | ||
992 | if (dma_srcs[i] != DMA_ERROR_CODE) | ||
993 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, | ||
994 | DMA_TO_DEVICE); | ||
995 | } else if (op == IOAT_OP_XOR_VAL) { | ||
996 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
997 | if (dma_srcs[i] != DMA_ERROR_CODE) | ||
998 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, | ||
999 | DMA_TO_DEVICE); | ||
1000 | } | ||
1001 | free_resources: | ||
1002 | dma->device_free_chan_resources(dma_chan); | ||
1003 | out: | ||
1004 | src_idx = IOAT_NUM_SRC_TEST; | ||
1005 | while (src_idx--) | ||
1006 | __free_page(xor_srcs[src_idx]); | ||
1007 | __free_page(dest); | ||
1008 | return err; | ||
1009 | } | ||
1010 | |||
1011 | static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma) | ||
1012 | { | ||
1013 | int rc = ioat_dma_self_test(ioat_dma); | ||
1014 | |||
1015 | if (rc) | ||
1016 | return rc; | ||
1017 | |||
1018 | rc = ioat_xor_val_self_test(ioat_dma); | ||
1019 | if (rc) | ||
1020 | return rc; | ||
1021 | |||
1022 | return 0; | ||
1023 | } | ||
1024 | |||
1025 | static void ioat3_intr_quirk(struct ioatdma_device *ioat_dma) | ||
1026 | { | ||
1027 | struct dma_device *dma; | ||
1028 | struct dma_chan *c; | ||
1029 | struct ioatdma_chan *ioat_chan; | ||
1030 | u32 errmask; | ||
1031 | |||
1032 | dma = &ioat_dma->dma_dev; | ||
1033 | |||
1034 | /* | ||
1035 | * if we have descriptor write back error status, we mask the | ||
1036 | * error interrupts | ||
1037 | */ | ||
1038 | if (ioat_dma->cap & IOAT_CAP_DWBES) { | ||
1039 | list_for_each_entry(c, &dma->channels, device_node) { | ||
1040 | ioat_chan = to_ioat_chan(c); | ||
1041 | errmask = readl(ioat_chan->reg_base + | ||
1042 | IOAT_CHANERR_MASK_OFFSET); | ||
1043 | errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR | | ||
1044 | IOAT_CHANERR_XOR_Q_ERR; | ||
1045 | writel(errmask, ioat_chan->reg_base + | ||
1046 | IOAT_CHANERR_MASK_OFFSET); | ||
1047 | } | ||
1048 | } | ||
1049 | } | ||
1050 | |||
1051 | int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | ||
1052 | { | ||
1053 | struct pci_dev *pdev = ioat_dma->pdev; | ||
1054 | int dca_en = system_has_dca_enabled(pdev); | ||
1055 | struct dma_device *dma; | ||
1056 | struct dma_chan *c; | ||
1057 | struct ioatdma_chan *ioat_chan; | ||
1058 | bool is_raid_device = false; | ||
1059 | int err; | ||
1060 | |||
1061 | ioat_dma->enumerate_channels = ioat_enumerate_channels; | ||
1062 | ioat_dma->reset_hw = ioat_reset_hw; | ||
1063 | ioat_dma->self_test = ioat3_dma_self_test; | ||
1064 | ioat_dma->intr_quirk = ioat3_intr_quirk; | ||
1065 | dma = &ioat_dma->dma_dev; | ||
1066 | dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock; | ||
1067 | dma->device_issue_pending = ioat_issue_pending; | ||
1068 | dma->device_alloc_chan_resources = ioat_alloc_chan_resources; | ||
1069 | dma->device_free_chan_resources = ioat_free_chan_resources; | ||
1070 | |||
1071 | dma_cap_set(DMA_INTERRUPT, dma->cap_mask); | ||
1072 | dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock; | ||
1073 | |||
1074 | ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET); | ||
1075 | |||
1076 | if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev)) | ||
1077 | ioat_dma->cap &= | ||
1078 | ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); | ||
1079 | |||
1080 | /* dca is incompatible with raid operations */ | ||
1081 | if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) | ||
1082 | ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); | ||
1083 | |||
1084 | if (ioat_dma->cap & IOAT_CAP_XOR) { | ||
1085 | is_raid_device = true; | ||
1086 | dma->max_xor = 8; | ||
1087 | |||
1088 | dma_cap_set(DMA_XOR, dma->cap_mask); | ||
1089 | dma->device_prep_dma_xor = ioat_prep_xor; | ||
1090 | |||
1091 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | ||
1092 | dma->device_prep_dma_xor_val = ioat_prep_xor_val; | ||
1093 | } | ||
1094 | |||
1095 | if (ioat_dma->cap & IOAT_CAP_PQ) { | ||
1096 | is_raid_device = true; | ||
1097 | |||
1098 | dma->device_prep_dma_pq = ioat_prep_pq; | ||
1099 | dma->device_prep_dma_pq_val = ioat_prep_pq_val; | ||
1100 | dma_cap_set(DMA_PQ, dma->cap_mask); | ||
1101 | dma_cap_set(DMA_PQ_VAL, dma->cap_mask); | ||
1102 | |||
1103 | if (ioat_dma->cap & IOAT_CAP_RAID16SS) | ||
1104 | dma_set_maxpq(dma, 16, 0); | ||
1105 | else | ||
1106 | dma_set_maxpq(dma, 8, 0); | ||
1107 | |||
1108 | if (!(ioat_dma->cap & IOAT_CAP_XOR)) { | ||
1109 | dma->device_prep_dma_xor = ioat_prep_pqxor; | ||
1110 | dma->device_prep_dma_xor_val = ioat_prep_pqxor_val; | ||
1111 | dma_cap_set(DMA_XOR, dma->cap_mask); | ||
1112 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | ||
1113 | |||
1114 | if (ioat_dma->cap & IOAT_CAP_RAID16SS) | ||
1115 | dma->max_xor = 16; | ||
1116 | else | ||
1117 | dma->max_xor = 8; | ||
1118 | } | ||
1119 | } | ||
1120 | |||
1121 | dma->device_tx_status = ioat_tx_status; | ||
1122 | ioat_dma->cleanup_fn = ioat_cleanup_event; | ||
1123 | ioat_dma->timer_fn = ioat_timer_event; | ||
1124 | |||
1125 | /* starting with CB3.3 super extended descriptors are supported */ | ||
1126 | if (ioat_dma->cap & IOAT_CAP_RAID16SS) { | ||
1127 | char pool_name[14]; | ||
1128 | int i; | ||
1129 | |||
1130 | for (i = 0; i < MAX_SED_POOLS; i++) { | ||
1131 | snprintf(pool_name, 14, "ioat_hw%d_sed", i); | ||
1132 | |||
1133 | /* allocate SED DMA pool */ | ||
1134 | ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name, | ||
1135 | &pdev->dev, | ||
1136 | SED_SIZE * (i + 1), 64, 0); | ||
1137 | if (!ioat_dma->sed_hw_pool[i]) | ||
1138 | return -ENOMEM; | ||
1139 | |||
1140 | } | ||
1141 | } | ||
1142 | |||
1143 | if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ))) | ||
1144 | dma_cap_set(DMA_PRIVATE, dma->cap_mask); | ||
1145 | |||
1146 | err = ioat_probe(ioat_dma); | ||
1147 | if (err) | ||
1148 | return err; | ||
1149 | |||
1150 | list_for_each_entry(c, &dma->channels, device_node) { | ||
1151 | ioat_chan = to_ioat_chan(c); | ||
1152 | writel(IOAT_DMA_DCA_ANY_CPU, | ||
1153 | ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); | ||
1154 | } | ||
1155 | |||
1156 | err = ioat_register(ioat_dma); | ||
1157 | if (err) | ||
1158 | return err; | ||
1159 | |||
1160 | ioat_kobject_add(ioat_dma, &ioat_ktype); | ||
1161 | |||
1162 | if (dca) | ||
1163 | ioat_dma->dca = ioat3_dca_init(pdev, ioat_dma->reg_base); | ||
1164 | |||
1165 | return 0; | ||
1166 | } | ||
1167 | |||
1168 | #define DRV_NAME "ioatdma" | ||
1169 | |||
1170 | static struct pci_driver ioat_pci_driver = { | ||
1171 | .name = DRV_NAME, | ||
1172 | .id_table = ioat_pci_tbl, | ||
1173 | .probe = ioat_pci_probe, | ||
1174 | .remove = ioat_remove, | ||
1175 | }; | ||
1176 | |||
1177 | static struct ioatdma_device * | ||
1178 | alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) | ||
1179 | { | ||
1180 | struct device *dev = &pdev->dev; | ||
1181 | struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); | ||
1182 | |||
1183 | if (!d) | ||
1184 | return NULL; | ||
1185 | d->pdev = pdev; | ||
1186 | d->reg_base = iobase; | ||
1187 | return d; | ||
1188 | } | ||
1189 | |||
1190 | static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
1191 | { | ||
1192 | void __iomem * const *iomap; | ||
1193 | struct device *dev = &pdev->dev; | ||
1194 | struct ioatdma_device *device; | ||
1195 | int err; | ||
1196 | |||
1197 | err = pcim_enable_device(pdev); | ||
1198 | if (err) | ||
1199 | return err; | ||
1200 | |||
1201 | err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME); | ||
1202 | if (err) | ||
1203 | return err; | ||
1204 | iomap = pcim_iomap_table(pdev); | ||
1205 | if (!iomap) | ||
1206 | return -ENOMEM; | ||
1207 | |||
1208 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
1209 | if (err) | ||
1210 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
1211 | if (err) | ||
1212 | return err; | ||
1213 | |||
1214 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
1215 | if (err) | ||
1216 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
1217 | if (err) | ||
1218 | return err; | ||
1219 | |||
1220 | device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); | ||
1221 | if (!device) | ||
1222 | return -ENOMEM; | ||
1223 | pci_set_master(pdev); | ||
1224 | pci_set_drvdata(pdev, device); | ||
1225 | |||
1226 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); | ||
1227 | if (device->version >= IOAT_VER_3_0) | ||
1228 | err = ioat3_dma_probe(device, ioat_dca_enabled); | ||
1229 | else | ||
1230 | return -ENODEV; | ||
1231 | |||
1232 | if (err) { | ||
1233 | dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); | ||
1234 | return -ENODEV; | ||
1235 | } | ||
1236 | |||
1237 | return 0; | ||
1238 | } | ||
1239 | |||
1240 | static void ioat_remove(struct pci_dev *pdev) | ||
1241 | { | ||
1242 | struct ioatdma_device *device = pci_get_drvdata(pdev); | ||
1243 | |||
1244 | if (!device) | ||
1245 | return; | ||
1246 | |||
1247 | dev_err(&pdev->dev, "Removing dma and dca services\n"); | ||
1248 | if (device->dca) { | ||
1249 | unregister_dca_provider(device->dca, &pdev->dev); | ||
1250 | free_dca_provider(device->dca); | ||
1251 | device->dca = NULL; | ||
1252 | } | ||
1253 | ioat_dma_remove(device); | ||
1254 | } | ||
1255 | |||
1256 | static int __init ioat_init_module(void) | ||
1257 | { | ||
1258 | int err = -ENOMEM; | ||
1259 | |||
1260 | pr_info("%s: Intel(R) QuickData Technology Driver %s\n", | ||
1261 | DRV_NAME, IOAT_DMA_VERSION); | ||
1262 | |||
1263 | ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent), | ||
1264 | 0, SLAB_HWCACHE_ALIGN, NULL); | ||
1265 | if (!ioat_cache) | ||
1266 | return -ENOMEM; | ||
1267 | |||
1268 | ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0); | ||
1269 | if (!ioat_sed_cache) | ||
1270 | goto err_ioat_cache; | ||
1271 | |||
1272 | err = pci_register_driver(&ioat_pci_driver); | ||
1273 | if (err) | ||
1274 | goto err_ioat3_cache; | ||
1275 | |||
1276 | return 0; | ||
1277 | |||
1278 | err_ioat3_cache: | ||
1279 | kmem_cache_destroy(ioat_sed_cache); | ||
1280 | |||
1281 | err_ioat_cache: | ||
1282 | kmem_cache_destroy(ioat_cache); | ||
1283 | |||
1284 | return err; | ||
1285 | } | ||
1286 | module_init(ioat_init_module); | ||
1287 | |||
1288 | static void __exit ioat_exit_module(void) | ||
1289 | { | ||
1290 | pci_unregister_driver(&ioat_pci_driver); | ||
1291 | kmem_cache_destroy(ioat_cache); | ||
1292 | } | ||
1293 | module_exit(ioat_exit_module); | ||
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c deleted file mode 100644 index 0ee610224ecd..000000000000 --- a/drivers/dma/ioat/pci.c +++ /dev/null | |||
@@ -1,278 +0,0 @@ | |||
1 | /* | ||
2 | * Intel I/OAT DMA Linux driver | ||
3 | * Copyright(c) 2007 - 2009 Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * The full GNU General Public License is included in this distribution in | ||
15 | * the file called "COPYING". | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | /* | ||
20 | * This driver supports an Intel I/OAT DMA engine, which does asynchronous | ||
21 | * copy operations. | ||
22 | */ | ||
23 | |||
24 | #include <linux/init.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/pci.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/dca.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/acpi.h> | ||
31 | #include "dma.h" | ||
32 | #include "registers.h" | ||
33 | #include "hw.h" | ||
34 | |||
35 | MODULE_VERSION(IOAT_DMA_VERSION); | ||
36 | MODULE_LICENSE("Dual BSD/GPL"); | ||
37 | MODULE_AUTHOR("Intel Corporation"); | ||
38 | |||
39 | static struct pci_device_id ioat_pci_tbl[] = { | ||
40 | /* I/OAT v3 platforms */ | ||
41 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, | ||
42 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, | ||
43 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) }, | ||
44 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) }, | ||
45 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) }, | ||
46 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, | ||
47 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, | ||
48 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, | ||
49 | |||
50 | /* I/OAT v3.2 platforms */ | ||
51 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) }, | ||
52 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) }, | ||
53 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) }, | ||
54 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) }, | ||
55 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) }, | ||
56 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) }, | ||
57 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) }, | ||
58 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) }, | ||
59 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, | ||
60 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, | ||
61 | |||
62 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) }, | ||
63 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) }, | ||
64 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) }, | ||
65 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) }, | ||
66 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) }, | ||
67 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) }, | ||
68 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) }, | ||
69 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) }, | ||
70 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, | ||
71 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, | ||
72 | |||
73 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) }, | ||
74 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) }, | ||
75 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) }, | ||
76 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) }, | ||
77 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) }, | ||
78 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) }, | ||
79 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) }, | ||
80 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) }, | ||
81 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, | ||
82 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, | ||
83 | |||
84 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) }, | ||
85 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) }, | ||
86 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) }, | ||
87 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) }, | ||
88 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) }, | ||
89 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) }, | ||
90 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) }, | ||
91 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) }, | ||
92 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) }, | ||
93 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) }, | ||
94 | |||
95 | /* I/OAT v3.3 platforms */ | ||
96 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, | ||
97 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, | ||
98 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, | ||
99 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, | ||
100 | |||
101 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) }, | ||
102 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) }, | ||
103 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, | ||
104 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, | ||
105 | |||
106 | { 0, } | ||
107 | }; | ||
108 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); | ||
109 | |||
110 | static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); | ||
111 | static void ioat_remove(struct pci_dev *pdev); | ||
112 | |||
113 | static int ioat_dca_enabled = 1; | ||
114 | module_param(ioat_dca_enabled, int, 0644); | ||
115 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); | ||
116 | |||
117 | struct kmem_cache *ioat_cache; | ||
118 | struct kmem_cache *ioat3_sed_cache; | ||
119 | |||
120 | #define DRV_NAME "ioatdma" | ||
121 | |||
122 | static struct pci_driver ioat_pci_driver = { | ||
123 | .name = DRV_NAME, | ||
124 | .id_table = ioat_pci_tbl, | ||
125 | .probe = ioat_pci_probe, | ||
126 | .remove = ioat_remove, | ||
127 | }; | ||
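Drivers whose module init/exit do nothing beyond registering the pci_driver can generate that boilerplate with module_pci_driver(). ioatdma cannot use the shorthand here because ioat_init_module() (further below) also creates the descriptor kmem caches; the sketch is shown only for comparison.

	/* Not applicable to ioatdma as-is: this would replace ioat_init_module()
	 * and ioat_exit_module() only if no module-level cache setup were needed. */
	module_pci_driver(ioat_pci_driver);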
128 | |||
129 | static struct ioatdma_device * | ||
130 | alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) | ||
131 | { | ||
132 | struct device *dev = &pdev->dev; | ||
133 | struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); | ||
134 | |||
135 | if (!d) | ||
136 | return NULL; | ||
137 | d->pdev = pdev; | ||
138 | d->reg_base = iobase; | ||
139 | return d; | ||
140 | } | ||
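alloc_ioatdma() relies on managed (devm_*) allocation: the ioatdma_device is freed automatically when the PCI device is unbound, so ioat_remove() never has to kfree() it. A minimal sketch of the same idiom with a hypothetical private structure (assumes <linux/pci.h> and <linux/slab.h>):

	struct example_priv {
		void __iomem *regs;
	};

	static int example_probe(struct pci_dev *pdev)
	{
		/* Freed by the driver core on unbind; no explicit kfree() needed. */
		struct example_priv *p = devm_kzalloc(&pdev->dev, sizeof(*p),
						      GFP_KERNEL);

		if (!p)
			return -ENOMEM;
		pci_set_drvdata(pdev, p);
		return 0;
	}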
141 | |||
142 | /* | ||
143 | * The dmaengine core assumes that async DMA devices will only be removed | ||
144 | * when they are no longer in use, or it assumes dma_async_device_unregister() | ||
145 | * will only be called by dma driver exit routines. But this assumption is | ||
146 | * not true for the IOAT driver, which calls dma_async_device_unregister() | ||
147 | * from ioat_remove(). So the current IOAT driver doesn't support device | ||
148 | * hot-removal, because hot-removing an in-use IOAT device may crash the | ||
149 | * system. | ||
150 | * | ||
151 | * This is a hack to disable IOAT devices under an ejectable PCI host | ||
152 | * bridge, so they won't break PCI host bridge hot-removal. | ||
153 | */ | ||
154 | static bool ioat_pci_has_ejectable_acpi_ancestor(struct pci_dev *pdev) | ||
155 | { | ||
156 | #ifdef CONFIG_ACPI | ||
157 | struct pci_bus *bus = pdev->bus; | ||
158 | struct acpi_device *adev; | ||
159 | |||
160 | while (bus->parent) | ||
161 | bus = bus->parent; | ||
162 | for (adev = ACPI_COMPANION(bus->bridge); adev; adev = adev->parent) | ||
163 | if (adev->flags.ejectable) | ||
164 | return true; | ||
165 | #endif | ||
166 | |||
167 | return false; | ||
168 | } | ||
169 | |||
170 | static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
171 | { | ||
172 | void __iomem * const *iomap; | ||
173 | struct device *dev = &pdev->dev; | ||
174 | struct ioatdma_device *device; | ||
175 | int err; | ||
176 | |||
177 | if (ioat_pci_has_ejectable_acpi_ancestor(pdev)) { | ||
178 | dev_dbg(&pdev->dev, "ignore ejectable IOAT device.\n"); | ||
179 | return -ENODEV; | ||
180 | } | ||
181 | |||
182 | err = pcim_enable_device(pdev); | ||
183 | if (err) | ||
184 | return err; | ||
185 | |||
186 | err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME); | ||
187 | if (err) | ||
188 | return err; | ||
189 | iomap = pcim_iomap_table(pdev); | ||
190 | if (!iomap) | ||
191 | return -ENOMEM; | ||
192 | |||
193 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
194 | if (err) | ||
195 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
196 | if (err) | ||
197 | return err; | ||
198 | |||
199 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
200 | if (err) | ||
201 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
202 | if (err) | ||
203 | return err; | ||
204 | |||
205 | device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); | ||
206 | if (!device) | ||
207 | return -ENOMEM; | ||
208 | pci_set_master(pdev); | ||
209 | pci_set_drvdata(pdev, device); | ||
210 | |||
211 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); | ||
212 | if (device->version >= IOAT_VER_3_0) | ||
213 | err = ioat3_dma_probe(device, ioat_dca_enabled); | ||
214 | else | ||
215 | return -ENODEV; | ||
216 | |||
217 | if (err) { | ||
218 | dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); | ||
219 | return -ENODEV; | ||
220 | } | ||
221 | |||
222 | return 0; | ||
223 | } | ||
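The probe routine above sets the streaming and coherent DMA masks separately, falling back from 64 to 32 bits in each case. On kernels that provide dma_set_mask_and_coherent(), the same fallback can be written more compactly; a hedged sketch, not part of this commit:

	/* Equivalent mask setup using the combined helper, if available. */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;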
224 | |||
225 | static void ioat_remove(struct pci_dev *pdev) | ||
226 | { | ||
227 | struct ioatdma_device *device = pci_get_drvdata(pdev); | ||
228 | |||
229 | if (!device) | ||
230 | return; | ||
231 | |||
232 | dev_err(&pdev->dev, "Removing dma and dca services\n"); | ||
233 | if (device->dca) { | ||
234 | unregister_dca_provider(device->dca, &pdev->dev); | ||
235 | free_dca_provider(device->dca); | ||
236 | device->dca = NULL; | ||
237 | } | ||
238 | ioat_dma_remove(device); | ||
239 | } | ||
240 | |||
241 | static int __init ioat_init_module(void) | ||
242 | { | ||
243 | int err = -ENOMEM; | ||
244 | |||
245 | pr_info("%s: Intel(R) QuickData Technology Driver %s\n", | ||
246 | DRV_NAME, IOAT_DMA_VERSION); | ||
247 | |||
248 | ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent), | ||
249 | 0, SLAB_HWCACHE_ALIGN, NULL); | ||
250 | if (!ioat_cache) | ||
251 | return -ENOMEM; | ||
252 | |||
253 | ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0); | ||
254 | if (!ioat3_sed_cache) | ||
255 | goto err_ioat_cache; | ||
256 | |||
257 | err = pci_register_driver(&ioat_pci_driver); | ||
258 | if (err) | ||
259 | goto err_ioat3_cache; | ||
260 | |||
261 | return 0; | ||
262 | |||
263 | err_ioat3_cache: | ||
264 | kmem_cache_destroy(ioat3_sed_cache); | ||
265 | |||
266 | err_ioat_cache: | ||
267 | kmem_cache_destroy(ioat_cache); | ||
268 | |||
269 | return err; | ||
270 | } | ||
271 | module_init(ioat_init_module); | ||
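For reference, the KMEM_CACHE() helper used in ioat_init_module() is shorthand for an explicit kmem_cache_create() call keyed off the structure's name, size, and alignment; roughly (paraphrasing the macro in <linux/slab.h>):

	/* Approximately what KMEM_CACHE(ioat_sed_ent, 0) expands to. */
	ioat3_sed_cache = kmem_cache_create("ioat_sed_ent",
					    sizeof(struct ioat_sed_ent),
					    __alignof__(struct ioat_sed_ent),
					    0, NULL);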
272 | |||
273 | static void __exit ioat_exit_module(void) | ||
274 | { | ||
275 | pci_unregister_driver(&ioat_pci_driver); | ||
276 | kmem_cache_destroy(ioat_cache); | ||
277 | } | ||
278 | module_exit(ioat_exit_module); | ||