-rw-r--r--  drivers/dma/ioat/dma.c    | 156
-rw-r--r--  drivers/dma/ioat/dma.h    |  50
-rw-r--r--  drivers/dma/ioat/dma_v2.c |  48
-rw-r--r--  drivers/dma/ioat/dma_v2.h |   6
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 126
5 files changed, 195 insertions, 191 deletions
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 60aa04d95a0b..3cf2639fb06a 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -93,30 +93,30 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) | |||
93 | 93 | ||
94 | /* common channel initialization */ | 94 | /* common channel initialization */ |
95 | void | 95 | void |
96 | ioat_init_channel(struct ioatdma_device *device, struct ioatdma_chan *ioat_chan, | 96 | ioat_init_channel(struct ioatdma_device *ioat_dma, |
97 | int idx) | 97 | struct ioatdma_chan *ioat_chan, int idx) |
98 | { | 98 | { |
99 | struct dma_device *dma = &device->common; | 99 | struct dma_device *dma = &ioat_dma->dma_dev; |
100 | struct dma_chan *c = &ioat_chan->dma_chan; | 100 | struct dma_chan *c = &ioat_chan->dma_chan; |
101 | unsigned long data = (unsigned long) c; | 101 | unsigned long data = (unsigned long) c; |
102 | 102 | ||
103 | ioat_chan->device = device; | 103 | ioat_chan->ioat_dma = ioat_dma; |
104 | ioat_chan->reg_base = device->reg_base + (0x80 * (idx + 1)); | 104 | ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1)); |
105 | spin_lock_init(&ioat_chan->cleanup_lock); | 105 | spin_lock_init(&ioat_chan->cleanup_lock); |
106 | ioat_chan->dma_chan.device = dma; | 106 | ioat_chan->dma_chan.device = dma; |
107 | dma_cookie_init(&ioat_chan->dma_chan); | 107 | dma_cookie_init(&ioat_chan->dma_chan); |
108 | list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels); | 108 | list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels); |
109 | device->idx[idx] = ioat_chan; | 109 | ioat_dma->idx[idx] = ioat_chan; |
110 | init_timer(&ioat_chan->timer); | 110 | init_timer(&ioat_chan->timer); |
111 | ioat_chan->timer.function = device->timer_fn; | 111 | ioat_chan->timer.function = ioat_dma->timer_fn; |
112 | ioat_chan->timer.data = data; | 112 | ioat_chan->timer.data = data; |
113 | tasklet_init(&ioat_chan->cleanup_task, device->cleanup_fn, data); | 113 | tasklet_init(&ioat_chan->cleanup_task, ioat_dma->cleanup_fn, data); |
114 | } | 114 | } |
115 | 115 | ||
116 | void ioat_stop(struct ioatdma_chan *ioat_chan) | 116 | void ioat_stop(struct ioatdma_chan *ioat_chan) |
117 | { | 117 | { |
118 | struct ioatdma_device *device = ioat_chan->device; | 118 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; |
119 | struct pci_dev *pdev = device->pdev; | 119 | struct pci_dev *pdev = ioat_dma->pdev; |
120 | int chan_id = chan_num(ioat_chan); | 120 | int chan_id = chan_num(ioat_chan); |
121 | struct msix_entry *msix; | 121 | struct msix_entry *msix; |
122 | 122 | ||
@@ -126,9 +126,9 @@ void ioat_stop(struct ioatdma_chan *ioat_chan) | |||
126 | clear_bit(IOAT_RUN, &ioat_chan->state); | 126 | clear_bit(IOAT_RUN, &ioat_chan->state); |
127 | 127 | ||
128 | /* flush inflight interrupts */ | 128 | /* flush inflight interrupts */ |
129 | switch (device->irq_mode) { | 129 | switch (ioat_dma->irq_mode) { |
130 | case IOAT_MSIX: | 130 | case IOAT_MSIX: |
131 | msix = &device->msix_entries[chan_id]; | 131 | msix = &ioat_dma->msix_entries[chan_id]; |
132 | synchronize_irq(msix->vector); | 132 | synchronize_irq(msix->vector); |
133 | break; | 133 | break; |
134 | case IOAT_MSI: | 134 | case IOAT_MSI: |
@@ -146,7 +146,7 @@ void ioat_stop(struct ioatdma_chan *ioat_chan) | |||
146 | tasklet_kill(&ioat_chan->cleanup_task); | 146 | tasklet_kill(&ioat_chan->cleanup_task); |
147 | 147 | ||
148 | /* final cleanup now that everything is quiesced and can't re-arm */ | 148 | /* final cleanup now that everything is quiesced and can't re-arm */ |
149 | device->cleanup_fn((unsigned long)&ioat_chan->dma_chan); | 149 | ioat_dma->cleanup_fn((unsigned long)&ioat_chan->dma_chan); |
150 | } | 150 | } |
151 | 151 | ||
152 | dma_addr_t ioat_get_current_completion(struct ioatdma_chan *ioat_chan) | 152 | dma_addr_t ioat_get_current_completion(struct ioatdma_chan *ioat_chan) |
@@ -189,14 +189,14 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
189 | struct dma_tx_state *txstate) | 189 | struct dma_tx_state *txstate) |
190 | { | 190 | { |
191 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | 191 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); |
192 | struct ioatdma_device *device = ioat_chan->device; | 192 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; |
193 | enum dma_status ret; | 193 | enum dma_status ret; |
194 | 194 | ||
195 | ret = dma_cookie_status(c, cookie, txstate); | 195 | ret = dma_cookie_status(c, cookie, txstate); |
196 | if (ret == DMA_COMPLETE) | 196 | if (ret == DMA_COMPLETE) |
197 | return ret; | 197 | return ret; |
198 | 198 | ||
199 | device->cleanup_fn((unsigned long) c); | 199 | ioat_dma->cleanup_fn((unsigned long) c); |
200 | 200 | ||
201 | return dma_cookie_status(c, cookie, txstate); | 201 | return dma_cookie_status(c, cookie, txstate); |
202 | } | 202 | } |
@@ -215,15 +215,15 @@ static void ioat_dma_test_callback(void *dma_async_param) | |||
215 | 215 | ||
216 | /** | 216 | /** |
217 | * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. | 217 | * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. |
218 | * @device: device to be tested | 218 | * @ioat_dma: dma device to be tested |
219 | */ | 219 | */ |
220 | int ioat_dma_self_test(struct ioatdma_device *device) | 220 | int ioat_dma_self_test(struct ioatdma_device *ioat_dma) |
221 | { | 221 | { |
222 | int i; | 222 | int i; |
223 | u8 *src; | 223 | u8 *src; |
224 | u8 *dest; | 224 | u8 *dest; |
225 | struct dma_device *dma = &device->common; | 225 | struct dma_device *dma = &ioat_dma->dma_dev; |
226 | struct device *dev = &device->pdev->dev; | 226 | struct device *dev = &ioat_dma->pdev->dev; |
227 | struct dma_chan *dma_chan; | 227 | struct dma_chan *dma_chan; |
228 | struct dma_async_tx_descriptor *tx; | 228 | struct dma_async_tx_descriptor *tx; |
229 | dma_addr_t dma_dest, dma_src; | 229 | dma_addr_t dma_dest, dma_src; |
@@ -266,8 +266,9 @@ int ioat_dma_self_test(struct ioatdma_device *device) | |||
266 | goto unmap_src; | 266 | goto unmap_src; |
267 | } | 267 | } |
268 | flags = DMA_PREP_INTERRUPT; | 268 | flags = DMA_PREP_INTERRUPT; |
269 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, | 269 | tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest, |
270 | IOAT_TEST_SIZE, flags); | 270 | dma_src, IOAT_TEST_SIZE, |
271 | flags); | ||
271 | if (!tx) { | 272 | if (!tx) { |
272 | dev_err(dev, "Self-test prep failed, disabling\n"); | 273 | dev_err(dev, "Self-test prep failed, disabling\n"); |
273 | err = -ENODEV; | 274 | err = -ENODEV; |
@@ -321,12 +322,12 @@ MODULE_PARM_DESC(ioat_interrupt_style, | |||
321 | 322 | ||
322 | /** | 323 | /** |
323 | * ioat_dma_setup_interrupts - setup interrupt handler | 324 | * ioat_dma_setup_interrupts - setup interrupt handler |
324 | * @device: ioat device | 325 | * @ioat_dma: ioat dma device |
325 | */ | 326 | */ |
326 | int ioat_dma_setup_interrupts(struct ioatdma_device *device) | 327 | int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma) |
327 | { | 328 | { |
328 | struct ioatdma_chan *ioat_chan; | 329 | struct ioatdma_chan *ioat_chan; |
329 | struct pci_dev *pdev = device->pdev; | 330 | struct pci_dev *pdev = ioat_dma->pdev; |
330 | struct device *dev = &pdev->dev; | 331 | struct device *dev = &pdev->dev; |
331 | struct msix_entry *msix; | 332 | struct msix_entry *msix; |
332 | int i, j, msixcnt; | 333 | int i, j, msixcnt; |
@@ -344,31 +345,31 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device) | |||
344 | 345 | ||
345 | msix: | 346 | msix: |
346 | /* The number of MSI-X vectors should equal the number of channels */ | 347 | /* The number of MSI-X vectors should equal the number of channels */ |
347 | msixcnt = device->common.chancnt; | 348 | msixcnt = ioat_dma->dma_dev.chancnt; |
348 | for (i = 0; i < msixcnt; i++) | 349 | for (i = 0; i < msixcnt; i++) |
349 | device->msix_entries[i].entry = i; | 350 | ioat_dma->msix_entries[i].entry = i; |
350 | 351 | ||
351 | err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt); | 352 | err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt); |
352 | if (err) | 353 | if (err) |
353 | goto msi; | 354 | goto msi; |
354 | 355 | ||
355 | for (i = 0; i < msixcnt; i++) { | 356 | for (i = 0; i < msixcnt; i++) { |
356 | msix = &device->msix_entries[i]; | 357 | msix = &ioat_dma->msix_entries[i]; |
357 | ioat_chan = ioat_chan_by_index(device, i); | 358 | ioat_chan = ioat_chan_by_index(ioat_dma, i); |
358 | err = devm_request_irq(dev, msix->vector, | 359 | err = devm_request_irq(dev, msix->vector, |
359 | ioat_dma_do_interrupt_msix, 0, | 360 | ioat_dma_do_interrupt_msix, 0, |
360 | "ioat-msix", ioat_chan); | 361 | "ioat-msix", ioat_chan); |
361 | if (err) { | 362 | if (err) { |
362 | for (j = 0; j < i; j++) { | 363 | for (j = 0; j < i; j++) { |
363 | msix = &device->msix_entries[j]; | 364 | msix = &ioat_dma->msix_entries[j]; |
364 | ioat_chan = ioat_chan_by_index(device, j); | 365 | ioat_chan = ioat_chan_by_index(ioat_dma, j); |
365 | devm_free_irq(dev, msix->vector, ioat_chan); | 366 | devm_free_irq(dev, msix->vector, ioat_chan); |
366 | } | 367 | } |
367 | goto msi; | 368 | goto msi; |
368 | } | 369 | } |
369 | } | 370 | } |
370 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; | 371 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; |
371 | device->irq_mode = IOAT_MSIX; | 372 | ioat_dma->irq_mode = IOAT_MSIX; |
372 | goto done; | 373 | goto done; |
373 | 374 | ||
374 | msi: | 375 | msi: |
@@ -377,69 +378,70 @@ msi: | |||
377 | goto intx; | 378 | goto intx; |
378 | 379 | ||
379 | err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0, | 380 | err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0, |
380 | "ioat-msi", device); | 381 | "ioat-msi", ioat_dma); |
381 | if (err) { | 382 | if (err) { |
382 | pci_disable_msi(pdev); | 383 | pci_disable_msi(pdev); |
383 | goto intx; | 384 | goto intx; |
384 | } | 385 | } |
385 | device->irq_mode = IOAT_MSI; | 386 | ioat_dma->irq_mode = IOAT_MSI; |
386 | goto done; | 387 | goto done; |
387 | 388 | ||
388 | intx: | 389 | intx: |
389 | err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, | 390 | err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, |
390 | IRQF_SHARED, "ioat-intx", device); | 391 | IRQF_SHARED, "ioat-intx", ioat_dma); |
391 | if (err) | 392 | if (err) |
392 | goto err_no_irq; | 393 | goto err_no_irq; |
393 | 394 | ||
394 | device->irq_mode = IOAT_INTX; | 395 | ioat_dma->irq_mode = IOAT_INTX; |
395 | done: | 396 | done: |
396 | if (device->intr_quirk) | 397 | if (ioat_dma->intr_quirk) |
397 | device->intr_quirk(device); | 398 | ioat_dma->intr_quirk(ioat_dma); |
398 | intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; | 399 | intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; |
399 | writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); | 400 | writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); |
400 | return 0; | 401 | return 0; |
401 | 402 | ||
402 | err_no_irq: | 403 | err_no_irq: |
403 | /* Disable all interrupt generation */ | 404 | /* Disable all interrupt generation */ |
404 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | 405 | writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); |
405 | device->irq_mode = IOAT_NOIRQ; | 406 | ioat_dma->irq_mode = IOAT_NOIRQ; |
406 | dev_err(dev, "no usable interrupts\n"); | 407 | dev_err(dev, "no usable interrupts\n"); |
407 | return err; | 408 | return err; |
408 | } | 409 | } |
409 | EXPORT_SYMBOL(ioat_dma_setup_interrupts); | 410 | EXPORT_SYMBOL(ioat_dma_setup_interrupts); |
410 | 411 | ||
411 | static void ioat_disable_interrupts(struct ioatdma_device *device) | 412 | static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma) |
412 | { | 413 | { |
413 | /* Disable all interrupt generation */ | 414 | /* Disable all interrupt generation */ |
414 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | 415 | writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); |
415 | } | 416 | } |
416 | 417 | ||
417 | int ioat_probe(struct ioatdma_device *device) | 418 | int ioat_probe(struct ioatdma_device *ioat_dma) |
418 | { | 419 | { |
419 | int err = -ENODEV; | 420 | int err = -ENODEV; |
420 | struct dma_device *dma = &device->common; | 421 | struct dma_device *dma = &ioat_dma->dma_dev; |
421 | struct pci_dev *pdev = device->pdev; | 422 | struct pci_dev *pdev = ioat_dma->pdev; |
422 | struct device *dev = &pdev->dev; | 423 | struct device *dev = &pdev->dev; |
423 | 424 | ||
424 | /* DMA coherent memory pool for DMA descriptor allocations */ | 425 | /* DMA coherent memory pool for DMA descriptor allocations */ |
425 | device->dma_pool = pci_pool_create("dma_desc_pool", pdev, | 426 | ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev, |
426 | sizeof(struct ioat_dma_descriptor), | 427 | sizeof(struct ioat_dma_descriptor), |
427 | 64, 0); | 428 | 64, 0); |
428 | if (!device->dma_pool) { | 429 | if (!ioat_dma->dma_pool) { |
429 | err = -ENOMEM; | 430 | err = -ENOMEM; |
430 | goto err_dma_pool; | 431 | goto err_dma_pool; |
431 | } | 432 | } |
432 | 433 | ||
433 | device->completion_pool = pci_pool_create("completion_pool", pdev, | 434 | ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev, |
434 | sizeof(u64), SMP_CACHE_BYTES, | 435 | sizeof(u64), |
435 | SMP_CACHE_BYTES); | 436 | SMP_CACHE_BYTES, |
437 | SMP_CACHE_BYTES); | ||
436 | 438 | ||
437 | if (!device->completion_pool) { | 439 | if (!ioat_dma->completion_pool) { |
438 | err = -ENOMEM; | 440 | err = -ENOMEM; |
439 | goto err_completion_pool; | 441 | goto err_completion_pool; |
440 | } | 442 | } |
441 | 443 | ||
442 | device->enumerate_channels(device); | 444 | ioat_dma->enumerate_channels(ioat_dma); |
443 | 445 | ||
444 | dma_cap_set(DMA_MEMCPY, dma->cap_mask); | 446 | dma_cap_set(DMA_MEMCPY, dma->cap_mask); |
445 | dma->dev = &pdev->dev; | 447 | dma->dev = &pdev->dev; |
@@ -449,34 +451,34 @@ int ioat_probe(struct ioatdma_device *device) | |||
449 | goto err_setup_interrupts; | 451 | goto err_setup_interrupts; |
450 | } | 452 | } |
451 | 453 | ||
452 | err = ioat_dma_setup_interrupts(device); | 454 | err = ioat_dma_setup_interrupts(ioat_dma); |
453 | if (err) | 455 | if (err) |
454 | goto err_setup_interrupts; | 456 | goto err_setup_interrupts; |
455 | 457 | ||
456 | err = device->self_test(device); | 458 | err = ioat_dma->self_test(ioat_dma); |
457 | if (err) | 459 | if (err) |
458 | goto err_self_test; | 460 | goto err_self_test; |
459 | 461 | ||
460 | return 0; | 462 | return 0; |
461 | 463 | ||
462 | err_self_test: | 464 | err_self_test: |
463 | ioat_disable_interrupts(device); | 465 | ioat_disable_interrupts(ioat_dma); |
464 | err_setup_interrupts: | 466 | err_setup_interrupts: |
465 | pci_pool_destroy(device->completion_pool); | 467 | pci_pool_destroy(ioat_dma->completion_pool); |
466 | err_completion_pool: | 468 | err_completion_pool: |
467 | pci_pool_destroy(device->dma_pool); | 469 | pci_pool_destroy(ioat_dma->dma_pool); |
468 | err_dma_pool: | 470 | err_dma_pool: |
469 | return err; | 471 | return err; |
470 | } | 472 | } |
471 | 473 | ||
472 | int ioat_register(struct ioatdma_device *device) | 474 | int ioat_register(struct ioatdma_device *ioat_dma) |
473 | { | 475 | { |
474 | int err = dma_async_device_register(&device->common); | 476 | int err = dma_async_device_register(&ioat_dma->dma_dev); |
475 | 477 | ||
476 | if (err) { | 478 | if (err) { |
477 | ioat_disable_interrupts(device); | 479 | ioat_disable_interrupts(ioat_dma); |
478 | pci_pool_destroy(device->completion_pool); | 480 | pci_pool_destroy(ioat_dma->completion_pool); |
479 | pci_pool_destroy(device->dma_pool); | 481 | pci_pool_destroy(ioat_dma->dma_pool); |
480 | } | 482 | } |
481 | 483 | ||
482 | return err; | 484 | return err; |
@@ -499,10 +501,10 @@ struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap); | |||
499 | static ssize_t version_show(struct dma_chan *c, char *page) | 501 | static ssize_t version_show(struct dma_chan *c, char *page) |
500 | { | 502 | { |
501 | struct dma_device *dma = c->device; | 503 | struct dma_device *dma = c->device; |
502 | struct ioatdma_device *device = to_ioatdma_device(dma); | 504 | struct ioatdma_device *ioat_dma = to_ioatdma_device(dma); |
503 | 505 | ||
504 | return sprintf(page, "%d.%d\n", | 506 | return sprintf(page, "%d.%d\n", |
505 | device->version >> 4, device->version & 0xf); | 507 | ioat_dma->version >> 4, ioat_dma->version & 0xf); |
506 | } | 508 | } |
507 | struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version); | 509 | struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version); |
508 | 510 | ||
@@ -524,9 +526,9 @@ const struct sysfs_ops ioat_sysfs_ops = { | |||
524 | .show = ioat_attr_show, | 526 | .show = ioat_attr_show, |
525 | }; | 527 | }; |
526 | 528 | ||
527 | void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type) | 529 | void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type) |
528 | { | 530 | { |
529 | struct dma_device *dma = &device->common; | 531 | struct dma_device *dma = &ioat_dma->dma_dev; |
530 | struct dma_chan *c; | 532 | struct dma_chan *c; |
531 | 533 | ||
532 | list_for_each_entry(c, &dma->channels, device_node) { | 534 | list_for_each_entry(c, &dma->channels, device_node) { |
@@ -545,9 +547,9 @@ void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type) | |||
545 | } | 547 | } |
546 | } | 548 | } |
547 | 549 | ||
548 | void ioat_kobject_del(struct ioatdma_device *device) | 550 | void ioat_kobject_del(struct ioatdma_device *ioat_dma) |
549 | { | 551 | { |
550 | struct dma_device *dma = &device->common; | 552 | struct dma_device *dma = &ioat_dma->dma_dev; |
551 | struct dma_chan *c; | 553 | struct dma_chan *c; |
552 | 554 | ||
553 | list_for_each_entry(c, &dma->channels, device_node) { | 555 | list_for_each_entry(c, &dma->channels, device_node) { |
@@ -560,18 +562,18 @@ void ioat_kobject_del(struct ioatdma_device *device) | |||
560 | } | 562 | } |
561 | } | 563 | } |
562 | 564 | ||
563 | void ioat_dma_remove(struct ioatdma_device *device) | 565 | void ioat_dma_remove(struct ioatdma_device *ioat_dma) |
564 | { | 566 | { |
565 | struct dma_device *dma = &device->common; | 567 | struct dma_device *dma = &ioat_dma->dma_dev; |
566 | 568 | ||
567 | ioat_disable_interrupts(device); | 569 | ioat_disable_interrupts(ioat_dma); |
568 | 570 | ||
569 | ioat_kobject_del(device); | 571 | ioat_kobject_del(ioat_dma); |
570 | 572 | ||
571 | dma_async_device_unregister(dma); | 573 | dma_async_device_unregister(dma); |
572 | 574 | ||
573 | pci_pool_destroy(device->dma_pool); | 575 | pci_pool_destroy(ioat_dma->dma_pool); |
574 | pci_pool_destroy(device->completion_pool); | 576 | pci_pool_destroy(ioat_dma->completion_pool); |
575 | 577 | ||
576 | INIT_LIST_HEAD(&dma->channels); | 578 | INIT_LIST_HEAD(&dma->channels); |
577 | } | 579 | } |
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 43290d1c88ed..11bbcf27f86f 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -30,11 +30,11 @@ | |||
30 | 30 | ||
31 | #define IOAT_DMA_DCA_ANY_CPU ~0 | 31 | #define IOAT_DMA_DCA_ANY_CPU ~0 |
32 | 32 | ||
33 | #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) | 33 | #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev) |
34 | #define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev) | 34 | #define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev) |
35 | #define to_pdev(ioat_chan) ((ioat_chan)->device->pdev) | 35 | #define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev) |
36 | 36 | ||
37 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) | 37 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80) |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * workaround for IOAT ver.3.0 null descriptor issue | 40 | * workaround for IOAT ver.3.0 null descriptor issue |
@@ -54,7 +54,7 @@ enum ioat_irq_mode { | |||
54 | * @pdev: PCI-Express device | 54 | * @pdev: PCI-Express device |
55 | * @reg_base: MMIO register space base address | 55 | * @reg_base: MMIO register space base address |
56 | * @dma_pool: for allocating DMA descriptors | 56 | * @dma_pool: for allocating DMA descriptors |
57 | * @common: embedded struct dma_device | 57 | * @dma_dev: embedded struct dma_device |
58 | * @version: version of ioatdma device | 58 | * @version: version of ioatdma device |
59 | * @msix_entries: irq handlers | 59 | * @msix_entries: irq handlers |
60 | * @idx: per channel data | 60 | * @idx: per channel data |
@@ -75,19 +75,19 @@ struct ioatdma_device { | |||
75 | struct pci_pool *completion_pool; | 75 | struct pci_pool *completion_pool; |
76 | #define MAX_SED_POOLS 5 | 76 | #define MAX_SED_POOLS 5 |
77 | struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; | 77 | struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; |
78 | struct dma_device common; | 78 | struct dma_device dma_dev; |
79 | u8 version; | 79 | u8 version; |
80 | struct msix_entry msix_entries[4]; | 80 | struct msix_entry msix_entries[4]; |
81 | struct ioatdma_chan *idx[4]; | 81 | struct ioatdma_chan *idx[4]; |
82 | struct dca_provider *dca; | 82 | struct dca_provider *dca; |
83 | enum ioat_irq_mode irq_mode; | 83 | enum ioat_irq_mode irq_mode; |
84 | u32 cap; | 84 | u32 cap; |
85 | void (*intr_quirk)(struct ioatdma_device *device); | 85 | void (*intr_quirk)(struct ioatdma_device *ioat_dma); |
86 | int (*enumerate_channels)(struct ioatdma_device *device); | 86 | int (*enumerate_channels)(struct ioatdma_device *ioat_dma); |
87 | int (*reset_hw)(struct ioatdma_chan *ioat_chan); | 87 | int (*reset_hw)(struct ioatdma_chan *ioat_chan); |
88 | void (*cleanup_fn)(unsigned long data); | 88 | void (*cleanup_fn)(unsigned long data); |
89 | void (*timer_fn)(unsigned long data); | 89 | void (*timer_fn)(unsigned long data); |
90 | int (*self_test)(struct ioatdma_device *device); | 90 | int (*self_test)(struct ioatdma_device *ioat_dma); |
91 | }; | 91 | }; |
92 | 92 | ||
93 | struct ioatdma_chan { | 93 | struct ioatdma_chan { |
@@ -107,7 +107,7 @@ struct ioatdma_chan { | |||
107 | #define COMPLETION_TIMEOUT msecs_to_jiffies(100) | 107 | #define COMPLETION_TIMEOUT msecs_to_jiffies(100) |
108 | #define IDLE_TIMEOUT msecs_to_jiffies(2000) | 108 | #define IDLE_TIMEOUT msecs_to_jiffies(2000) |
109 | #define RESET_DELAY msecs_to_jiffies(100) | 109 | #define RESET_DELAY msecs_to_jiffies(100) |
110 | struct ioatdma_device *device; | 110 | struct ioatdma_device *ioat_dma; |
111 | dma_addr_t completion_dma; | 111 | dma_addr_t completion_dma; |
112 | u64 *completion; | 112 | u64 *completion; |
113 | struct tasklet_struct cleanup_task; | 113 | struct tasklet_struct cleanup_task; |
@@ -188,14 +188,14 @@ __dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw, | |||
188 | ({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; }) | 188 | ({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; }) |
189 | 189 | ||
190 | static inline struct ioatdma_chan * | 190 | static inline struct ioatdma_chan * |
191 | ioat_chan_by_index(struct ioatdma_device *device, int index) | 191 | ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index) |
192 | { | 192 | { |
193 | return device->idx[index]; | 193 | return ioat_dma->idx[index]; |
194 | } | 194 | } |
195 | 195 | ||
196 | static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan) | 196 | static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan) |
197 | { | 197 | { |
198 | u8 ver = ioat_chan->device->version; | 198 | u8 ver = ioat_chan->ioat_dma->version; |
199 | u64 status; | 199 | u64 status; |
200 | u32 status_lo; | 200 | u32 status_lo; |
201 | 201 | ||
@@ -214,7 +214,7 @@ static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan) | |||
214 | 214 | ||
215 | static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan) | 215 | static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan) |
216 | { | 216 | { |
217 | u8 ver = ioat_chan->device->version; | 217 | u8 ver = ioat_chan->ioat_dma->version; |
218 | u64 status; | 218 | u64 status; |
219 | 219 | ||
220 | /* With IOAT v3.3 the status register is 64bit. */ | 220 | /* With IOAT v3.3 the status register is 64bit. */ |
@@ -242,7 +242,7 @@ static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan) | |||
242 | 242 | ||
243 | static inline void ioat_suspend(struct ioatdma_chan *ioat_chan) | 243 | static inline void ioat_suspend(struct ioatdma_chan *ioat_chan) |
244 | { | 244 | { |
245 | u8 ver = ioat_chan->device->version; | 245 | u8 ver = ioat_chan->ioat_dma->version; |
246 | 246 | ||
247 | writeb(IOAT_CHANCMD_SUSPEND, | 247 | writeb(IOAT_CHANCMD_SUSPEND, |
248 | ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | 248 | ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); |
@@ -250,7 +250,7 @@ static inline void ioat_suspend(struct ioatdma_chan *ioat_chan) | |||
250 | 250 | ||
251 | static inline void ioat_reset(struct ioatdma_chan *ioat_chan) | 251 | static inline void ioat_reset(struct ioatdma_chan *ioat_chan) |
252 | { | 252 | { |
253 | u8 ver = ioat_chan->device->version; | 253 | u8 ver = ioat_chan->ioat_dma->version; |
254 | 254 | ||
255 | writeb(IOAT_CHANCMD_RESET, | 255 | writeb(IOAT_CHANCMD_RESET, |
256 | ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | 256 | ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); |
@@ -258,7 +258,7 @@ static inline void ioat_reset(struct ioatdma_chan *ioat_chan) | |||
258 | 258 | ||
259 | static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan) | 259 | static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan) |
260 | { | 260 | { |
261 | u8 ver = ioat_chan->device->version; | 261 | u8 ver = ioat_chan->ioat_dma->version; |
262 | u8 cmd; | 262 | u8 cmd; |
263 | 263 | ||
264 | cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | 264 | cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); |
@@ -291,20 +291,20 @@ static inline bool is_ioat_bug(unsigned long err) | |||
291 | return !!err; | 291 | return !!err; |
292 | } | 292 | } |
293 | 293 | ||
294 | int ioat_probe(struct ioatdma_device *device); | 294 | int ioat_probe(struct ioatdma_device *ioat_dma); |
295 | int ioat_register(struct ioatdma_device *device); | 295 | int ioat_register(struct ioatdma_device *ioat_dma); |
296 | int ioat_dma_self_test(struct ioatdma_device *device); | 296 | int ioat_dma_self_test(struct ioatdma_device *ioat_dma); |
297 | void ioat_dma_remove(struct ioatdma_device *device); | 297 | void ioat_dma_remove(struct ioatdma_device *ioat_dma); |
298 | struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 298 | struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
299 | void ioat_init_channel(struct ioatdma_device *device, | 299 | void ioat_init_channel(struct ioatdma_device *ioat_dma, |
300 | struct ioatdma_chan *ioat_chan, int idx); | 300 | struct ioatdma_chan *ioat_chan, int idx); |
301 | enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | 301 | enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, |
302 | struct dma_tx_state *txstate); | 302 | struct dma_tx_state *txstate); |
303 | bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan, | 303 | bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan, |
304 | dma_addr_t *phys_complete); | 304 | dma_addr_t *phys_complete); |
305 | void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); | 305 | void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type); |
306 | void ioat_kobject_del(struct ioatdma_device *device); | 306 | void ioat_kobject_del(struct ioatdma_device *ioat_dma); |
307 | int ioat_dma_setup_interrupts(struct ioatdma_device *device); | 307 | int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma); |
308 | void ioat_stop(struct ioatdma_chan *ioat_chan); | 308 | void ioat_stop(struct ioatdma_chan *ioat_chan); |
309 | extern const struct sysfs_ops ioat_sysfs_ops; | 309 | extern const struct sysfs_ops ioat_sysfs_ops; |
310 | extern struct ioat_sysfs_entry ioat_version_attr; | 310 | extern struct ioat_sysfs_entry ioat_version_attr; |
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 0f4b2435e707..020c1fe31ca1 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -187,25 +187,25 @@ int ioat2_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo) | |||
187 | 187 | ||
188 | /** | 188 | /** |
189 | * ioat2_enumerate_channels - find and initialize the device's channels | 189 | * ioat2_enumerate_channels - find and initialize the device's channels |
190 | * @device: the device to be enumerated | 190 | * @ioat_dma: the ioat dma device to be enumerated |
191 | */ | 191 | */ |
192 | int ioat2_enumerate_channels(struct ioatdma_device *device) | 192 | int ioat2_enumerate_channels(struct ioatdma_device *ioat_dma) |
193 | { | 193 | { |
194 | struct ioatdma_chan *ioat_chan; | 194 | struct ioatdma_chan *ioat_chan; |
195 | struct device *dev = &device->pdev->dev; | 195 | struct device *dev = &ioat_dma->pdev->dev; |
196 | struct dma_device *dma = &device->common; | 196 | struct dma_device *dma = &ioat_dma->dma_dev; |
197 | u8 xfercap_log; | 197 | u8 xfercap_log; |
198 | int i; | 198 | int i; |
199 | 199 | ||
200 | INIT_LIST_HEAD(&dma->channels); | 200 | INIT_LIST_HEAD(&dma->channels); |
201 | dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); | 201 | dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET); |
202 | dma->chancnt &= 0x1f; /* bits [4:0] valid */ | 202 | dma->chancnt &= 0x1f; /* bits [4:0] valid */ |
203 | if (dma->chancnt > ARRAY_SIZE(device->idx)) { | 203 | if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) { |
204 | dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", | 204 | dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", |
205 | dma->chancnt, ARRAY_SIZE(device->idx)); | 205 | dma->chancnt, ARRAY_SIZE(ioat_dma->idx)); |
206 | dma->chancnt = ARRAY_SIZE(device->idx); | 206 | dma->chancnt = ARRAY_SIZE(ioat_dma->idx); |
207 | } | 207 | } |
208 | xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET); | 208 | xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET); |
209 | xfercap_log &= 0x1f; /* bits [4:0] valid */ | 209 | xfercap_log &= 0x1f; /* bits [4:0] valid */ |
210 | if (xfercap_log == 0) | 210 | if (xfercap_log == 0) |
211 | return 0; | 211 | return 0; |
@@ -216,10 +216,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device) | |||
216 | if (!ioat_chan) | 216 | if (!ioat_chan) |
217 | break; | 217 | break; |
218 | 218 | ||
219 | ioat_init_channel(device, ioat_chan, i); | 219 | ioat_init_channel(ioat_dma, ioat_chan, i); |
220 | ioat_chan->xfercap_log = xfercap_log; | 220 | ioat_chan->xfercap_log = xfercap_log; |
221 | spin_lock_init(&ioat_chan->prep_lock); | 221 | spin_lock_init(&ioat_chan->prep_lock); |
222 | if (device->reset_hw(ioat_chan)) { | 222 | if (ioat_dma->reset_hw(ioat_chan)) { |
223 | i = 0; | 223 | i = 0; |
224 | break; | 224 | break; |
225 | } | 225 | } |
@@ -258,18 +258,18 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f | |||
258 | { | 258 | { |
259 | struct ioat_dma_descriptor *hw; | 259 | struct ioat_dma_descriptor *hw; |
260 | struct ioat_ring_ent *desc; | 260 | struct ioat_ring_ent *desc; |
261 | struct ioatdma_device *dma; | 261 | struct ioatdma_device *ioat_dma; |
262 | dma_addr_t phys; | 262 | dma_addr_t phys; |
263 | 263 | ||
264 | dma = to_ioatdma_device(chan->device); | 264 | ioat_dma = to_ioatdma_device(chan->device); |
265 | hw = pci_pool_alloc(dma->dma_pool, flags, &phys); | 265 | hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys); |
266 | if (!hw) | 266 | if (!hw) |
267 | return NULL; | 267 | return NULL; |
268 | memset(hw, 0, sizeof(*hw)); | 268 | memset(hw, 0, sizeof(*hw)); |
269 | 269 | ||
270 | desc = kmem_cache_zalloc(ioat2_cache, flags); | 270 | desc = kmem_cache_zalloc(ioat2_cache, flags); |
271 | if (!desc) { | 271 | if (!desc) { |
272 | pci_pool_free(dma->dma_pool, hw, phys); | 272 | pci_pool_free(ioat_dma->dma_pool, hw, phys); |
273 | return NULL; | 273 | return NULL; |
274 | } | 274 | } |
275 | 275 | ||
@@ -282,10 +282,10 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f | |||
282 | 282 | ||
283 | static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) | 283 | static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) |
284 | { | 284 | { |
285 | struct ioatdma_device *dma; | 285 | struct ioatdma_device *ioat_dma; |
286 | 286 | ||
287 | dma = to_ioatdma_device(chan->device); | 287 | ioat_dma = to_ioatdma_device(chan->device); |
288 | pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys); | 288 | pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys); |
289 | kmem_cache_free(ioat2_cache, desc); | 289 | kmem_cache_free(ioat2_cache, desc); |
290 | } | 290 | } |
291 | 291 | ||
@@ -348,7 +348,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) | |||
348 | /* allocate a completion writeback area */ | 348 | /* allocate a completion writeback area */ |
349 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ | 349 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ |
350 | ioat_chan->completion = | 350 | ioat_chan->completion = |
351 | pci_pool_alloc(ioat_chan->device->completion_pool, | 351 | pci_pool_alloc(ioat_chan->ioat_dma->completion_pool, |
352 | GFP_KERNEL, &ioat_chan->completion_dma); | 352 | GFP_KERNEL, &ioat_chan->completion_dma); |
353 | if (!ioat_chan->completion) | 353 | if (!ioat_chan->completion) |
354 | return -ENOMEM; | 354 | return -ENOMEM; |
@@ -554,10 +554,10 @@ int ioat2_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs) | |||
554 | */ | 554 | */ |
555 | if (time_is_before_jiffies(ioat_chan->timer.expires) | 555 | if (time_is_before_jiffies(ioat_chan->timer.expires) |
556 | && timer_pending(&ioat_chan->timer)) { | 556 | && timer_pending(&ioat_chan->timer)) { |
557 | struct ioatdma_device *device = ioat_chan->device; | 557 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; |
558 | 558 | ||
559 | mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); | 559 | mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); |
560 | device->timer_fn((unsigned long)ioat_chan); | 560 | ioat_dma->timer_fn((unsigned long)ioat_chan); |
561 | } | 561 | } |
562 | 562 | ||
563 | return -ENOMEM; | 563 | return -ENOMEM; |
@@ -617,7 +617,7 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, | |||
617 | void ioat2_free_chan_resources(struct dma_chan *c) | 617 | void ioat2_free_chan_resources(struct dma_chan *c) |
618 | { | 618 | { |
619 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | 619 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); |
620 | struct ioatdma_device *device = ioat_chan->device; | 620 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; |
621 | struct ioat_ring_ent *desc; | 621 | struct ioat_ring_ent *desc; |
622 | const int total_descs = 1 << ioat_chan->alloc_order; | 622 | const int total_descs = 1 << ioat_chan->alloc_order; |
623 | int descs; | 623 | int descs; |
@@ -630,7 +630,7 @@ void ioat2_free_chan_resources(struct dma_chan *c) | |||
630 | return; | 630 | return; |
631 | 631 | ||
632 | ioat_stop(ioat_chan); | 632 | ioat_stop(ioat_chan); |
633 | device->reset_hw(ioat_chan); | 633 | ioat_dma->reset_hw(ioat_chan); |
634 | 634 | ||
635 | spin_lock_bh(&ioat_chan->cleanup_lock); | 635 | spin_lock_bh(&ioat_chan->cleanup_lock); |
636 | spin_lock_bh(&ioat_chan->prep_lock); | 636 | spin_lock_bh(&ioat_chan->prep_lock); |
@@ -654,7 +654,7 @@ void ioat2_free_chan_resources(struct dma_chan *c) | |||
654 | kfree(ioat_chan->ring); | 654 | kfree(ioat_chan->ring); |
655 | ioat_chan->ring = NULL; | 655 | ioat_chan->ring = NULL; |
656 | ioat_chan->alloc_order = 0; | 656 | ioat_chan->alloc_order = 0; |
657 | pci_pool_free(device->completion_pool, ioat_chan->completion, | 657 | pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion, |
658 | ioat_chan->completion_dma); | 658 | ioat_chan->completion_dma); |
659 | spin_unlock_bh(&ioat_chan->prep_lock); | 659 | spin_unlock_bh(&ioat_chan->prep_lock); |
660 | spin_unlock_bh(&ioat_chan->cleanup_lock); | 660 | spin_unlock_bh(&ioat_chan->cleanup_lock); |
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index d3b73c8819cd..7d69ed3edab4 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -121,11 +121,11 @@ ioat2_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr) | |||
121 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | 121 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); |
122 | } | 122 | } |
123 | 123 | ||
124 | int ioat2_dma_probe(struct ioatdma_device *dev, int dca); | 124 | int ioat2_dma_probe(struct ioatdma_device *ioat_dma, int dca); |
125 | int ioat3_dma_probe(struct ioatdma_device *dev, int dca); | 125 | int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca); |
126 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 126 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
127 | int ioat2_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs); | 127 | int ioat2_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs); |
128 | int ioat2_enumerate_channels(struct ioatdma_device *device); | 128 | int ioat2_enumerate_channels(struct ioatdma_device *ioat_dma); |
129 | struct dma_async_tx_descriptor * | 129 | struct dma_async_tx_descriptor * |
130 | ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, | 130 | ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, |
131 | dma_addr_t dma_src, size_t len, unsigned long flags); | 131 | dma_addr_t dma_src, size_t len, unsigned long flags); |
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 9fb9b450c154..8ad4b07e7b85 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -254,7 +254,7 @@ static void pq16_set_src(struct ioat_raw_descriptor *desc[3], | |||
254 | } | 254 | } |
255 | 255 | ||
256 | static struct ioat_sed_ent * | 256 | static struct ioat_sed_ent * |
257 | ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) | 257 | ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool) |
258 | { | 258 | { |
259 | struct ioat_sed_ent *sed; | 259 | struct ioat_sed_ent *sed; |
260 | gfp_t flags = __GFP_ZERO | GFP_ATOMIC; | 260 | gfp_t flags = __GFP_ZERO | GFP_ATOMIC; |
@@ -264,7 +264,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) | |||
264 | return NULL; | 264 | return NULL; |
265 | 265 | ||
266 | sed->hw_pool = hw_pool; | 266 | sed->hw_pool = hw_pool; |
267 | sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], | 267 | sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool], |
268 | flags, &sed->dma); | 268 | flags, &sed->dma); |
269 | if (!sed->hw) { | 269 | if (!sed->hw) { |
270 | kmem_cache_free(ioat3_sed_cache, sed); | 270 | kmem_cache_free(ioat3_sed_cache, sed); |
@@ -274,12 +274,13 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) | |||
274 | return sed; | 274 | return sed; |
275 | } | 275 | } |
276 | 276 | ||
277 | static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed) | 277 | static void |
278 | ioat3_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed) | ||
278 | { | 279 | { |
279 | if (!sed) | 280 | if (!sed) |
280 | return; | 281 | return; |
281 | 282 | ||
282 | dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); | 283 | dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); |
283 | kmem_cache_free(ioat3_sed_cache, sed); | 284 | kmem_cache_free(ioat3_sed_cache, sed); |
284 | } | 285 | } |
285 | 286 | ||
@@ -370,7 +371,7 @@ desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc) | |||
370 | */ | 371 | */ |
371 | static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) | 372 | static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) |
372 | { | 373 | { |
373 | struct ioatdma_device *device = ioat_chan->device; | 374 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; |
374 | struct ioat_ring_ent *desc; | 375 | struct ioat_ring_ent *desc; |
375 | bool seen_current = false; | 376 | bool seen_current = false; |
376 | int idx = ioat_chan->tail, i; | 377 | int idx = ioat_chan->tail, i; |
@@ -399,7 +400,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) | |||
399 | dump_desc_dbg(ioat_chan, desc); | 400 | dump_desc_dbg(ioat_chan, desc); |
400 | 401 | ||
401 | /* set err stat if we are using dwbes */ | 402 | /* set err stat if we are using dwbes */ |
402 | if (device->cap & IOAT_CAP_DWBES) | 403 | if (ioat_dma->cap & IOAT_CAP_DWBES) |
403 | desc_get_errstat(ioat_chan, desc); | 404 | desc_get_errstat(ioat_chan, desc); |
404 | 405 | ||
405 | tx = &desc->txd; | 406 | tx = &desc->txd; |
@@ -423,7 +424,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) | |||
423 | 424 | ||
424 | /* cleanup super extended descriptors */ | 425 | /* cleanup super extended descriptors */ |
425 | if (desc->sed) { | 426 | if (desc->sed) { |
426 | ioat3_free_sed(device, desc->sed); | 427 | ioat3_free_sed(ioat_dma, desc->sed); |
427 | desc->sed = NULL; | 428 | desc->sed = NULL; |
428 | } | 429 | } |
429 | } | 430 | } |
@@ -440,7 +441,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) | |||
440 | } | 441 | } |
441 | /* 5 microsecond delay per pending descriptor */ | 442 | /* 5 microsecond delay per pending descriptor */ |
442 | writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), | 443 | writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), |
443 | ioat_chan->device->reg_base + IOAT_INTRDELAY_OFFSET); | 444 | ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET); |
444 | } | 445 | } |
445 | 446 | ||
446 | static void ioat3_cleanup(struct ioatdma_chan *ioat_chan) | 447 | static void ioat3_cleanup(struct ioatdma_chan *ioat_chan) |
@@ -834,7 +835,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
834 | size_t len, unsigned long flags) | 835 | size_t len, unsigned long flags) |
835 | { | 836 | { |
836 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | 837 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); |
837 | struct ioatdma_device *device = ioat_chan->device; | 838 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; |
838 | struct ioat_ring_ent *compl_desc; | 839 | struct ioat_ring_ent *compl_desc; |
839 | struct ioat_ring_ent *desc; | 840 | struct ioat_ring_ent *desc; |
840 | struct ioat_ring_ent *ext; | 841 | struct ioat_ring_ent *ext; |
@@ -845,7 +846,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
845 | u32 offset = 0; | 846 | u32 offset = 0; |
846 | u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; | 847 | u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; |
847 | int i, s, idx, with_ext, num_descs; | 848 | int i, s, idx, with_ext, num_descs; |
848 | int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0; | 849 | int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0; |
849 | 850 | ||
850 | dev_dbg(to_dev(ioat_chan), "%s\n", __func__); | 851 | dev_dbg(to_dev(ioat_chan), "%s\n", __func__); |
851 | /* the engine requires at least two sources (we provide | 852 | /* the engine requires at least two sources (we provide |
@@ -911,7 +912,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
911 | pq->ctl = 0; | 912 | pq->ctl = 0; |
912 | pq->ctl_f.op = op; | 913 | pq->ctl_f.op = op; |
913 | /* we turn on descriptor write back error status */ | 914 | /* we turn on descriptor write back error status */ |
914 | if (device->cap & IOAT_CAP_DWBES) | 915 | if (ioat_dma->cap & IOAT_CAP_DWBES) |
915 | pq->ctl_f.wb_en = result ? 1 : 0; | 916 | pq->ctl_f.wb_en = result ? 1 : 0; |
916 | pq->ctl_f.src_cnt = src_cnt_to_hw(s); | 917 | pq->ctl_f.src_cnt = src_cnt_to_hw(s); |
917 | pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); | 918 | pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); |
@@ -958,7 +959,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
958 | size_t len, unsigned long flags) | 959 | size_t len, unsigned long flags) |
959 | { | 960 | { |
960 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | 961 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); |
961 | struct ioatdma_device *device = ioat_chan->device; | 962 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; |
962 | struct ioat_ring_ent *desc; | 963 | struct ioat_ring_ent *desc; |
963 | size_t total_len = len; | 964 | size_t total_len = len; |
964 | struct ioat_pq_descriptor *pq; | 965 | struct ioat_pq_descriptor *pq; |
@@ -994,7 +995,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
994 | 995 | ||
995 | descs[0] = (struct ioat_raw_descriptor *) pq; | 996 | descs[0] = (struct ioat_raw_descriptor *) pq; |
996 | 997 | ||
997 | desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3); | 998 | desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3); |
998 | if (!desc->sed) { | 999 | if (!desc->sed) { |
999 | dev_err(to_dev(ioat_chan), | 1000 | dev_err(to_dev(ioat_chan), |
1000 | "%s: no free sed entries\n", __func__); | 1001 | "%s: no free sed entries\n", __func__); |
@@ -1026,7 +1027,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
1026 | pq->ctl_f.op = op; | 1027 | pq->ctl_f.op = op; |
1027 | pq->ctl_f.src_cnt = src16_cnt_to_hw(s); | 1028 | pq->ctl_f.src_cnt = src16_cnt_to_hw(s); |
1028 | /* we turn on descriptor write back error status */ | 1029 | /* we turn on descriptor write back error status */ |
1029 | if (device->cap & IOAT_CAP_DWBES) | 1030 | if (ioat_dma->cap & IOAT_CAP_DWBES) |
1030 | pq->ctl_f.wb_en = result ? 1 : 0; | 1031 | pq->ctl_f.wb_en = result ? 1 : 0; |
1031 | pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); | 1032 | pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); |
1032 | pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); | 1033 | pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); |
@@ -1208,7 +1209,7 @@ static void ioat3_dma_test_callback(void *dma_async_param) | |||
1208 | } | 1209 | } |
1209 | 1210 | ||
1210 | #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ | 1211 | #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ |
1211 | static int ioat_xor_val_self_test(struct ioatdma_device *device) | 1212 | static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) |
1212 | { | 1213 | { |
1213 | int i, src_idx; | 1214 | int i, src_idx; |
1214 | struct page *dest; | 1215 | struct page *dest; |
@@ -1225,8 +1226,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1225 | int err = 0; | 1226 | int err = 0; |
1226 | struct completion cmp; | 1227 | struct completion cmp; |
1227 | unsigned long tmo; | 1228 | unsigned long tmo; |
1228 | struct device *dev = &device->pdev->dev; | 1229 | struct device *dev = &ioat_dma->pdev->dev; |
1229 | struct dma_device *dma = &device->common; | 1230 | struct dma_device *dma = &ioat_dma->dma_dev; |
1230 | u8 op = 0; | 1231 | u8 op = 0; |
1231 | 1232 | ||
1232 | dev_dbg(dev, "%s\n", __func__); | 1233 | dev_dbg(dev, "%s\n", __func__); |
@@ -1473,35 +1474,35 @@ out: | |||
1473 | return err; | 1474 | return err; |
1474 | } | 1475 | } |
1475 | 1476 | ||
1476 | static int ioat3_dma_self_test(struct ioatdma_device *device) | 1477 | static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma) |
1477 | { | 1478 | { |
1478 | int rc = ioat_dma_self_test(device); | 1479 | int rc = ioat_dma_self_test(ioat_dma); |
1479 | 1480 | ||
1480 | if (rc) | 1481 | if (rc) |
1481 | return rc; | 1482 | return rc; |
1482 | 1483 | ||
1483 | rc = ioat_xor_val_self_test(device); | 1484 | rc = ioat_xor_val_self_test(ioat_dma); |
1484 | if (rc) | 1485 | if (rc) |
1485 | return rc; | 1486 | return rc; |
1486 | 1487 | ||
1487 | return 0; | 1488 | return 0; |
1488 | } | 1489 | } |
1489 | 1490 | ||
1490 | static int ioat3_irq_reinit(struct ioatdma_device *device) | 1491 | static int ioat3_irq_reinit(struct ioatdma_device *ioat_dma) |
1491 | { | 1492 | { |
1492 | struct pci_dev *pdev = device->pdev; | 1493 | struct pci_dev *pdev = ioat_dma->pdev; |
1493 | int irq = pdev->irq, i; | 1494 | int irq = pdev->irq, i; |
1494 | 1495 | ||
1495 | if (!is_bwd_ioat(pdev)) | 1496 | if (!is_bwd_ioat(pdev)) |
1496 | return 0; | 1497 | return 0; |
1497 | 1498 | ||
1498 | switch (device->irq_mode) { | 1499 | switch (ioat_dma->irq_mode) { |
1499 | case IOAT_MSIX: | 1500 | case IOAT_MSIX: |
1500 | for (i = 0; i < device->common.chancnt; i++) { | 1501 | for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) { |
1501 | struct msix_entry *msix = &device->msix_entries[i]; | 1502 | struct msix_entry *msix = &ioat_dma->msix_entries[i]; |
1502 | struct ioatdma_chan *ioat_chan; | 1503 | struct ioatdma_chan *ioat_chan; |
1503 | 1504 | ||
1504 | ioat_chan = ioat_chan_by_index(device, i); | 1505 | ioat_chan = ioat_chan_by_index(ioat_dma, i); |
1505 | devm_free_irq(&pdev->dev, msix->vector, ioat_chan); | 1506 | devm_free_irq(&pdev->dev, msix->vector, ioat_chan); |
1506 | } | 1507 | } |
1507 | 1508 | ||
@@ -1511,14 +1512,14 @@ static int ioat3_irq_reinit(struct ioatdma_device *device) | |||
1511 | pci_disable_msi(pdev); | 1512 | pci_disable_msi(pdev); |
1512 | /* fall through */ | 1513 | /* fall through */ |
1513 | case IOAT_INTX: | 1514 | case IOAT_INTX: |
1514 | devm_free_irq(&pdev->dev, irq, device); | 1515 | devm_free_irq(&pdev->dev, irq, ioat_dma); |
1515 | break; | 1516 | break; |
1516 | default: | 1517 | default: |
1517 | return 0; | 1518 | return 0; |
1518 | } | 1519 | } |
1519 | device->irq_mode = IOAT_NOIRQ; | 1520 | ioat_dma->irq_mode = IOAT_NOIRQ; |
1520 | 1521 | ||
1521 | return ioat_dma_setup_interrupts(device); | 1522 | return ioat_dma_setup_interrupts(ioat_dma); |
1522 | } | 1523 | } |
1523 | 1524 | ||
1524 | static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan) | 1525 | static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan) |
@@ -1526,8 +1527,8 @@ static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan) | |||
1526 | /* throw away whatever the channel was doing and get it | 1527 | /* throw away whatever the channel was doing and get it |
1527 | * initialized, with ioat3 specific workarounds | 1528 | * initialized, with ioat3 specific workarounds |
1528 | */ | 1529 | */ |
1529 | struct ioatdma_device *device = ioat_chan->device; | 1530 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; |
1530 | struct pci_dev *pdev = device->pdev; | 1531 | struct pci_dev *pdev = ioat_dma->pdev; |
1531 | u32 chanerr; | 1532 | u32 chanerr; |
1532 | u16 dev_id; | 1533 | u16 dev_id; |
1533 | int err; | 1534 | int err; |
@@ -1537,7 +1538,7 @@ static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan) | |||
1537 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | 1538 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); |
1538 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | 1539 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); |
1539 | 1540 | ||
1540 | if (device->version < IOAT_VER_3_3) { | 1541 | if (ioat_dma->version < IOAT_VER_3_3) { |
1541 | /* clear any pending errors */ | 1542 | /* clear any pending errors */ |
1542 | err = pci_read_config_dword(pdev, | 1543 | err = pci_read_config_dword(pdev, |
1543 | IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); | 1544 | IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); |
@@ -1562,7 +1563,7 @@ static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan) | |||
1562 | 1563 | ||
1563 | err = ioat2_reset_sync(ioat_chan, msecs_to_jiffies(200)); | 1564 | err = ioat2_reset_sync(ioat_chan, msecs_to_jiffies(200)); |
1564 | if (!err) | 1565 | if (!err) |
1565 | err = ioat3_irq_reinit(device); | 1566 | err = ioat3_irq_reinit(ioat_dma); |
1566 | 1567 | ||
1567 | if (err) | 1568 | if (err) |
1568 | dev_err(&pdev->dev, "Failed to reset: %d\n", err); | 1569 | dev_err(&pdev->dev, "Failed to reset: %d\n", err); |
@@ -1570,20 +1571,20 @@ static int ioat3_reset_hw(struct ioatdma_chan *ioat_chan) | |||
1570 | return err; | 1571 | return err; |
1571 | } | 1572 | } |
1572 | 1573 | ||
1573 | static void ioat3_intr_quirk(struct ioatdma_device *device) | 1574 | static void ioat3_intr_quirk(struct ioatdma_device *ioat_dma) |
1574 | { | 1575 | { |
1575 | struct dma_device *dma; | 1576 | struct dma_device *dma; |
1576 | struct dma_chan *c; | 1577 | struct dma_chan *c; |
1577 | struct ioatdma_chan *ioat_chan; | 1578 | struct ioatdma_chan *ioat_chan; |
1578 | u32 errmask; | 1579 | u32 errmask; |
1579 | 1580 | ||
1580 | dma = &device->common; | 1581 | dma = &ioat_dma->dma_dev; |
1581 | 1582 | ||
1582 | /* | 1583 | /* |
1583 | * if we have descriptor write back error status, we mask the | 1584 | * if we have descriptor write back error status, we mask the |
1584 | * error interrupts | 1585 | * error interrupts |
1585 | */ | 1586 | */ |
1586 | if (device->cap & IOAT_CAP_DWBES) { | 1587 | if (ioat_dma->cap & IOAT_CAP_DWBES) { |
1587 | list_for_each_entry(c, &dma->channels, device_node) { | 1588 | list_for_each_entry(c, &dma->channels, device_node) { |
1588 | ioat_chan = to_ioat_chan(c); | 1589 | ioat_chan = to_ioat_chan(c); |
1589 | errmask = readl(ioat_chan->reg_base + | 1590 | errmask = readl(ioat_chan->reg_base + |
@@ -1596,9 +1597,9 @@ static void ioat3_intr_quirk(struct ioatdma_device *device) | |||
1596 | } | 1597 | } |
1597 | } | 1598 | } |
1598 | 1599 | ||
1599 | int ioat3_dma_probe(struct ioatdma_device *device, int dca) | 1600 | int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) |
1600 | { | 1601 | { |
1601 | struct pci_dev *pdev = device->pdev; | 1602 | struct pci_dev *pdev = ioat_dma->pdev; |
1602 | int dca_en = system_has_dca_enabled(pdev); | 1603 | int dca_en = system_has_dca_enabled(pdev); |
1603 | struct dma_device *dma; | 1604 | struct dma_device *dma; |
1604 | struct dma_chan *c; | 1605 | struct dma_chan *c; |
@@ -1606,11 +1607,11 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1606 | bool is_raid_device = false; | 1607 | bool is_raid_device = false; |
1607 | int err; | 1608 | int err; |
1608 | 1609 | ||
1609 | device->enumerate_channels = ioat2_enumerate_channels; | 1610 | ioat_dma->enumerate_channels = ioat2_enumerate_channels; |
1610 | device->reset_hw = ioat3_reset_hw; | 1611 | ioat_dma->reset_hw = ioat3_reset_hw; |
1611 | device->self_test = ioat3_dma_self_test; | 1612 | ioat_dma->self_test = ioat3_dma_self_test; |
1612 | device->intr_quirk = ioat3_intr_quirk; | 1613 | ioat_dma->intr_quirk = ioat3_intr_quirk; |
1613 | dma = &device->common; | 1614 | dma = &ioat_dma->dma_dev; |
1614 | dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; | 1615 | dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; |
1615 | dma->device_issue_pending = ioat2_issue_pending; | 1616 | dma->device_issue_pending = ioat2_issue_pending; |
1616 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; | 1617 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; |
@@ -1619,16 +1620,17 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1619 | dma_cap_set(DMA_INTERRUPT, dma->cap_mask); | 1620 | dma_cap_set(DMA_INTERRUPT, dma->cap_mask); |
1620 | dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; | 1621 | dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; |
1621 | 1622 | ||
1622 | device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); | 1623 | ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET); |
1623 | 1624 | ||
1624 | if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev)) | 1625 | if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev)) |
1625 | device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); | 1626 | ioat_dma->cap &= |
1627 | ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); | ||
1626 | 1628 | ||
1627 | /* dca is incompatible with raid operations */ | 1629 | /* dca is incompatible with raid operations */ |
1628 | if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) | 1630 | if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) |
1629 | device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); | 1631 | ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); |
1630 | 1632 | ||
1631 | if (device->cap & IOAT_CAP_XOR) { | 1633 | if (ioat_dma->cap & IOAT_CAP_XOR) { |
1632 | is_raid_device = true; | 1634 | is_raid_device = true; |
1633 | dma->max_xor = 8; | 1635 | dma->max_xor = 8; |
1634 | 1636 | ||
@@ -1639,7 +1641,7 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1639 | dma->device_prep_dma_xor_val = ioat3_prep_xor_val; | 1641 | dma->device_prep_dma_xor_val = ioat3_prep_xor_val; |
1640 | } | 1642 | } |
1641 | 1643 | ||
1642 | if (device->cap & IOAT_CAP_PQ) { | 1644 | if (ioat_dma->cap & IOAT_CAP_PQ) { |
1643 | is_raid_device = true; | 1645 | is_raid_device = true; |
1644 | 1646 | ||
1645 | dma->device_prep_dma_pq = ioat3_prep_pq; | 1647 | dma->device_prep_dma_pq = ioat3_prep_pq; |
@@ -1647,19 +1649,19 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1647 | dma_cap_set(DMA_PQ, dma->cap_mask); | 1649 | dma_cap_set(DMA_PQ, dma->cap_mask); |
1648 | dma_cap_set(DMA_PQ_VAL, dma->cap_mask); | 1650 | dma_cap_set(DMA_PQ_VAL, dma->cap_mask); |
1649 | 1651 | ||
1650 | if (device->cap & IOAT_CAP_RAID16SS) { | 1652 | if (ioat_dma->cap & IOAT_CAP_RAID16SS) { |
1651 | dma_set_maxpq(dma, 16, 0); | 1653 | dma_set_maxpq(dma, 16, 0); |
1652 | } else { | 1654 | } else { |
1653 | dma_set_maxpq(dma, 8, 0); | 1655 | dma_set_maxpq(dma, 8, 0); |
1654 | } | 1656 | } |
1655 | 1657 | ||
1656 | if (!(device->cap & IOAT_CAP_XOR)) { | 1658 | if (!(ioat_dma->cap & IOAT_CAP_XOR)) { |
1657 | dma->device_prep_dma_xor = ioat3_prep_pqxor; | 1659 | dma->device_prep_dma_xor = ioat3_prep_pqxor; |
1658 | dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; | 1660 | dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; |
1659 | dma_cap_set(DMA_XOR, dma->cap_mask); | 1661 | dma_cap_set(DMA_XOR, dma->cap_mask); |
1660 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | 1662 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); |
1661 | 1663 | ||
1662 | if (device->cap & IOAT_CAP_RAID16SS) { | 1664 | if (ioat_dma->cap & IOAT_CAP_RAID16SS) { |
1663 | dma->max_xor = 16; | 1665 | dma->max_xor = 16; |
1664 | } else { | 1666 | } else { |
1665 | dma->max_xor = 8; | 1667 | dma->max_xor = 8; |
@@ -1668,11 +1670,11 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1668 | } | 1670 | } |
1669 | 1671 | ||
1670 | dma->device_tx_status = ioat3_tx_status; | 1672 | dma->device_tx_status = ioat3_tx_status; |
1671 | device->cleanup_fn = ioat3_cleanup_event; | 1673 | ioat_dma->cleanup_fn = ioat3_cleanup_event; |
1672 | device->timer_fn = ioat3_timer_event; | 1674 | ioat_dma->timer_fn = ioat3_timer_event; |
1673 | 1675 | ||
1674 | /* starting with CB3.3 super extended descriptors are supported */ | 1676 | /* starting with CB3.3 super extended descriptors are supported */ |
1675 | if (device->cap & IOAT_CAP_RAID16SS) { | 1677 | if (ioat_dma->cap & IOAT_CAP_RAID16SS) { |
1676 | char pool_name[14]; | 1678 | char pool_name[14]; |
1677 | int i; | 1679 | int i; |
1678 | 1680 | ||
@@ -1680,19 +1682,19 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1680 | snprintf(pool_name, 14, "ioat_hw%d_sed", i); | 1682 | snprintf(pool_name, 14, "ioat_hw%d_sed", i); |
1681 | 1683 | ||
1682 | /* allocate SED DMA pool */ | 1684 | /* allocate SED DMA pool */ |
1683 | device->sed_hw_pool[i] = dmam_pool_create(pool_name, | 1685 | ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name, |
1684 | &pdev->dev, | 1686 | &pdev->dev, |
1685 | SED_SIZE * (i + 1), 64, 0); | 1687 | SED_SIZE * (i + 1), 64, 0); |
1686 | if (!device->sed_hw_pool[i]) | 1688 | if (!ioat_dma->sed_hw_pool[i]) |
1687 | return -ENOMEM; | 1689 | return -ENOMEM; |
1688 | 1690 | ||
1689 | } | 1691 | } |
1690 | } | 1692 | } |
1691 | 1693 | ||
1692 | if (!(device->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ))) | 1694 | if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ))) |
1693 | dma_cap_set(DMA_PRIVATE, dma->cap_mask); | 1695 | dma_cap_set(DMA_PRIVATE, dma->cap_mask); |
1694 | 1696 | ||
1695 | err = ioat_probe(device); | 1697 | err = ioat_probe(ioat_dma); |
1696 | if (err) | 1698 | if (err) |
1697 | return err; | 1699 | return err; |
1698 | 1700 | ||
@@ -1702,14 +1704,14 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1702 | ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); | 1704 | ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); |
1703 | } | 1705 | } |
1704 | 1706 | ||
1705 | err = ioat_register(device); | 1707 | err = ioat_register(ioat_dma); |
1706 | if (err) | 1708 | if (err) |
1707 | return err; | 1709 | return err; |
1708 | 1710 | ||
1709 | ioat_kobject_add(device, &ioat2_ktype); | 1711 | ioat_kobject_add(ioat_dma, &ioat2_ktype); |
1710 | 1712 | ||
1711 | if (dca) | 1713 | if (dca) |
1712 | device->dca = ioat3_dca_init(pdev, device->reg_base); | 1714 | ioat_dma->dca = ioat3_dca_init(pdev, ioat_dma->reg_base); |
1713 | 1715 | ||
1714 | return 0; | 1716 | return 0; |
1715 | } | 1717 | } |