Diffstat (limited to 'drivers/dma/ioat/dma.c')
-rw-r--r--  drivers/dma/ioat/dma.c  1139
1 file changed, 1139 insertions, 0 deletions
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
new file mode 100644
index 000000000000..17a518d0386f
--- /dev/null
+++ b/drivers/dma/ioat/dma.c
@@ -0,0 +1,1139 @@
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat1_cleanup_tasklet(unsigned long data);

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx,
		       void (*timer_fn)(unsigned long),
		       void (*tasklet)(unsigned long),
		       unsigned long ioat)
{
	struct dma_device *dma = &device->common;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = timer_fn;
	chan->timer.data = ioat;
	tasklet_init(&chan->cleanup_task, tasklet, ioat);
	tasklet_disable(&chan->cleanup_task);
}

static void ioat1_timer_event(unsigned long data);

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat1_timer_event,
				  ioat1_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat1_dma_memcpy_issue_pending - push appended descriptors that the
 * hardware has not yet recognized out to the device
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}

static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = c->cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(tx->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&tx->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}

static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->txd.tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}

static void ioat1_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;

	ioat1_cleanup(chan);
	writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
		       int direction, enum dma_ctrl_flags flags, bool dst)
{
	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
		pci_unmap_single(pdev, addr, len, direction);
	else
		pci_unmap_page(pdev, addr, len, direction);
}


void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}

unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
	unsigned long phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   unsigned long *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
		__func__, phys_complete);
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}

/**
 * ioat1_cleanup - clean up finished descriptors
 * @chan: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

static enum dma_status
ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		      dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat1_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}

static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void __devinit ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int __devinit ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

int __devinit ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		dma->chancnt, device->version, IOAT_DMA_VERSION);

	if (!dma->chancnt) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
			"zero channels detected\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int __devinit ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_is_tx_complete = ioat1_dma_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}

void __devexit ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}