author     Shannon Nelson <shannon.nelson@intel.com>             2007-10-16 04:27:39 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 12:43:09 -0400
commit     43d6e369d43ff175e1e0e80caaedb1e53829247c (patch)
tree       f5eae87e5a3ac684c49b4f8c69aa5a4332137c01 /drivers
parent     1fda5f4e96225c3ed0baded942704c0ae399da23 (diff)
I/OAT: code cleanup from checkpatch output
Take care of a bunch of little code nits in ioatdma files
Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/ioat_dma.c | 198
1 file changed, 110 insertions(+), 88 deletions(-)
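Most of the churn in the diff below is mechanical, matching what the checkpatch.pl style checker flags: over-long lines are wrapped, forward declarations are gathered near the top of the file, several functions gain an ioat_dma_ prefix, and bare printk() calls become dev_err() so each message is tagged with the PCI device it came from. The printk() to dev_err() pattern, which recurs throughout the patch, looks roughly like this (a minimal sketch with a simplified stand-in structure, not code lifted from the file):

    #include <linux/kernel.h>
    #include <linux/device.h>
    #include <linux/pci.h>

    struct demo_chan {              /* simplified stand-in for struct ioat_dma_chan */
            struct pci_dev *pdev;
    };

    static void demo_report_chanerr(struct demo_chan *chan, u32 chanerr)
    {
            /* Before: the message gives no hint which device it refers to. */
            printk(KERN_ERR "IOAT: CHANERR = %x, clearing\n", chanerr);

            /* After: dev_err() prefixes the message with the driver and device name. */
            dev_err(&chan->pdev->dev,
                    "ioatdma: CHANERR = %x, clearing\n", chanerr);
    }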
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index f0be71cf1eee..2db05f614843 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -1,10 +1,10 @@
 /*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2007 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but WITHOUT
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@@ -12,11 +12,12 @@
  * more details.
  *
  * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
  *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
  */
 
 /*
@@ -35,17 +36,22 @@
 #include "ioatdma_registers.h"
 #include "ioatdma_hw.h"
 
+#define INITIAL_IOAT_DESC_COUNT 128
+
 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
 #define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
 
 /* internal functions */
-static int __devinit ioat_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
+static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
+static int __devinit ioat_probe(struct pci_dev *pdev,
+                                const struct pci_device_id *ent);
 static void ioat_shutdown(struct pci_dev *pdev);
 static void __devexit ioat_remove(struct pci_dev *pdev);
 
-static int enumerate_dma_channels(struct ioat_device *device)
+static int ioat_dma_enumerate_channels(struct ioat_device *device)
 {
         u8 xfercap_scale;
         u32 xfercap;
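The to_ioat_chan(), to_ioat_device(), to_ioat_desc() and tx_to_ioat_desc() helpers that appear as context in the hunk above are plain container_of() wrappers: given a pointer to the generic dmaengine structure embedded inside a driver-private structure, they recover the enclosing private structure. The same idiom reduced to a user-space sketch (all names here are invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    /* container_of() spelled out with offsetof(); the kernel macro is equivalent. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct common_chan {            /* stand-in for struct dma_chan */
            int id;
    };

    struct private_chan {           /* stand-in for struct ioat_dma_chan */
            unsigned long reg_base;
            struct common_chan common;
    };

    #define to_private_chan(c) container_of(c, struct private_chan, common)

    int main(void)
    {
            struct private_chan pc = { .reg_base = 0xf000, .common = { .id = 3 } };
            struct common_chan *c = &pc.common;

            /* Recover the enclosing private_chan from its embedded member. */
            printf("reg_base = %#lx\n", to_private_chan(c)->reg_base);
            return 0;
    }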
@@ -73,13 +79,14 @@ static int enumerate_dma_channels(struct ioat_device *device)
                 /* This should be made common somewhere in dmaengine.c */
                 ioat_chan->common.device = &device->common;
                 list_add_tail(&ioat_chan->common.device_node,
                               &device->common.channels);
         }
         return device->common.chancnt;
 }
 
-static void
-ioat_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
+static void ioat_set_src(dma_addr_t addr,
+                         struct dma_async_tx_descriptor *tx,
+                         int index)
 {
         struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
@@ -93,8 +100,9 @@ ioat_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
 
 }
 
-static void
-ioat_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
+static void ioat_set_dest(dma_addr_t addr,
+                          struct dma_async_tx_descriptor *tx,
+                          int index)
 {
         struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
@@ -107,8 +115,7 @@ ioat_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
         }
 }
 
-static dma_cookie_t
-ioat_tx_submit(struct dma_async_tx_descriptor *tx)
+static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
 {
         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
         struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
@@ -146,8 +153,8 @@ ioat_tx_submit(struct dma_async_tx_descriptor *tx)
 }
 
 static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
-        struct ioat_dma_chan *ioat_chan,
-        gfp_t flags)
+                                        struct ioat_dma_chan *ioat_chan,
+                                        gfp_t flags)
 {
         struct ioat_dma_descriptor *desc;
         struct ioat_desc_sw *desc_sw;
@@ -177,10 +184,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
         return desc_sw;
 }
 
-#define INITIAL_IOAT_DESC_COUNT 128
-
-static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan);
-
 /* returns the actual number of allocated descriptors */
 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
@@ -195,15 +198,16 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
         if (!list_empty(&ioat_chan->free_desc))
                 return INITIAL_IOAT_DESC_COUNT;
 
         /* Setup register to interrupt and write completion status on error */
         chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
                 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
                 IOAT_CHANCTRL_ERR_COMPLETION_EN;
         writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
 
         chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
         if (chanerr) {
-                printk("IOAT: CHANERR = %x, clearing\n", chanerr);
+                dev_err(&ioat_chan->device->pdev->dev,
+                        "ioatdma: CHANERR = %x, clearing\n", chanerr);
                 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
         }
 
@@ -211,7 +215,8 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
         for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
                 desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
                 if (!desc) {
-                        printk(KERN_ERR "IOAT: Only %d initial descriptors\n", i);
+                        dev_err(&ioat_chan->device->pdev->dev,
+                                "ioatdma: Only %d initial descriptors\n", i);
                         break;
                 }
                 list_add_tail(&desc->node, &tmp_list);
@@ -224,8 +229,8 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
         /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
         ioat_chan->completion_virt =
                 pci_pool_alloc(ioat_chan->device->completion_pool,
-                       GFP_KERNEL,
-                       &ioat_chan->completion_addr);
+                               GFP_KERNEL,
+                               &ioat_chan->completion_addr);
         memset(ioat_chan->completion_virt, 0,
                sizeof(*ioat_chan->completion_virt));
         writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
@@ -233,12 +238,10 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
         writel(((u64) ioat_chan->completion_addr) >> 32,
                ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
-        ioat_start_null_desc(ioat_chan);
+        ioat_dma_start_null_desc(ioat_chan);
         return i;
 }
 
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
-
 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 {
         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
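The completion-address setup left as context in the two hunks above programs a 64-bit DMA address with two 32-bit writel() calls because, as the in-line comment notes, a single 64-bit MMIO write does not work here. The split looks roughly like this (a sketch; the DEMO_* register offsets are placeholders, not the driver's):

    #include <linux/io.h>
    #include <linux/types.h>

    #define DEMO_CHANCMP_OFFSET_LOW   0x18    /* hypothetical offsets */
    #define DEMO_CHANCMP_OFFSET_HIGH  0x1c

    static void demo_set_completion_addr(void __iomem *reg_base, dma_addr_t addr)
    {
            /* Low 32 bits first, then the high 32 bits, as two separate stores. */
            writel(((u64) addr) & 0x00000000FFFFFFFF,
                   reg_base + DEMO_CHANCMP_OFFSET_LOW);
            writel(((u64) addr) >> 32,
                   reg_base + DEMO_CHANCMP_OFFSET_HIGH);
    }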
@@ -268,19 +271,22 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
         spin_unlock_bh(&ioat_chan->desc_lock);
 
         pci_pool_free(ioat_device->completion_pool,
                       ioat_chan->completion_virt,
                       ioat_chan->completion_addr);
 
         /* one is ok since we left it on there on purpose */
         if (in_use_descs > 1)
-                printk(KERN_ERR "IOAT: Freeing %d in use descriptors!\n",
+                dev_err(&ioat_chan->device->pdev->dev,
+                        "ioatdma: Freeing %d in use descriptors!\n",
                         in_use_descs - 1);
 
         ioat_chan->last_completion = ioat_chan->completion_addr = 0;
 }
 
-static struct dma_async_tx_descriptor *
-ioat_dma_prep_memcpy(struct dma_chan *chan, size_t len, int int_en)
+static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
+                                                struct dma_chan *chan,
+                                                size_t len,
+                                                int int_en)
 {
         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
         struct ioat_desc_sw *first, *prev, *new;
@@ -343,12 +349,11 @@ ioat_dma_prep_memcpy(struct dma_chan *chan, size_t len, int int_en)
         return new ? &new->async_tx : NULL;
 }
 
-
 /**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended descriptors to hw
+ * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
+ * descriptors to hw
  * @chan: DMA channel handle
  */
-
 static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
@@ -360,15 +365,15 @@ static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
         }
 }
 
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
+static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 {
         unsigned long phys_complete;
         struct ioat_desc_sw *desc, *_desc;
         dma_cookie_t cookie = 0;
 
-        prefetch(chan->completion_virt);
+        prefetch(ioat_chan->completion_virt);
 
-        if (!spin_trylock(&chan->cleanup_lock))
+        if (!spin_trylock(&ioat_chan->cleanup_lock))
                 return;
 
         /* The completion writeback can happen at any time,
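The cleanup path that starts in the hunk above guards itself with spin_trylock(): if another context already holds cleanup_lock, this caller simply returns rather than spinning, presumably because whoever holds the lock is already doing the same reaping work. The shape of that guard, as a sketch with invented names rather than the driver's own:

    #include <linux/spinlock.h>

    struct demo_chan {
            spinlock_t cleanup_lock;
    };

    static void demo_cleanup(struct demo_chan *chan)
    {
            /* Someone else is already reaping completed descriptors; let them. */
            if (!spin_trylock(&chan->cleanup_lock))
                    return;

            /* ... walk and recycle completed descriptors under the lock ... */

            spin_unlock(&chan->cleanup_lock);
    }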
@@ -378,26 +383,27 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
 
 #if (BITS_PER_LONG == 64)
         phys_complete =
-        chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+        ioat_chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
 #else
-        phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
+        phys_complete = ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
 #endif
 
-        if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
+        if ((ioat_chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
                 IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
-                printk("IOAT: Channel halted, chanerr = %x\n",
-                        readl(chan->reg_base + IOAT_CHANERR_OFFSET));
+                dev_err(&ioat_chan->device->pdev->dev,
+                        "ioatdma: Channel halted, chanerr = %x\n",
+                        readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
 
                 /* TODO do something to salvage the situation */
         }
 
-        if (phys_complete == chan->last_completion) {
-                spin_unlock(&chan->cleanup_lock);
+        if (phys_complete == ioat_chan->last_completion) {
+                spin_unlock(&ioat_chan->cleanup_lock);
                 return;
         }
 
-        spin_lock_bh(&chan->desc_lock);
-        list_for_each_entry_safe(desc, _desc, &chan->used_desc, node) {
+        spin_lock_bh(&ioat_chan->desc_lock);
+        list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
 
                 /*
                  * Incoming DMA requests may use multiple descriptors, due to
@@ -407,31 +413,36 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
                 if (desc->async_tx.cookie) {
                         cookie = desc->async_tx.cookie;
 
-                        /* yes we are unmapping both _page and _single alloc'd
-                           regions with unmap_page. Is this *really* that bad?
-                        */
-                        pci_unmap_page(chan->device->pdev,
+                        /*
+                         * yes we are unmapping both _page and _single alloc'd
+                         * regions with unmap_page. Is this *really* that bad?
+                         */
+                        pci_unmap_page(ioat_chan->device->pdev,
                                         pci_unmap_addr(desc, dst),
                                         pci_unmap_len(desc, len),
                                         PCI_DMA_FROMDEVICE);
-                        pci_unmap_page(chan->device->pdev,
+                        pci_unmap_page(ioat_chan->device->pdev,
                                         pci_unmap_addr(desc, src),
                                         pci_unmap_len(desc, len),
                                         PCI_DMA_TODEVICE);
                 }
 
                 if (desc->async_tx.phys != phys_complete) {
-                        /* a completed entry, but not the last, so cleanup
+                        /*
+                         * a completed entry, but not the last, so cleanup
                          * if the client is done with the descriptor
                          */
                         if (desc->async_tx.ack) {
                                 list_del(&desc->node);
-                                list_add_tail(&desc->node, &chan->free_desc);
+                                list_add_tail(&desc->node,
+                                              &ioat_chan->free_desc);
                         } else
                                 desc->async_tx.cookie = 0;
                 } else {
-                        /* last used desc. Do not remove, so we can append from
-                           it, but don't look at it next time, either */
+                        /*
+                         * last used desc. Do not remove, so we can append from
+                         * it, but don't look at it next time, either
+                         */
                         desc->async_tx.cookie = 0;
 
                         /* TODO check status bits? */
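Several changes in this hunk only reflow block comments into the usual kernel style: the opening /* sits on its own line, each continuation line starts with an aligned asterisk, and the closing */ gets its own line. In generic form (not lines taken from the file):

    /* old style: text starts on the opening line and
       continuation lines are indented with bare spaces */

    /*
     * preferred style: delimiters on their own lines, each
     * continuation line prefixed with an aligned " * "
     */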
@@ -439,13 +450,13 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
                 }
         }
 
-        spin_unlock_bh(&chan->desc_lock);
+        spin_unlock_bh(&ioat_chan->desc_lock);
 
-        chan->last_completion = phys_complete;
+        ioat_chan->last_completion = phys_complete;
         if (cookie != 0)
-                chan->completed_cookie = cookie;
+                ioat_chan->completed_cookie = cookie;
 
-        spin_unlock(&chan->cleanup_lock);
+        spin_unlock(&ioat_chan->cleanup_lock);
 }
 
 static void ioat_dma_dependency_added(struct dma_chan *chan)
@@ -466,11 +477,10 @@ static void ioat_dma_dependency_added(struct dma_chan *chan)
  * @done: if not %NULL, updated with last completed transaction
  * @used: if not %NULL, updated with last used transaction
  */
-
 static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
                                             dma_cookie_t cookie,
                                             dma_cookie_t *done,
                                             dma_cookie_t *used)
 {
         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
         dma_cookie_t last_used;
@@ -481,7 +491,7 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
         last_complete = ioat_chan->completed_cookie;
 
         if (done)
-                *done= last_complete;
+                *done = last_complete;
         if (used)
                 *used = last_used;
 
@@ -495,7 +505,7 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
         last_complete = ioat_chan->completed_cookie;
 
         if (done)
-                *done= last_complete;
+                *done = last_complete;
         if (used)
                 *used = last_used;
 
@@ -538,13 +548,13 @@ static irqreturn_t ioat_do_interrupt(int irq, void *data)
 
         attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
 
-        printk(KERN_ERR "ioatdma error: interrupt! status %lx\n", attnstatus);
+        printk(KERN_ERR "ioatdma: interrupt! status %lx\n", attnstatus);
 
         writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
         return IRQ_HANDLED;
 }
 
-static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan)
+static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 {
         struct ioat_desc_sw *desc;
 
@@ -608,9 +618,11 @@ static int ioat_self_test(struct ioat_device *device)
 
         /* Start copy, using first DMA channel */
         dma_chan = container_of(device->common.channels.next,
                                 struct dma_chan,
                                 device_node);
         if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
+                dev_err(&device->pdev->dev,
+                        "selftest cannot allocate chan resource\n");
                 err = -ENODEV;
                 goto out;
         }
@@ -628,12 +640,14 @@ static int ioat_self_test(struct ioat_device *device)
         msleep(1);
 
         if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
-                printk(KERN_ERR "ioatdma: Self-test copy timed out, disabling\n");
+                dev_err(&device->pdev->dev,
+                        "ioatdma: Self-test copy timed out, disabling\n");
                 err = -ENODEV;
                 goto free_resources;
         }
         if (memcmp(src, dest, IOAT_TEST_SIZE)) {
-                printk(KERN_ERR "ioatdma: Self-test copy failed compare, disabling\n");
+                dev_err(&device->pdev->dev,
+                        "ioatdma: Self-test copy failed compare, disabling\n");
                 err = -ENODEV;
                 goto free_resources;
         }
@@ -647,7 +661,7 @@ out:
 }
 
 static int __devinit ioat_probe(struct pci_dev *pdev,
-                                     const struct pci_device_id *ent)
+                                const struct pci_device_id *ent)
 {
         int err;
         unsigned long mmio_start, mmio_len;
@@ -691,7 +705,9 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
                 goto err_dma_pool;
         }
 
-        device->completion_pool = pci_pool_create("completion_pool", pdev, sizeof(u64), SMP_CACHE_BYTES, SMP_CACHE_BYTES);
+        device->completion_pool = pci_pool_create("completion_pool", pdev,
+                                                  sizeof(u64), SMP_CACHE_BYTES,
+                                                  SMP_CACHE_BYTES);
         if (!device->completion_pool) {
                 err = -ENOMEM;
                 goto err_completion_pool;
@@ -713,22 +729,26 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
 
         device->reg_base = reg_base;
 
-        writeb(IOAT_INTRCTRL_MASTER_INT_EN, device->reg_base + IOAT_INTRCTRL_OFFSET);
+        writeb(IOAT_INTRCTRL_MASTER_INT_EN,
+               device->reg_base + IOAT_INTRCTRL_OFFSET);
         pci_set_master(pdev);
 
         INIT_LIST_HEAD(&device->common.channels);
-        enumerate_dma_channels(device);
+        ioat_dma_enumerate_channels(device);
 
         dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
-        device->common.device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
-        device->common.device_free_chan_resources = ioat_dma_free_chan_resources;
+        device->common.device_alloc_chan_resources =
+                                                ioat_dma_alloc_chan_resources;
+        device->common.device_free_chan_resources =
+                                                ioat_dma_free_chan_resources;
         device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
         device->common.device_is_tx_complete = ioat_dma_is_complete;
         device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
         device->common.device_dependency_added = ioat_dma_dependency_added;
         device->common.dev = &pdev->dev;
-        printk(KERN_INFO "Intel(R) I/OAT DMA Engine found, %d channels\n",
-                device->common.chancnt);
+        printk(KERN_INFO
+                "ioatdma: Intel(R) I/OAT DMA Engine found, %d channels\n",
+                device->common.chancnt);
 
         err = ioat_self_test(device);
         if (err)
@@ -754,7 +774,8 @@ err_set_dma_mask:
         pci_disable_device(pdev);
 err_enable_device:
 
-        printk(KERN_ERR "Intel(R) I/OAT DMA Engine initialization failed\n");
+        printk(KERN_INFO
+                "ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");
 
         return err;
 }
@@ -786,7 +807,8 @@ static void __devexit ioat_remove(struct pci_dev *pdev)
         iounmap(device->reg_base);
         pci_release_regions(pdev);
         pci_disable_device(pdev);
-        list_for_each_entry_safe(chan, _chan, &device->common.channels, device_node) {
+        list_for_each_entry_safe(chan, _chan,
+                                 &device->common.channels, device_node) {
                 ioat_chan = to_ioat_chan(chan);
                 list_del(&chan->device_node);
                 kfree(ioat_chan);