about summary refs log tree commit diff stats
path: root/drivers/dma/ioat
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r--drivers/dma/ioat/dma.c7
-rw-r--r--drivers/dma/ioat/dma.h3
-rw-r--r--drivers/dma/ioat/dma_v2.c5
-rw-r--r--drivers/dma/ioat/dma_v2.h3
-rw-r--r--drivers/dma/ioat/pci.c16
5 files changed, 26 insertions, 8 deletions
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 32a757be75c1..c524d36d3c2e 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -251,12 +251,12 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
251 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); 251 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
252 252
253 /* write address into NextDescriptor field of last desc in chain */ 253 /* write address into NextDescriptor field of last desc in chain */
254 first = to_ioat_desc(tx->tx_list.next); 254 first = to_ioat_desc(desc->tx_list.next);
255 chain_tail = to_ioat_desc(ioat->used_desc.prev); 255 chain_tail = to_ioat_desc(ioat->used_desc.prev);
256 /* make descriptor updates globally visible before chaining */ 256 /* make descriptor updates globally visible before chaining */
257 wmb(); 257 wmb();
258 chain_tail->hw->next = first->txd.phys; 258 chain_tail->hw->next = first->txd.phys;
259 list_splice_tail_init(&tx->tx_list, &ioat->used_desc); 259 list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
260 dump_desc_dbg(ioat, chain_tail); 260 dump_desc_dbg(ioat, chain_tail);
261 dump_desc_dbg(ioat, first); 261 dump_desc_dbg(ioat, first);
262 262
@@ -298,6 +298,7 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
298 298
299 memset(desc, 0, sizeof(*desc)); 299 memset(desc, 0, sizeof(*desc));
300 300
301 INIT_LIST_HEAD(&desc_sw->tx_list);
301 dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); 302 dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
302 desc_sw->txd.tx_submit = ioat1_tx_submit; 303 desc_sw->txd.tx_submit = ioat1_tx_submit;
303 desc_sw->hw = desc; 304 desc_sw->hw = desc;
@@ -522,7 +523,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
522 523
523 desc->txd.flags = flags; 524 desc->txd.flags = flags;
524 desc->len = total_len; 525 desc->len = total_len;
525 list_splice(&chain, &desc->txd.tx_list); 526 list_splice(&chain, &desc->tx_list);
526 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); 527 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
527 hw->ctl_f.compl_write = 1; 528 hw->ctl_f.compl_write = 1;
528 hw->tx_cnt = tx_cnt; 529 hw->tx_cnt = tx_cnt;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 0e37e426c729..6a675a2a2d1c 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -171,7 +171,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
171 * struct ioat_desc_sw - wrapper around hardware descriptor 171 * struct ioat_desc_sw - wrapper around hardware descriptor
172 * @hw: hardware DMA descriptor (for memcpy) 172 * @hw: hardware DMA descriptor (for memcpy)
173 * @node: this descriptor will either be on the free list, 173 * @node: this descriptor will either be on the free list,
174 * or attached to a transaction list (async_tx.tx_list) 174 * or attached to a transaction list (tx_list)
175 * @txd: the generic software descriptor for all engines 175 * @txd: the generic software descriptor for all engines
176 * @id: identifier for debug 176 * @id: identifier for debug
177 */ 177 */
@@ -179,6 +179,7 @@ struct ioat_desc_sw {
179 struct ioat_dma_descriptor *hw; 179 struct ioat_dma_descriptor *hw;
180 struct list_head node; 180 struct list_head node;
181 size_t len; 181 size_t len;
182 struct list_head tx_list;
182 struct dma_async_tx_descriptor txd; 183 struct dma_async_tx_descriptor txd;
183 #ifdef DEBUG 184 #ifdef DEBUG
184 int id; 185 int id;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 7bbbd83d12e6..5d6ac49e0d32 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -397,11 +397,12 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f
397 return NULL; 397 return NULL;
398 memset(hw, 0, sizeof(*hw)); 398 memset(hw, 0, sizeof(*hw));
399 399
400 desc = kzalloc(sizeof(*desc), flags); 400 desc = kmem_cache_alloc(ioat2_cache, flags);
401 if (!desc) { 401 if (!desc) {
402 pci_pool_free(dma->dma_pool, hw, phys); 402 pci_pool_free(dma->dma_pool, hw, phys);
403 return NULL; 403 return NULL;
404 } 404 }
405 memset(desc, 0, sizeof(*desc));
405 406
406 dma_async_tx_descriptor_init(&desc->txd, chan); 407 dma_async_tx_descriptor_init(&desc->txd, chan);
407 desc->txd.tx_submit = ioat2_tx_submit_unlock; 408 desc->txd.tx_submit = ioat2_tx_submit_unlock;
@@ -416,7 +417,7 @@ static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *cha
416 417
417 dma = to_ioatdma_device(chan->device); 418 dma = to_ioatdma_device(chan->device);
418 pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys); 419 pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
419 kfree(desc); 420 kmem_cache_free(ioat2_cache, desc);
420} 421}
421 422
422static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags) 423static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 246e646b1904..1d849ef74d5f 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -142,8 +142,8 @@ struct ioat_ring_ent {
142 struct ioat_pq_update_descriptor *pqu; 142 struct ioat_pq_update_descriptor *pqu;
143 struct ioat_raw_descriptor *raw; 143 struct ioat_raw_descriptor *raw;
144 }; 144 };
145 struct dma_async_tx_descriptor txd;
146 size_t len; 145 size_t len;
146 struct dma_async_tx_descriptor txd;
147 enum sum_check_flags *result; 147 enum sum_check_flags *result;
148 #ifdef DEBUG 148 #ifdef DEBUG
149 int id; 149 int id;
@@ -186,4 +186,5 @@ void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
186void ioat2_cleanup_tasklet(unsigned long data); 186void ioat2_cleanup_tasklet(unsigned long data);
187void ioat2_timer_event(unsigned long data); 187void ioat2_timer_event(unsigned long data);
188extern struct kobj_type ioat2_ktype; 188extern struct kobj_type ioat2_ktype;
189extern struct kmem_cache *ioat2_cache;
189#endif /* IOATDMA_V2_H */ 190#endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index b77d3a2864ad..c788fa266470 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -83,6 +83,8 @@ static int ioat_dca_enabled = 1;
83module_param(ioat_dca_enabled, int, 0644); 83module_param(ioat_dca_enabled, int, 0644);
84MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); 84MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
85 85
86struct kmem_cache *ioat2_cache;
87
86#define DRV_NAME "ioatdma" 88#define DRV_NAME "ioatdma"
87 89
88static struct pci_driver ioat_pci_driver = { 90static struct pci_driver ioat_pci_driver = {
@@ -182,15 +184,27 @@ static void __devexit ioat_remove(struct pci_dev *pdev)
182 184
183static int __init ioat_init_module(void) 185static int __init ioat_init_module(void)
184{ 186{
187 int err;
188
185 pr_info("%s: Intel(R) QuickData Technology Driver %s\n", 189 pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
186 DRV_NAME, IOAT_DMA_VERSION); 190 DRV_NAME, IOAT_DMA_VERSION);
187 191
188 return pci_register_driver(&ioat_pci_driver); 192 ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent),
193 0, SLAB_HWCACHE_ALIGN, NULL);
194 if (!ioat2_cache)
195 return -ENOMEM;
196
197 err = pci_register_driver(&ioat_pci_driver);
198 if (err)
199 kmem_cache_destroy(ioat2_cache);
200
201 return err;
189} 202}
190module_init(ioat_init_module); 203module_init(ioat_init_module);
191 204
192static void __exit ioat_exit_module(void) 205static void __exit ioat_exit_module(void)
193{ 206{
194 pci_unregister_driver(&ioat_pci_driver); 207 pci_unregister_driver(&ioat_pci_driver);
208 kmem_cache_destroy(ioat2_cache);
195} 209}
196module_exit(ioat_exit_module); 210module_exit(ioat_exit_module);