author    Dave Jiang <dave.jiang@intel.com>    2015-08-26 16:17:24 -0400
committer Vinod Koul <vinod.koul@intel.com>    2015-09-21 11:40:05 -0400
commit    ad4a7b5065c1b4f5176e7d031c3cc2b36f776884 (patch)
tree      febe068710e3019801c09c014d6cee2843cb24c3 /drivers/dma/ioat
parent    6ff33f3902c3b1c5d0db6b1e2c70b6d76fba357f (diff)
dmaengine: ioatdma: adding shutdown support
The ioatdma needs to be quiesced and to block all additional op submission during reboots. When NET_DMA was in use, this caused an issue: ops were still being sent to ioatdma during reboots even though PCI BME had already been turned off. Even though NET_DMA has been deprecated, we need to prevent similar situations. The shutdown handler should address that.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
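The mechanism is a single channel-state bit gating every submission path: the shutdown handler sets the bit under the prep lock, and each descriptor-prep entry point tests it and bails out. A minimal sketch of that pattern, with simplified hypothetical types and names (the real ones, IOAT_CHAN_DOWN and struct ioatdma_chan, appear in the diff below):

  /* Hypothetical, simplified illustration of the gate this patch adds. */
  #include <linux/spinlock.h>
  #include <linux/bitops.h>

  #define CHAN_DOWN 0                  /* stands in for IOAT_CHAN_DOWN */

  struct chan {
          spinlock_t prep_lock;        /* serializes descriptor prep/submit */
          unsigned long state;         /* atomic channel-state bit flags */
  };

  /* shutdown side: take the prep lock so no in-flight prep call can
   * race past the flag, then mark the channel down */
  static void chan_quiesce(struct chan *c)
  {
          spin_lock_bh(&c->prep_lock);
          set_bit(CHAN_DOWN, &c->state);
          spin_unlock_bh(&c->prep_lock);
  }

  /* prep side: every entry point checks the bit first; returning NULL
   * is the dmaengine convention for "prep failed" */
  static void *chan_prep(struct chan *c)
  {
          void *desc = NULL;

          if (test_bit(CHAN_DOWN, &c->state))
                  return NULL;

          /* ... allocate and fill a hardware descriptor into desc ... */
          return desc;
  }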
Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r--  drivers/dma/ioat/dma.c  |  3
-rw-r--r--  drivers/dma/ioat/dma.h  |  6
-rw-r--r--  drivers/dma/ioat/init.c | 26
-rw-r--r--  drivers/dma/ioat/prep.c | 34
4 files changed, 66 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index f66b7e640610..1d5df2ef148b 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -197,7 +197,8 @@ static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 {
 	spin_lock_bh(&ioat_chan->prep_lock);
-	__ioat_start_null_desc(ioat_chan);
+	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		__ioat_start_null_desc(ioat_chan);
 	spin_unlock_bh(&ioat_chan->prep_lock);
 }
 
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 1bc084986646..8f4e607d5817 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -82,8 +82,9 @@ struct ioatdma_device {
 	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
 	struct dma_device dma_dev;
 	u8 version;
-	struct msix_entry msix_entries[4];
-	struct ioatdma_chan *idx[4];
+#define IOAT_MAX_CHANS 4
+	struct msix_entry msix_entries[IOAT_MAX_CHANS];
+	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
 	struct dca_provider *dca;
 	enum ioat_irq_mode irq_mode;
 	u32 cap;
@@ -95,6 +96,7 @@ struct ioatdma_chan {
 	dma_addr_t last_completion;
 	spinlock_t cleanup_lock;
 	unsigned long state;
+	#define IOAT_CHAN_DOWN 0
 	#define IOAT_COMPLETION_ACK 1
 	#define IOAT_RESET_PENDING 2
 	#define IOAT_KOBJ_INIT_FAIL 3
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 1c3c9b0abf4e..793c5dd6a0e7 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -1186,6 +1186,31 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 	return 0;
 }
 
+static void ioat_shutdown(struct pci_dev *pdev)
+{
+	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
+	struct ioatdma_chan *ioat_chan;
+	int i;
+
+	if (!ioat_dma)
+		return;
+
+	for (i = 0; i < IOAT_MAX_CHANS; i++) {
+		ioat_chan = ioat_dma->idx[i];
+		if (!ioat_chan)
+			continue;
+
+		spin_lock_bh(&ioat_chan->prep_lock);
+		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+		del_timer_sync(&ioat_chan->timer);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+		/* this should quiesce then reset */
+		ioat_reset_hw(ioat_chan);
+	}
+
+	ioat_disable_interrupts(ioat_dma);
+}
+
 #define DRV_NAME "ioatdma"
 
 static struct pci_driver ioat_pci_driver = {
@@ -1193,6 +1218,7 @@ static struct pci_driver ioat_pci_driver = {
 	.id_table = ioat_pci_tbl,
 	.probe = ioat_pci_probe,
 	.remove = ioat_remove,
+	.shutdown = ioat_shutdown,
 };
 
 static struct ioatdma_device *
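For reference, .shutdown is the standard struct pci_driver hook that the PCI core invokes on reboot, poweroff, and kexec; that is why the quiescing lives here rather than in .remove, which only runs on driver unbind. A bare-bones registration following the same shape (the mydrv_* names are hypothetical):

  static void mydrv_shutdown(struct pci_dev *pdev)
  {
          /* quiesce DMA and mask interrupts; the device must be idle
           * before the kernel disables PCI bus mastering */
  }

  static struct pci_driver mydrv_pci_driver = {
          .name     = "mydrv",
          .id_table = mydrv_pci_tbl,
          .probe    = mydrv_probe,
          .remove   = mydrv_remove,
          .shutdown = mydrv_shutdown,
  };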
diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c
index ad4fb41cd23b..6bb4a13a8fbd 100644
--- a/drivers/dma/ioat/prep.c
+++ b/drivers/dma/ioat/prep.c
@@ -121,6 +121,9 @@ ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 	size_t total_len = len;
 	int num_descs, idx, i;
 
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
 	if (likely(num_descs) &&
 	    ioat_check_space_lock(ioat_chan, num_descs) == 0)
@@ -254,6 +257,11 @@ struct dma_async_tx_descriptor *
 ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 	      unsigned int src_cnt, size_t len, unsigned long flags)
 {
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
 }
 
@@ -262,6 +270,11 @@ ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
 		  unsigned int src_cnt, size_t len,
 		  enum sum_check_flags *result, unsigned long flags)
 {
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	/* the cleanup routine only sets bits on validate failure, it
 	 * does not clear bits on validate success... so clear it here
 	 */
@@ -574,6 +587,11 @@ ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 	     unsigned int src_cnt, const unsigned char *scf, size_t len,
 	     unsigned long flags)
 {
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	/* specify valid address for disabled result */
 	if (flags & DMA_PREP_PQ_DISABLE_P)
 		dst[0] = dst[1];
@@ -614,6 +632,11 @@ ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		 unsigned int src_cnt, const unsigned char *scf, size_t len,
 		 enum sum_check_flags *pqres, unsigned long flags)
 {
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	/* specify valid address for disabled result */
 	if (flags & DMA_PREP_PQ_DISABLE_P)
 		pq[0] = pq[1];
@@ -638,6 +661,10 @@ ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 {
 	unsigned char scf[MAX_SCF];
 	dma_addr_t pq[2];
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
 
 	if (src_cnt > MAX_SCF)
 		return NULL;
@@ -661,6 +688,10 @@ ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 {
 	unsigned char scf[MAX_SCF];
 	dma_addr_t pq[2];
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
 
 	if (src_cnt > MAX_SCF)
 		return NULL;
@@ -689,6 +720,9 @@ ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
 	struct ioat_ring_ent *desc;
 	struct ioat_dma_descriptor *hw;
 
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	if (ioat_check_space_lock(ioat_chan, 1) == 0)
 		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
 	else
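One practical consequence for dmaengine clients: a request that races with shutdown now gets NULL back from the prep call rather than being queued to a device whose bus mastering is about to go away, so callers need a NULL check. A client-side sketch (the fallback helper is hypothetical):

  struct dma_async_tx_descriptor *tx;

  tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
  if (!tx) {
          /* channel quiesced (or out of descriptors): do the copy on
           * the CPU instead of waiting on hardware that is going down */
          cpu_copy_fallback(dst, src, len);     /* hypothetical */
  } else {
          dmaengine_submit(tx);
          dma_async_issue_pending(chan);
  }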