Diffstat (limited to 'drivers/dma/ioat/dma.h')
-rw-r--r--  drivers/dma/ioat/dma.h | 112 ++++++++++++++++++++++++-------
 1 file changed, 93 insertions(+), 19 deletions(-)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index ec851cf5345c..dbfccac3e80c 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -23,6 +23,7 @@
 
 #include <linux/dmaengine.h>
 #include "hw.h"
+#include "registers.h"
 #include <linux/init.h>
 #include <linux/dmapool.h>
 #include <linux/cache.h>
@@ -33,7 +34,6 @@
 
 #define IOAT_LOW_COMPLETION_MASK	0xffffffc0
 #define IOAT_DMA_DCA_ANY_CPU		~0
-#define IOAT_WATCHDOG_PERIOD		(2 * HZ)
 
 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
@@ -42,9 +42,6 @@
 
 #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
 
-#define RESET_DELAY msecs_to_jiffies(100)
-#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000))
-
 /*
  * workaround for IOAT ver.3.0 null descriptor issue
  * (channel returns error when size is 0)
@@ -72,7 +69,6 @@ struct ioatdma_device {
 	struct pci_pool *completion_pool;
 	struct dma_device common;
 	u8 version;
-	struct delayed_work work;
 	struct msix_entry msix_entries[4];
 	struct ioat_chan_common *idx[4];
 	struct dca_provider *dca;
@@ -81,24 +77,21 @@ struct ioatdma_device {
 };
 
 struct ioat_chan_common {
+	struct dma_chan common;
 	void __iomem *reg_base;
-
 	unsigned long last_completion;
-	unsigned long last_completion_time;
-
 	spinlock_t cleanup_lock;
 	dma_cookie_t completed_cookie;
-	unsigned long watchdog_completion;
-	int watchdog_tcp_cookie;
-	u32 watchdog_last_tcp_cookie;
-	struct delayed_work work;
-
+	unsigned long state;
+	#define IOAT_COMPLETION_PENDING 0
+	#define IOAT_COMPLETION_ACK 1
+	#define IOAT_RESET_PENDING 2
+	struct timer_list timer;
+	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
+	#define RESET_DELAY msecs_to_jiffies(100)
 	struct ioatdma_device *device;
-	struct dma_chan common;
-
 	dma_addr_t completion_dma;
 	u64 *completion;
-	unsigned long last_compl_desc_addr_hw;
 	struct tasklet_struct cleanup_task;
 };
 
@@ -148,7 +141,6 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
 
 	last_used = c->cookie;
 	last_complete = chan->completed_cookie;
-	chan->watchdog_tcp_cookie = cookie;
 
 	if (done)
 		*done = last_complete;
@@ -215,6 +207,85 @@ ioat_chan_by_index(struct ioatdma_device *device, int index)
 	return device->idx[index];
 }
 
+static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+{
+	u8 ver = chan->device->version;
+	u64 status;
+	u32 status_lo;
+
+	/* We need to read the low address first as this causes the
+	 * chipset to latch the upper bits for the subsequent read
+	 */
+	status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
+	status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
+	status <<= 32;
+	status |= status_lo;
+
+	return status;
+}
+
+static inline void ioat_start(struct ioat_chan_common *chan)
+{
+	u8 ver = chan->device->version;
+
+	writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+}
+
+static inline u64 ioat_chansts_to_addr(u64 status)
+{
+	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+}
+
+static inline u32 ioat_chanerr(struct ioat_chan_common *chan)
+{
+	return readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+}
+
+static inline void ioat_suspend(struct ioat_chan_common *chan)
+{
+	u8 ver = chan->device->version;
+
+	writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+}
+
+static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+
+	writel(addr & 0x00000000FFFFFFFF,
+	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+	writel(addr >> 32,
+	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+}
+
+static inline bool is_ioat_active(unsigned long status)
+{
+	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
+}
+
+static inline bool is_ioat_idle(unsigned long status)
+{
+	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
+}
+
+static inline bool is_ioat_halted(unsigned long status)
+{
+	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
+}
+
+static inline bool is_ioat_suspended(unsigned long status)
+{
+	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
+}
+
+/* channel was fatally programmed */
+static inline bool is_ioat_bug(unsigned long err)
+{
+	return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR|
+			 IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR|
+			 IOAT_CHANERR_LENGTH_ERR));
+}
+
 int __devinit ioat_probe(struct ioatdma_device *device);
 int __devinit ioat_register(struct ioatdma_device *device);
 int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca);
@@ -224,8 +295,11 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
 unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
 		       struct ioat_chan_common *chan, int idx,
-		       work_func_t work_fn, void (*tasklet)(unsigned long),
-		       unsigned long tasklet_data);
+		       void (*timer_fn)(unsigned long),
+		       void (*tasklet)(unsigned long),
+		       unsigned long ioat);
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 		    size_t len, struct ioat_dma_descriptor *hw);
+bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+			   unsigned long *phys_complete);
 #endif /* IOATDMA_H */
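
Note on the new helpers: ioat_chansts() deliberately reads the low half of the channel status register before the high half, since the hardware latches the upper 32 bits on the low read; the decoded value can then be classified with the is_ioat_*() predicates, and the new IOAT_COMPLETION_PENDING bit plus COMPLETION_TIMEOUT drive the per-channel timer that replaces the removed global watchdog. The sketch below is illustrative only and not part of this patch; the function name and its polling policy are hypothetical, and it only exercises identifiers introduced above.

/* Illustrative sketch, not part of the patch: a hypothetical poll routine
 * built from the helpers and the new state/timer fields added above.
 */
static void example_poll_channel(struct ioat_chan_common *chan)
{
	u64 status = ioat_chansts(chan);	/* low half read first; hw latches high half */

	if (is_ioat_halted(status)) {
		u32 chanerr = ioat_chanerr(chan);

		if (is_ioat_bug(chanerr))
			return;	/* fatally programmed; a real handler would reset the channel */
	}

	/* completions still outstanding: re-arm the per-channel timer */
	if (is_ioat_active(status) &&
	    test_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
}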