-rw-r--r--  drivers/dma/ioat/dma.c  711
-rw-r--r--  drivers/dma/ioat/dma.h   49
2 files changed, 390 insertions, 370 deletions
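The changes below split the old struct ioat_dma_chan into a version-independent struct ioat_chan_common that is embedded as a "base" member, while the ioat1/ioat2-specific state stays in struct ioat_dma_chan; code that only holds the common part recovers the full channel with container_of(). A minimal sketch of the pattern, assuming trimmed-down field lists (the real definitions in dma.h carry many more members, and the to_ioat_dma_chan() helper here is purely illustrative):

#include <linux/kernel.h>
#include <linux/spinlock.h>

/* Illustrative only: fields reduced to a handful for the sketch. */
struct ioat_chan_common {
	void __iomem *reg_base;		/* version-independent channel state */
	spinlock_t cleanup_lock;
};

struct ioat_dma_chan {
	struct ioat_chan_common base;	/* common state embedded as "base" */
	spinlock_t desc_lock;		/* ioat1/ioat2-specific state */
	int pending;
};

/* Hypothetical helper: given the common part, recover the containing channel. */
static inline struct ioat_dma_chan *to_ioat_dma_chan(struct ioat_chan_common *chan)
{
	return container_of(chan, struct ioat_dma_chan, base);
}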
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index cc5c557ddc83..2e81e0c76e61 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -47,15 +47,15 @@ static void ioat_dma_chan_reset_part2(struct work_struct *work);
 static void ioat_dma_chan_watchdog(struct work_struct *work);
 
 /* internal functions */
-static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
+static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat);
+static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat);
 
 static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat);
 static struct ioat_desc_sw *
-ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat);
 
-static inline struct ioat_dma_chan *
+static inline struct ioat_chan_common *
 ioat_chan_by_index(struct ioatdma_device *device, int index)
 {
 	return device->idx[index];
@@ -69,7 +69,7 @@ ioat_chan_by_index(struct ioatdma_device *device, int index)
 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
 {
 	struct ioatdma_device *instance = data;
-	struct ioat_dma_chan *ioat_chan;
+	struct ioat_chan_common *chan;
 	unsigned long attnstatus;
 	int bit;
 	u8 intrctrl;
@@ -86,8 +86,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
 
 	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
 	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
-		ioat_chan = ioat_chan_by_index(instance, bit);
-		tasklet_schedule(&ioat_chan->cleanup_task);
+		chan = ioat_chan_by_index(instance, bit);
+		tasklet_schedule(&chan->cleanup_task);
 	}
 
 	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -101,9 +101,9 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
  */
 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
 {
-	struct ioat_dma_chan *ioat_chan = data;
+	struct ioat_chan_common *chan = data;
 
-	tasklet_schedule(&ioat_chan->cleanup_task);
+	tasklet_schedule(&chan->cleanup_task);
 
 	return IRQ_HANDLED;
 }
@@ -119,7 +119,8 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	u8 xfercap_scale;
 	u32 xfercap;
 	int i;
-	struct ioat_dma_chan *ioat_chan;
+	struct ioat_chan_common *chan;
+	struct ioat_dma_chan *ioat;
 	struct device *dev = &device->pdev->dev;
 	struct dma_device *dma = &device->common;
 
@@ -133,29 +134,30 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	dma->chancnt--;
 #endif
 	for (i = 0; i < dma->chancnt; i++) {
-		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
-		if (!ioat_chan) {
+		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
+		if (!ioat) {
 			dma->chancnt = i;
 			break;
 		}
 
-		ioat_chan->device = device;
-		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
-		ioat_chan->xfercap = xfercap;
-		ioat_chan->desccount = 0;
-		INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
-		spin_lock_init(&ioat_chan->cleanup_lock);
-		spin_lock_init(&ioat_chan->desc_lock);
-		INIT_LIST_HEAD(&ioat_chan->free_desc);
-		INIT_LIST_HEAD(&ioat_chan->used_desc);
+		chan = &ioat->base;
+		chan->device = device;
+		chan->reg_base = device->reg_base + (0x80 * (i + 1));
+		ioat->xfercap = xfercap;
+		ioat->desccount = 0;
+		INIT_DELAYED_WORK(&chan->work, ioat_dma_chan_reset_part2);
+		spin_lock_init(&chan->cleanup_lock);
+		spin_lock_init(&ioat->desc_lock);
+		INIT_LIST_HEAD(&ioat->free_desc);
+		INIT_LIST_HEAD(&ioat->used_desc);
 		/* This should be made common somewhere in dmaengine.c */
-		ioat_chan->common.device = &device->common;
-		list_add_tail(&ioat_chan->common.device_node, &dma->channels);
-		device->idx[i] = ioat_chan;
-		tasklet_init(&ioat_chan->cleanup_task,
+		chan->common.device = &device->common;
+		list_add_tail(&chan->common.device_node, &dma->channels);
+		device->idx[i] = chan;
+		tasklet_init(&chan->cleanup_task,
 			     ioat_dma_cleanup_tasklet,
-			     (unsigned long) ioat_chan);
-		tasklet_disable(&ioat_chan->cleanup_task);
+			     (unsigned long) ioat);
+		tasklet_disable(&chan->cleanup_task);
 	}
 	return dma->chancnt;
 }
@@ -166,39 +168,42 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
  * @chan: DMA channel handle
  */
 static inline void
-__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan)
+__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
 {
-	ioat_chan->pending = 0;
-	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
+	void __iomem *reg_base = ioat->base.reg_base;
+
+	ioat->pending = 0;
+	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
 }
 
 static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+	struct ioat_dma_chan *ioat = to_ioat_chan(chan);
 
-	if (ioat_chan->pending > 0) {
-		spin_lock_bh(&ioat_chan->desc_lock);
-		__ioat1_dma_memcpy_issue_pending(ioat_chan);
-		spin_unlock_bh(&ioat_chan->desc_lock);
+	if (ioat->pending > 0) {
+		spin_lock_bh(&ioat->desc_lock);
+		__ioat1_dma_memcpy_issue_pending(ioat);
+		spin_unlock_bh(&ioat->desc_lock);
 	}
 }
 
 static inline void
-__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan)
+__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
 {
-	ioat_chan->pending = 0;
-	writew(ioat_chan->dmacount,
-	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+	void __iomem *reg_base = ioat->base.reg_base;
+
+	ioat->pending = 0;
+	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
 }
 
 static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+	struct ioat_dma_chan *ioat = to_ioat_chan(chan);
 
-	if (ioat_chan->pending > 0) {
-		spin_lock_bh(&ioat_chan->desc_lock);
-		__ioat2_dma_memcpy_issue_pending(ioat_chan);
-		spin_unlock_bh(&ioat_chan->desc_lock);
+	if (ioat->pending > 0) {
+		spin_lock_bh(&ioat->desc_lock);
+		__ioat2_dma_memcpy_issue_pending(ioat);
+		spin_unlock_bh(&ioat->desc_lock);
 	}
 }
 
@@ -208,84 +213,88 @@ static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
  */
 static void ioat_dma_chan_reset_part2(struct work_struct *work)
 {
-	struct ioat_dma_chan *ioat_chan =
-		container_of(work, struct ioat_dma_chan, work.work);
+	struct ioat_chan_common *chan;
+	struct ioat_dma_chan *ioat;
 	struct ioat_desc_sw *desc;
 
-	spin_lock_bh(&ioat_chan->cleanup_lock);
-	spin_lock_bh(&ioat_chan->desc_lock);
+	chan = container_of(work, struct ioat_chan_common, work.work);
+	ioat = container_of(chan, struct ioat_dma_chan, base);
+	spin_lock_bh(&chan->cleanup_lock);
+	spin_lock_bh(&ioat->desc_lock);
 
-	ioat_chan->completion_virt->low = 0;
-	ioat_chan->completion_virt->high = 0;
-	ioat_chan->pending = 0;
+	chan->completion_virt->low = 0;
+	chan->completion_virt->high = 0;
+	ioat->pending = 0;
 
 	/*
 	 * count the descriptors waiting, and be sure to do it
 	 * right for both the CB1 line and the CB2 ring
 	 */
-	ioat_chan->dmacount = 0;
-	if (ioat_chan->used_desc.prev) {
-		desc = to_ioat_desc(ioat_chan->used_desc.prev);
+	ioat->dmacount = 0;
+	if (ioat->used_desc.prev) {
+		desc = to_ioat_desc(ioat->used_desc.prev);
 		do {
-			ioat_chan->dmacount++;
+			ioat->dmacount++;
 			desc = to_ioat_desc(desc->node.next);
-		} while (&desc->node != ioat_chan->used_desc.next);
+		} while (&desc->node != ioat->used_desc.next);
 	}
 
 	/*
 	 * write the new starting descriptor address
 	 * this puts channel engine into ARMED state
 	 */
-	desc = to_ioat_desc(ioat_chan->used_desc.prev);
-	switch (ioat_chan->device->version) {
+	desc = to_ioat_desc(ioat->used_desc.prev);
+	switch (chan->device->version) {
 	case IOAT_VER_1_2:
 		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
-		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
 		writel(((u64) desc->txd.phys) >> 32,
-		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
 
-		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
-			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+		writeb(IOAT_CHANCMD_START, chan->reg_base
+			+ IOAT_CHANCMD_OFFSET(chan->device->version));
 		break;
 	case IOAT_VER_2_0:
 		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
-		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+		       chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
 		writel(((u64) desc->txd.phys) >> 32,
-		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+		       chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
 
 		/* tell the engine to go with what's left to be done */
-		writew(ioat_chan->dmacount,
-		       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+		writew(ioat->dmacount,
+		       chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
 
 		break;
 	}
-	dev_err(to_dev(ioat_chan),
+	dev_err(to_dev(chan),
 		"chan%d reset - %d descs waiting, %d total desc\n",
-		chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
+		chan_num(chan), ioat->dmacount, ioat->desccount);
 
-	spin_unlock_bh(&ioat_chan->desc_lock);
-	spin_unlock_bh(&ioat_chan->cleanup_lock);
+	spin_unlock_bh(&ioat->desc_lock);
+	spin_unlock_bh(&chan->cleanup_lock);
 }
 
 /**
  * ioat_dma_reset_channel - restart a channel
- * @ioat_chan: IOAT DMA channel handle
+ * @ioat: IOAT DMA channel handle
  */
-static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
+static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat)
 {
+	struct ioat_chan_common *chan = &ioat->base;
+	void __iomem *reg_base = chan->reg_base;
 	u32 chansts, chanerr;
 
-	if (!ioat_chan->used_desc.prev)
+	if (!ioat->used_desc.prev)
 		return;
 
-	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-	chansts = (ioat_chan->completion_virt->low
+	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
+	chansts = (chan->completion_virt->low
 		   & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
 	if (chanerr) {
-		dev_err(to_dev(ioat_chan),
+		dev_err(to_dev(chan),
 			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
-			chan_num(ioat_chan), chansts, chanerr);
-		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+			chan_num(chan), chansts, chanerr);
+		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
 	}
 
 	/*
@@ -296,15 +305,14 @@ static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
 	 * while we're waiting.
 	 */
 
-	spin_lock_bh(&ioat_chan->desc_lock);
-	ioat_chan->pending = INT_MIN;
+	spin_lock_bh(&ioat->desc_lock);
+	ioat->pending = INT_MIN;
 	writeb(IOAT_CHANCMD_RESET,
-	       ioat_chan->reg_base
-	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
-	spin_unlock_bh(&ioat_chan->desc_lock);
+	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
+	spin_unlock_bh(&ioat->desc_lock);
 
 	/* schedule the 2nd half instead of sleeping a long time */
-	schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
+	schedule_delayed_work(&chan->work, RESET_DELAY);
 }
 
 /**
@@ -314,7 +322,8 @@ static void ioat_dma_chan_watchdog(struct work_struct *work)
 {
 	struct ioatdma_device *device =
 		container_of(work, struct ioatdma_device, work.work);
-	struct ioat_dma_chan *ioat_chan;
+	struct ioat_dma_chan *ioat;
+	struct ioat_chan_common *chan;
 	int i;
 
 	union {
@@ -327,23 +336,21 @@ static void ioat_dma_chan_watchdog(struct work_struct *work)
 	unsigned long compl_desc_addr_hw;
 
 	for (i = 0; i < device->common.chancnt; i++) {
-		ioat_chan = ioat_chan_by_index(device, i);
+		chan = ioat_chan_by_index(device, i);
+		ioat = container_of(chan, struct ioat_dma_chan, base);
 
-		if (ioat_chan->device->version == IOAT_VER_1_2
+		if (chan->device->version == IOAT_VER_1_2
 		    /* have we started processing anything yet */
-		    && ioat_chan->last_completion
+		    && chan->last_completion
 		    /* have we completed any since last watchdog cycle? */
-		    && (ioat_chan->last_completion ==
-			ioat_chan->watchdog_completion)
+		    && (chan->last_completion == chan->watchdog_completion)
 		    /* has TCP stuck on one cookie since last watchdog? */
-		    && (ioat_chan->watchdog_tcp_cookie ==
-			ioat_chan->watchdog_last_tcp_cookie)
-		    && (ioat_chan->watchdog_tcp_cookie !=
-			ioat_chan->completed_cookie)
+		    && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie)
+		    && (chan->watchdog_tcp_cookie != chan->completed_cookie)
 		    /* is there something in the chain to be processed? */
 		    /* CB1 chain always has at least the last one processed */
-		    && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
-		    && ioat_chan->pending == 0) {
+		    && (ioat->used_desc.prev != ioat->used_desc.next)
+		    && ioat->pending == 0) {
 
 			/*
 			 * check CHANSTS register for completed
@@ -360,10 +367,10 @@ static void ioat_dma_chan_watchdog(struct work_struct *work)
 			 * try resetting the channel
 			 */
 
-			completion_hw.low = readl(ioat_chan->reg_base +
-				IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
-			completion_hw.high = readl(ioat_chan->reg_base +
-				IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
+			completion_hw.low = readl(chan->reg_base +
+				IOAT_CHANSTS_OFFSET_LOW(chan->device->version));
+			completion_hw.high = readl(chan->reg_base +
+				IOAT_CHANSTS_OFFSET_HIGH(chan->device->version));
 #if (BITS_PER_LONG == 64)
 			compl_desc_addr_hw =
 				completion_hw.full
@@ -374,15 +381,15 @@ static void ioat_dma_chan_watchdog(struct work_struct *work)
 #endif
 
 			if ((compl_desc_addr_hw != 0)
-			    && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
-			    && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
-				ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
-				ioat_chan->completion_virt->low = completion_hw.low;
-				ioat_chan->completion_virt->high = completion_hw.high;
+			    && (compl_desc_addr_hw != chan->watchdog_completion)
+			    && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) {
+				chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
+				chan->completion_virt->low = completion_hw.low;
+				chan->completion_virt->high = completion_hw.high;
 			} else {
-				ioat_dma_reset_channel(ioat_chan);
-				ioat_chan->watchdog_completion = 0;
-				ioat_chan->last_compl_desc_addr_hw = 0;
+				ioat_dma_reset_channel(ioat);
+				chan->watchdog_completion = 0;
+				chan->last_compl_desc_addr_hw = 0;
 			}
 
 		/*
@@ -393,25 +400,22 @@ static void ioat_dma_chan_watchdog(struct work_struct *work)
 		 * else
 		 * try resetting the channel
 		 */
-		} else if (ioat_chan->device->version == IOAT_VER_2_0
-		    && ioat_chan->used_desc.prev
-		    && ioat_chan->last_completion
-		    && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
+		} else if (chan->device->version == IOAT_VER_2_0
+		    && ioat->used_desc.prev
+		    && chan->last_completion
+		    && chan->last_completion == chan->watchdog_completion) {
 
-			if (ioat_chan->pending < ioat_pending_level)
-				ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
+			if (ioat->pending < ioat_pending_level)
+				ioat2_dma_memcpy_issue_pending(&chan->common);
 			else {
-				ioat_dma_reset_channel(ioat_chan);
-				ioat_chan->watchdog_completion = 0;
+				ioat_dma_reset_channel(ioat);
+				chan->watchdog_completion = 0;
 			}
 		} else {
-			ioat_chan->last_compl_desc_addr_hw = 0;
-			ioat_chan->watchdog_completion
-				= ioat_chan->last_completion;
+			chan->last_compl_desc_addr_hw = 0;
+			chan->watchdog_completion = chan->last_completion;
 		}
-
-		ioat_chan->watchdog_last_tcp_cookie =
-			ioat_chan->watchdog_tcp_cookie;
+		chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie;
 	}
 
 	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
@@ -419,40 +423,42 @@ static void ioat_dma_chan_watchdog(struct work_struct *work)
 
 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+	struct dma_chan *c = tx->chan;
+	struct ioat_dma_chan *ioat = to_ioat_chan(c);
 	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
 	struct ioat_desc_sw *first;
 	struct ioat_desc_sw *chain_tail;
 	dma_cookie_t cookie;
 
-	spin_lock_bh(&ioat_chan->desc_lock);
+	spin_lock_bh(&ioat->desc_lock);
 	/* cookie incr and addition to used_list must be atomic */
-	cookie = ioat_chan->common.cookie;
+	cookie = c->cookie;
 	cookie++;
 	if (cookie < 0)
 		cookie = 1;
-	ioat_chan->common.cookie = tx->cookie = cookie;
+	c->cookie = cookie;
+	tx->cookie = cookie;
 
 	/* write address into NextDescriptor field of last desc in chain */
 	first = to_ioat_desc(tx->tx_list.next);
-	chain_tail = to_ioat_desc(ioat_chan->used_desc.prev);
+	chain_tail = to_ioat_desc(ioat->used_desc.prev);
 	/* make descriptor updates globally visible before chaining */
 	wmb();
 	chain_tail->hw->next = first->txd.phys;
-	list_splice_tail_init(&tx->tx_list, &ioat_chan->used_desc);
+	list_splice_tail_init(&tx->tx_list, &ioat->used_desc);
 
-	ioat_chan->dmacount += desc->tx_cnt;
-	ioat_chan->pending += desc->tx_cnt;
-	if (ioat_chan->pending >= ioat_pending_level)
-		__ioat1_dma_memcpy_issue_pending(ioat_chan);
-	spin_unlock_bh(&ioat_chan->desc_lock);
+	ioat->dmacount += desc->tx_cnt;
+	ioat->pending += desc->tx_cnt;
+	if (ioat->pending >= ioat_pending_level)
+		__ioat1_dma_memcpy_issue_pending(ioat);
+	spin_unlock_bh(&ioat->desc_lock);
 
 	return cookie;
 }
 
 static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+	struct ioat_dma_chan *ioat = to_ioat_chan(tx->chan);
 	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
 	struct ioat_desc_sw *new;
 	struct ioat_dma_descriptor *hw;
@@ -471,11 +477,11 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	new = first;
 
 	/*
-	 * ioat_chan->desc_lock is still in force in version 2 path
+	 * ioat->desc_lock is still in force in version 2 path
 	 * it gets unlocked at end of this function
 	 */
 	do {
-		copy = min_t(size_t, len, ioat_chan->xfercap);
+		copy = min_t(size_t, len, ioat->xfercap);
 
 		async_tx_ack(&new->txd);
 
@@ -489,11 +495,11 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 		dst += copy;
 		src += copy;
 		desc_count++;
-	} while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
+	} while (len && (new = ioat2_dma_get_next_descriptor(ioat)));
 
 	if (!new) {
-		dev_err(to_dev(ioat_chan), "tx submit failed\n");
-		spin_unlock_bh(&ioat_chan->desc_lock);
+		dev_err(to_dev(&ioat->base), "tx submit failed\n");
+		spin_unlock_bh(&ioat->desc_lock);
 		return -ENOMEM;
 	}
 
@@ -521,35 +527,35 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	/* cookie incr and addition to used_list must be atomic */
-	cookie = ioat_chan->common.cookie;
+	cookie = ioat->base.common.cookie;
 	cookie++;
 	if (cookie < 0)
 		cookie = 1;
-	ioat_chan->common.cookie = new->txd.cookie = cookie;
+	ioat->base.common.cookie = new->txd.cookie = cookie;
 
-	ioat_chan->dmacount += desc_count;
-	ioat_chan->pending += desc_count;
-	if (ioat_chan->pending >= ioat_pending_level)
-		__ioat2_dma_memcpy_issue_pending(ioat_chan);
-	spin_unlock_bh(&ioat_chan->desc_lock);
+	ioat->dmacount += desc_count;
+	ioat->pending += desc_count;
+	if (ioat->pending >= ioat_pending_level)
+		__ioat2_dma_memcpy_issue_pending(ioat);
+	spin_unlock_bh(&ioat->desc_lock);
 
 	return cookie;
 }
 
 /**
  * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
- * @ioat_chan: the channel supplying the memory pool for the descriptors
+ * @ioat: the channel supplying the memory pool for the descriptors
  * @flags: allocation flags
  */
 static struct ioat_desc_sw *
-ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags)
+ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
 {
 	struct ioat_dma_descriptor *desc;
 	struct ioat_desc_sw *desc_sw;
 	struct ioatdma_device *ioatdma_device;
 	dma_addr_t phys;
 
-	ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
+	ioatdma_device = ioat->base.device;
 	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
 	if (unlikely(!desc))
 		return NULL;
@@ -561,8 +567,8 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags)
 	}
 
 	memset(desc, 0, sizeof(*desc));
-	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat_chan->common);
-	switch (ioat_chan->device->version) {
+	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
+	switch (ioatdma_device->version) {
 	case IOAT_VER_1_2:
 		desc_sw->txd.tx_submit = ioat1_tx_submit;
 		break;
@@ -585,26 +591,26 @@ MODULE_PARM_DESC(ioat_initial_desc_count,
 
 /**
  * ioat2_dma_massage_chan_desc - link the descriptors into a circle
- * @ioat_chan: the channel to be massaged
+ * @ioat: the channel to be massaged
  */
-static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
+static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat)
 {
 	struct ioat_desc_sw *desc, *_desc;
 
 	/* setup used_desc */
-	ioat_chan->used_desc.next = ioat_chan->free_desc.next;
-	ioat_chan->used_desc.prev = NULL;
+	ioat->used_desc.next = ioat->free_desc.next;
+	ioat->used_desc.prev = NULL;
 
 	/* pull free_desc out of the circle so that every node is a hw
 	 * descriptor, but leave it pointing to the list
 	 */
-	ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
-	ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
+	ioat->free_desc.prev->next = ioat->free_desc.next;
+	ioat->free_desc.next->prev = ioat->free_desc.prev;
 
 	/* circle link the hw descriptors */
-	desc = to_ioat_desc(ioat_chan->free_desc.next);
+	desc = to_ioat_desc(ioat->free_desc.next);
 	desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys;
-	list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
+	list_for_each_entry_safe(desc, _desc, ioat->free_desc.next, node) {
 		desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys;
 	}
 }
@@ -613,9 +619,10 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
  * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
  * @chan: the channel to be filled out
  */
-static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
+static int ioat_dma_alloc_chan_resources(struct dma_chan *c)
 {
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+	struct ioat_dma_chan *ioat = to_ioat_chan(c);
+	struct ioat_chan_common *chan = &ioat->base;
 	struct ioat_desc_sw *desc;
 	u16 chanctrl;
 	u32 chanerr;
@@ -623,89 +630,87 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 	LIST_HEAD(tmp_list);
 
 	/* have we already been set up? */
-	if (!list_empty(&ioat_chan->free_desc))
-		return ioat_chan->desccount;
+	if (!list_empty(&ioat->free_desc))
+		return ioat->desccount;
 
 	/* Setup register to interrupt and write completion status on error */
 	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
 		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
 		IOAT_CHANCTRL_ERR_COMPLETION_EN;
-	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+	writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET);
 
-	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
 	if (chanerr) {
-		dev_err(to_dev(ioat_chan), "CHANERR = %x, clearing\n", chanerr);
-		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
+		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
 	}
 
 	/* Allocate descriptors */
 	for (i = 0; i < ioat_initial_desc_count; i++) {
-		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
+		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
 		if (!desc) {
-			dev_err(to_dev(ioat_chan),
-				"Only %d initial descriptors\n", i);
+			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
 			break;
 		}
 		list_add_tail(&desc->node, &tmp_list);
 	}
-	spin_lock_bh(&ioat_chan->desc_lock);
-	ioat_chan->desccount = i;
-	list_splice(&tmp_list, &ioat_chan->free_desc);
-	if (ioat_chan->device->version != IOAT_VER_1_2)
-		ioat2_dma_massage_chan_desc(ioat_chan);
-	spin_unlock_bh(&ioat_chan->desc_lock);
+	spin_lock_bh(&ioat->desc_lock);
+	ioat->desccount = i;
+	list_splice(&tmp_list, &ioat->free_desc);
+	if (chan->device->version != IOAT_VER_1_2)
+		ioat2_dma_massage_chan_desc(ioat);
+	spin_unlock_bh(&ioat->desc_lock);
 
 	/* allocate a completion writeback area */
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
-	ioat_chan->completion_virt =
-		pci_pool_alloc(ioat_chan->device->completion_pool,
-			       GFP_KERNEL,
-			       &ioat_chan->completion_addr);
-	memset(ioat_chan->completion_virt, 0,
-	       sizeof(*ioat_chan->completion_virt));
-	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
-	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-	writel(((u64) ioat_chan->completion_addr) >> 32,
-	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
-	tasklet_enable(&ioat_chan->cleanup_task);
-	ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */
-	return ioat_chan->desccount;
+	chan->completion_virt = pci_pool_alloc(chan->device->completion_pool,
+					       GFP_KERNEL,
+					       &chan->completion_addr);
+	memset(chan->completion_virt, 0,
+	       sizeof(*chan->completion_virt));
+	writel(((u64) chan->completion_addr) & 0x00000000FFFFFFFF,
+	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+	writel(((u64) chan->completion_addr) >> 32,
+	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+	tasklet_enable(&chan->cleanup_task);
+	ioat_dma_start_null_desc(ioat); /* give chain to dma device */
+	return ioat->desccount;
 }
 
 /**
  * ioat_dma_free_chan_resources - release all the descriptors
  * @chan: the channel to be cleaned
  */
-static void ioat_dma_free_chan_resources(struct dma_chan *chan)
+static void ioat_dma_free_chan_resources(struct dma_chan *c)
 {
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
+	struct ioat_dma_chan *ioat = to_ioat_chan(c);
+	struct ioat_chan_common *chan = &ioat->base;
+	struct ioatdma_device *ioatdma_device = chan->device;
 	struct ioat_desc_sw *desc, *_desc;
 	int in_use_descs = 0;
 
 	/* Before freeing channel resources first check
 	 * if they have been previously allocated for this channel.
 	 */
-	if (ioat_chan->desccount == 0)
+	if (ioat->desccount == 0)
 		return;
 
-	tasklet_disable(&ioat_chan->cleanup_task);
-	ioat_dma_memcpy_cleanup(ioat_chan);
+	tasklet_disable(&chan->cleanup_task);
+	ioat_dma_memcpy_cleanup(ioat);
 
 	/* Delay 100ms after reset to allow internal DMA logic to quiesce
 	 * before removing DMA descriptor resources.
 	 */
 	writeb(IOAT_CHANCMD_RESET,
-	       ioat_chan->reg_base
-	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
 	mdelay(100);
 
-	spin_lock_bh(&ioat_chan->desc_lock);
-	switch (ioat_chan->device->version) {
+	spin_lock_bh(&ioat->desc_lock);
+	switch (chan->device->version) {
 	case IOAT_VER_1_2:
 		list_for_each_entry_safe(desc, _desc,
-					 &ioat_chan->used_desc, node) {
+					 &ioat->used_desc, node) {
 			in_use_descs++;
 			list_del(&desc->node);
 			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
@@ -713,7 +718,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 			kfree(desc);
 		}
 		list_for_each_entry_safe(desc, _desc,
-					 &ioat_chan->free_desc, node) {
+					 &ioat->free_desc, node) {
 			list_del(&desc->node);
 			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
 				      desc->txd.phys);
@@ -723,62 +728,61 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 	case IOAT_VER_2_0:
 	case IOAT_VER_3_0:
 		list_for_each_entry_safe(desc, _desc,
-					 ioat_chan->free_desc.next, node) {
+					 ioat->free_desc.next, node) {
 			list_del(&desc->node);
 			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
 				      desc->txd.phys);
 			kfree(desc);
 		}
-		desc = to_ioat_desc(ioat_chan->free_desc.next);
+		desc = to_ioat_desc(ioat->free_desc.next);
 		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
 			      desc->txd.phys);
 		kfree(desc);
-		INIT_LIST_HEAD(&ioat_chan->free_desc);
-		INIT_LIST_HEAD(&ioat_chan->used_desc);
+		INIT_LIST_HEAD(&ioat->free_desc);
+		INIT_LIST_HEAD(&ioat->used_desc);
 		break;
 	}
-	spin_unlock_bh(&ioat_chan->desc_lock);
+	spin_unlock_bh(&ioat->desc_lock);
 
 	pci_pool_free(ioatdma_device->completion_pool,
-		      ioat_chan->completion_virt,
-		      ioat_chan->completion_addr);
+		      chan->completion_virt,
+		      chan->completion_addr);
 
 	/* one is ok since we left it on there on purpose */
 	if (in_use_descs > 1)
-		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
+		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
 			in_use_descs - 1);
 
-	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
-	ioat_chan->pending = 0;
-	ioat_chan->dmacount = 0;
-	ioat_chan->desccount = 0;
-	ioat_chan->watchdog_completion = 0;
-	ioat_chan->last_compl_desc_addr_hw = 0;
-	ioat_chan->watchdog_tcp_cookie =
-		ioat_chan->watchdog_last_tcp_cookie = 0;
+	chan->last_completion = chan->completion_addr = 0;
+	chan->watchdog_completion = 0;
+	chan->last_compl_desc_addr_hw = 0;
+	chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0;
+	ioat->pending = 0;
+	ioat->dmacount = 0;
+	ioat->desccount = 0;
 }
 
 /**
- * ioat_dma_get_next_descriptor - return the next available descriptor
- * @ioat_chan: IOAT DMA channel handle
+ * ioat1_dma_get_next_descriptor - return the next available descriptor
+ * @ioat: IOAT DMA channel handle
  *
  * Gets the next descriptor from the chain, and must be called with the
  * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
  */
 static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
 {
 	struct ioat_desc_sw *new;
 
-	if (!list_empty(&ioat_chan->free_desc)) {
-		new = to_ioat_desc(ioat_chan->free_desc.next);
+	if (!list_empty(&ioat->free_desc)) {
+		new = to_ioat_desc(ioat->free_desc.next);
 		list_del(&new->node);
 	} else {
 		/* try to get another desc */
-		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
+		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
 		if (!new) {
-			dev_err(to_dev(ioat_chan), "alloc failed\n");
+			dev_err(to_dev(&ioat->base), "alloc failed\n");
 			return NULL;
 		}
 	}
@@ -788,7 +792,7 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 }
 
 static struct ioat_desc_sw *
-ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
 {
 	struct ioat_desc_sw *new;
 
@@ -801,15 +805,15 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 	 * linking in a new set of descriptors, since the device
 	 * has probably already read the pointer to it
 	 */
-	if (ioat_chan->used_desc.prev &&
-	    ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
+	if (ioat->used_desc.prev &&
+	    ioat->used_desc.next == ioat->used_desc.prev->prev) {
 
 		struct ioat_desc_sw *desc;
 		struct ioat_desc_sw *noop_desc;
 		int i;
 
 		/* set up the noop descriptor */
-		noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
+		noop_desc = to_ioat_desc(ioat->used_desc.next);
 		/* set size to non-zero value (channel returns error when size is 0) */
 		noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
 		noop_desc->hw->ctl = 0;
@@ -817,60 +821,61 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 		noop_desc->hw->src_addr = 0;
 		noop_desc->hw->dst_addr = 0;
 
-		ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
-		ioat_chan->pending++;
-		ioat_chan->dmacount++;
+		ioat->used_desc.next = ioat->used_desc.next->next;
+		ioat->pending++;
+		ioat->dmacount++;
 
 		/* try to get a few more descriptors */
 		for (i = 16; i; i--) {
-			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
+			desc = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
 			if (!desc) {
-				dev_err(to_dev(ioat_chan), "alloc failed\n");
+				dev_err(to_dev(&ioat->base),
+					"alloc failed\n");
 				break;
 			}
-			list_add_tail(&desc->node, ioat_chan->used_desc.next);
+			list_add_tail(&desc->node, ioat->used_desc.next);
 
 			desc->hw->next
 				= to_ioat_desc(desc->node.next)->txd.phys;
 			to_ioat_desc(desc->node.prev)->hw->next
 				= desc->txd.phys;
-			ioat_chan->desccount++;
+			ioat->desccount++;
 		}
 
-		ioat_chan->used_desc.next = noop_desc->node.next;
+		ioat->used_desc.next = noop_desc->node.next;
 	}
-	new = to_ioat_desc(ioat_chan->used_desc.next);
+	new = to_ioat_desc(ioat->used_desc.next);
 	prefetch(new);
-	ioat_chan->used_desc.next = new->node.next;
+	ioat->used_desc.next = new->node.next;
 
-	if (ioat_chan->used_desc.prev == NULL)
-		ioat_chan->used_desc.prev = &new->node;
+	if (ioat->used_desc.prev == NULL)
+		ioat->used_desc.prev = &new->node;
 
 	prefetch(new->hw);
 	return new;
 }
 
 static struct ioat_desc_sw *
-ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
 {
-	if (!ioat_chan)
+	if (!ioat)
 		return NULL;
 
-	switch (ioat_chan->device->version) {
+	switch (ioat->base.device->version) {
 	case IOAT_VER_1_2:
-		return ioat1_dma_get_next_descriptor(ioat_chan);
+		return ioat1_dma_get_next_descriptor(ioat);
 	case IOAT_VER_2_0:
 	case IOAT_VER_3_0:
-		return ioat2_dma_get_next_descriptor(ioat_chan);
+		return ioat2_dma_get_next_descriptor(ioat);
 	}
 	return NULL;
 }
 
 static struct dma_async_tx_descriptor *
-ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
 		      dma_addr_t dma_src, size_t len, unsigned long flags)
 {
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+	struct ioat_dma_chan *ioat = to_ioat_chan(c);
 	struct ioat_desc_sw *desc;
 	size_t copy;
 	LIST_HEAD(chain);
@@ -880,14 +885,14 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 	struct ioat_dma_descriptor *hw = NULL;
 	int tx_cnt = 0;
 
-	spin_lock_bh(&ioat_chan->desc_lock);
-	desc = ioat_dma_get_next_descriptor(ioat_chan);
+	spin_lock_bh(&ioat->desc_lock);
+	desc = ioat_dma_get_next_descriptor(ioat);
 	do {
 		if (!desc)
 			break;
 
 		tx_cnt++;
-		copy = min_t(size_t, len, ioat_chan->xfercap);
+		copy = min_t(size_t, len, ioat->xfercap);
 
 		hw = desc->hw;
 		hw->size = copy;
@@ -904,7 +909,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 			struct ioat_desc_sw *next;
 
 			async_tx_ack(&desc->txd);
-			next = ioat_dma_get_next_descriptor(ioat_chan);
+			next = ioat_dma_get_next_descriptor(ioat);
 			hw->next = next ? next->txd.phys : 0;
 			desc = next;
 		} else
@@ -912,14 +917,16 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 	} while (len);
 
 	if (!desc) {
-		dev_err(to_dev(ioat_chan),
+		struct ioat_chan_common *chan = &ioat->base;
+
+		dev_err(to_dev(chan),
 			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
-			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
-		list_splice(&chain, &ioat_chan->free_desc);
-		spin_unlock_bh(&ioat_chan->desc_lock);
+			chan_num(chan), ioat->dmacount, ioat->desccount);
+		list_splice(&chain, &ioat->free_desc);
+		spin_unlock_bh(&ioat->desc_lock);
 		return NULL;
 	}
-	spin_unlock_bh(&ioat_chan->desc_lock);
+	spin_unlock_bh(&ioat->desc_lock);
 
 	desc->txd.flags = flags;
 	desc->tx_cnt = tx_cnt;
@@ -934,17 +941,17 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 }
 
 static struct dma_async_tx_descriptor *
-ioat2_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+ioat2_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
 		      dma_addr_t dma_src, size_t len, unsigned long flags)
 {
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+	struct ioat_dma_chan *ioat = to_ioat_chan(c);
 	struct ioat_desc_sw *new;
 
-	spin_lock_bh(&ioat_chan->desc_lock);
-	new = ioat2_dma_get_next_descriptor(ioat_chan);
+	spin_lock_bh(&ioat->desc_lock);
+	new = ioat2_dma_get_next_descriptor(ioat);
 
 	/*
-	 * leave ioat_chan->desc_lock set in ioat 2 path
+	 * leave ioat->desc_lock set in ioat 2 path
 	 * it will get unlocked at end of tx_submit
 	 */
 
@@ -955,10 +962,12 @@ ioat2_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 		new->txd.flags = flags;
 		return &new->txd;
 	} else {
-		spin_unlock_bh(&ioat_chan->desc_lock);
-		dev_err(to_dev(ioat_chan),
+		struct ioat_chan_common *chan = &ioat->base;
+
+		spin_unlock_bh(&ioat->desc_lock);
+		dev_err(to_dev(chan),
 			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
-			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
+			chan_num(chan), ioat->dmacount, ioat->desccount);
 		return NULL;
 	}
 }
@@ -968,20 +977,20 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
 	struct ioat_dma_chan *chan = (void *)data;
 	ioat_dma_memcpy_cleanup(chan);
 	writew(IOAT_CHANCTRL_INT_DISABLE,
-	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
+	       chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
 static void
-ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
+ioat_dma_unmap(struct ioat_chan_common *chan, struct ioat_desc_sw *desc)
 {
 	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			pci_unmap_single(ioat_chan->device->pdev,
+			pci_unmap_single(chan->device->pdev,
 					 pci_unmap_addr(desc, dst),
 					 pci_unmap_len(desc, len),
 					 PCI_DMA_FROMDEVICE);
 		else
-			pci_unmap_page(ioat_chan->device->pdev,
+			pci_unmap_page(chan->device->pdev,
 				       pci_unmap_addr(desc, dst),
 				       pci_unmap_len(desc, len),
 				       PCI_DMA_FROMDEVICE);
@@ -989,12 +998,12 @@ ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
 
 	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
 		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			pci_unmap_single(ioat_chan->device->pdev,
+			pci_unmap_single(chan->device->pdev,
 					 pci_unmap_addr(desc, src),
 					 pci_unmap_len(desc, len),
 					 PCI_DMA_TODEVICE);
 		else
-			pci_unmap_page(ioat_chan->device->pdev,
+			pci_unmap_page(chan->device->pdev,
 				       pci_unmap_addr(desc, src),
 				       pci_unmap_len(desc, len),
 				       PCI_DMA_TODEVICE);
@@ -1005,8 +1014,9 @@ ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
  * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
  * @chan: ioat channel to be cleaned up
  */
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
+static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat)
 {
+	struct ioat_chan_common *chan = &ioat->base;
 	unsigned long phys_complete;
 	struct ioat_desc_sw *desc, *_desc;
 	dma_cookie_t cookie = 0;
@@ -1014,9 +1024,9 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 	struct ioat_desc_sw *latest_desc;
 	struct dma_async_tx_descriptor *tx;
 
-	prefetch(ioat_chan->completion_virt);
+	prefetch(chan->completion_virt);
 
-	if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
+	if (!spin_trylock_bh(&chan->cleanup_lock))
 		return;
 
 	/* The completion writeback can happen at any time,
@@ -1026,49 +1036,47 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 
 #if (BITS_PER_LONG == 64)
 	phys_complete =
-		ioat_chan->completion_virt->full
+		chan->completion_virt->full
 		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
 #else
-	phys_complete =
-		ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
+	phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
 #endif
 
-	if ((ioat_chan->completion_virt->full
+	if ((chan->completion_virt->full
 	     & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
 				IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
-		dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n",
-			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
+		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
+			readl(chan->reg_base + IOAT_CHANERR_OFFSET));
 
 		/* TODO do something to salvage the situation */
 	}
 
-	if (phys_complete == ioat_chan->last_completion) {
-		spin_unlock_bh(&ioat_chan->cleanup_lock);
+	if (phys_complete == chan->last_completion) {
+		spin_unlock_bh(&chan->cleanup_lock);
 		/*
 		 * perhaps we're stuck so hard that the watchdog can't go off?
 		 * try to catch it after 2 seconds
 		 */
-		if (ioat_chan->device->version != IOAT_VER_3_0) {
+		if (chan->device->version != IOAT_VER_3_0) {
 			if (time_after(jiffies,
-			    ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
-				ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
-				ioat_chan->last_completion_time = jiffies;
+			    chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
+				ioat_dma_chan_watchdog(&(chan->device->work.work));
+				chan->last_completion_time = jiffies;
 			}
 		}
 		return;
 	}
-	ioat_chan->last_completion_time = jiffies;
+	chan->last_completion_time = jiffies;
 
 	cookie = 0;
-	if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
-		spin_unlock_bh(&ioat_chan->cleanup_lock);
+	if (!spin_trylock_bh(&ioat->desc_lock)) {
+		spin_unlock_bh(&chan->cleanup_lock);
 		return;
 	}
 
-	switch (ioat_chan->device->version) {
+	switch (chan->device->version) {
 	case IOAT_VER_1_2:
-		list_for_each_entry_safe(desc, _desc,
-					 &ioat_chan->used_desc, node) {
+		list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
 			tx = &desc->txd;
 			/*
 			 * Incoming DMA requests may use multiple descriptors,
@@ -1077,7 +1085,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 			 */
 			if (tx->cookie) {
 				cookie = tx->cookie;
-				ioat_dma_unmap(ioat_chan, desc);
+				ioat_dma_unmap(chan, desc);
 				if (tx->callback) {
 					tx->callback(tx->callback_param);
 					tx->callback = NULL;
@@ -1091,7 +1099,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 			 */
 			if (async_tx_test_ack(tx)) {
 				list_move_tail(&desc->node,
-					       &ioat_chan->free_desc);
+					       &ioat->free_desc);
 			} else
 				tx->cookie = 0;
 		} else {
@@ -1110,11 +1118,11 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 	case IOAT_VER_2_0:
 	case IOAT_VER_3_0:
 		/* has some other thread has already cleaned up? */
-		if (ioat_chan->used_desc.prev == NULL)
+		if (ioat->used_desc.prev == NULL)
 			break;
 
 		/* work backwards to find latest finished desc */
-		desc = to_ioat_desc(ioat_chan->used_desc.next);
+		desc = to_ioat_desc(ioat->used_desc.next);
 		tx = &desc->txd;
 		latest_desc = NULL;
 		do {
@@ -1125,18 +1133,18 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
1125 latest_desc = desc; 1133 latest_desc = desc;
1126 break; 1134 break;
1127 } 1135 }
1128 } while (&desc->node != ioat_chan->used_desc.prev); 1136 } while (&desc->node != ioat->used_desc.prev);
1129 1137
1130 if (latest_desc != NULL) { 1138 if (latest_desc != NULL) {
1131 /* work forwards to clear finished descriptors */ 1139 /* work forwards to clear finished descriptors */
1132 for (desc = to_ioat_desc(ioat_chan->used_desc.prev); 1140 for (desc = to_ioat_desc(ioat->used_desc.prev);
1133 &desc->node != latest_desc->node.next && 1141 &desc->node != latest_desc->node.next &&
1134 &desc->node != ioat_chan->used_desc.next; 1142 &desc->node != ioat->used_desc.next;
1135 desc = to_ioat_desc(desc->node.next)) { 1143 desc = to_ioat_desc(desc->node.next)) {
1136 if (tx->cookie) { 1144 if (tx->cookie) {
1137 cookie = tx->cookie; 1145 cookie = tx->cookie;
1138 tx->cookie = 0; 1146 tx->cookie = 0;
1139 ioat_dma_unmap(ioat_chan, desc); 1147 ioat_dma_unmap(chan, desc);
1140 if (tx->callback) { 1148 if (tx->callback) {
1141 tx->callback(tx->callback_param); 1149 tx->callback(tx->callback_param);
1142 tx->callback = NULL; 1150 tx->callback = NULL;
@@ -1145,21 +1153,21 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
1145 } 1153 }
1146 1154
1147 /* move used.prev up beyond those that are finished */ 1155 /* move used.prev up beyond those that are finished */
1148 if (&desc->node == ioat_chan->used_desc.next) 1156 if (&desc->node == ioat->used_desc.next)
1149 ioat_chan->used_desc.prev = NULL; 1157 ioat->used_desc.prev = NULL;
1150 else 1158 else
1151 ioat_chan->used_desc.prev = &desc->node; 1159 ioat->used_desc.prev = &desc->node;
1152 } 1160 }
1153 break; 1161 break;
1154 } 1162 }
1155 1163
1156 spin_unlock_bh(&ioat_chan->desc_lock); 1164 spin_unlock_bh(&ioat->desc_lock);
1157 1165
1158 ioat_chan->last_completion = phys_complete; 1166 chan->last_completion = phys_complete;
1159 if (cookie != 0) 1167 if (cookie != 0)
1160 ioat_chan->completed_cookie = cookie; 1168 chan->completed_cookie = cookie;
1161 1169
1162 spin_unlock_bh(&ioat_chan->cleanup_lock); 1170 spin_unlock_bh(&chan->cleanup_lock);
1163} 1171}
1164 1172
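The IOAT_VER_1_2 branch above is essentially a walk-and-retire loop: a descriptor with a nonzero cookie marks the end of a client transaction, so it reports its cookie, fires its callback, and (once acked) is recycled onto free_desc. A minimal sketch of that pattern, using demo_* stand-in types rather than the driver's real structures:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_desc {
		struct list_head node;
		int cookie;			/* 0: middle of a multi-descriptor transfer */
		void (*callback)(void *);
		void *callback_param;
	};

	/* retire everything on @used, return the newest completed cookie seen */
	static int demo_retire(struct list_head *used, struct list_head *free,
			       spinlock_t *lock)
	{
		struct demo_desc *d, *tmp;
		int last_cookie = 0;

		spin_lock_bh(lock);
		list_for_each_entry_safe(d, tmp, used, node) {
			if (d->cookie) {
				last_cookie = d->cookie;
				if (d->callback)
					d->callback(d->callback_param);
			}
			list_move_tail(&d->node, free);	/* recycle the descriptor */
		}
		spin_unlock_bh(lock);
		return last_cookie;
	}

Unlike this simplified sketch, the real cleanup above also stops at descriptors the hardware has not finished and keeps un-acked descriptors on the used list.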
1165/** 1173/**
@@ -1170,17 +1178,18 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
1170 * @used: if not %NULL, updated with last used transaction 1178 * @used: if not %NULL, updated with last used transaction
1171 */ 1179 */
1172static enum dma_status 1180static enum dma_status
1173ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie, 1181ioat_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
1174 dma_cookie_t *done, dma_cookie_t *used) 1182 dma_cookie_t *done, dma_cookie_t *used)
1175{ 1183{
1176 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 1184 struct ioat_dma_chan *ioat = to_ioat_chan(c);
1185 struct ioat_chan_common *chan = &ioat->base;
1177 dma_cookie_t last_used; 1186 dma_cookie_t last_used;
1178 dma_cookie_t last_complete; 1187 dma_cookie_t last_complete;
1179 enum dma_status ret; 1188 enum dma_status ret;
1180 1189
1181 last_used = chan->cookie; 1190 last_used = c->cookie;
1182 last_complete = ioat_chan->completed_cookie; 1191 last_complete = chan->completed_cookie;
1183 ioat_chan->watchdog_tcp_cookie = cookie; 1192 chan->watchdog_tcp_cookie = cookie;
1184 1193
1185 if (done) 1194 if (done)
1186 *done = last_complete; 1195 *done = last_complete;
@@ -1191,10 +1200,10 @@ ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie,
1191 if (ret == DMA_SUCCESS) 1200 if (ret == DMA_SUCCESS)
1192 return ret; 1201 return ret;
1193 1202
1194 ioat_dma_memcpy_cleanup(ioat_chan); 1203 ioat_dma_memcpy_cleanup(ioat);
1195 1204
1196 last_used = chan->cookie; 1205 last_used = c->cookie;
1197 last_complete = ioat_chan->completed_cookie; 1206 last_complete = chan->completed_cookie;
1198 1207
1199 if (done) 1208 if (done)
1200 *done = last_complete; 1209 *done = last_complete;
@@ -1204,19 +1213,20 @@ ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie,
1204 return dma_async_is_complete(cookie, last_complete, last_used); 1213 return dma_async_is_complete(cookie, last_complete, last_used);
1205} 1214}
1206 1215
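ioat_dma_is_complete() defers the actual status decision to the generic dma_async_is_complete() helper from <linux/dmaengine.h>, which compares the requested cookie against the last completed and last issued cookies while allowing for cookie wraparound. Roughly (paraphrased from memory, not a verbatim copy of the header), the check looks like this:

	static inline enum dma_status demo_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
	{
		if (last_complete <= last_used) {
			/* no wraparound: complete if at or before last_complete */
			if ((cookie <= last_complete) || (cookie > last_used))
				return DMA_SUCCESS;
		} else {
			/* cookie counter wrapped between completion and issue */
			if ((cookie <= last_complete) && (cookie > last_used))
				return DMA_SUCCESS;
		}
		return DMA_IN_PROGRESS;
	}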
1207static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) 1216static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat)
1208{ 1217{
1218 struct ioat_chan_common *chan = &ioat->base;
1209 struct ioat_desc_sw *desc; 1219 struct ioat_desc_sw *desc;
1210 struct ioat_dma_descriptor *hw; 1220 struct ioat_dma_descriptor *hw;
1211 1221
1212 spin_lock_bh(&ioat_chan->desc_lock); 1222 spin_lock_bh(&ioat->desc_lock);
1213 1223
1214 desc = ioat_dma_get_next_descriptor(ioat_chan); 1224 desc = ioat_dma_get_next_descriptor(ioat);
1215 1225
1216 if (!desc) { 1226 if (!desc) {
1217 dev_err(to_dev(ioat_chan), 1227 dev_err(to_dev(chan),
1218 "Unable to start null desc - get next desc failed\n"); 1228 "Unable to start null desc - get next desc failed\n");
1219 spin_unlock_bh(&ioat_chan->desc_lock); 1229 spin_unlock_bh(&ioat->desc_lock);
1220 return; 1230 return;
1221 } 1231 }
1222 1232
@@ -1230,31 +1240,31 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
1230 hw->src_addr = 0; 1240 hw->src_addr = 0;
1231 hw->dst_addr = 0; 1241 hw->dst_addr = 0;
1232 async_tx_ack(&desc->txd); 1242 async_tx_ack(&desc->txd);
1233 switch (ioat_chan->device->version) { 1243 switch (chan->device->version) {
1234 case IOAT_VER_1_2: 1244 case IOAT_VER_1_2:
1235 hw->next = 0; 1245 hw->next = 0;
1236 list_add_tail(&desc->node, &ioat_chan->used_desc); 1246 list_add_tail(&desc->node, &ioat->used_desc);
1237 1247
1238 writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, 1248 writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
1239 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); 1249 chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
1240 writel(((u64) desc->txd.phys) >> 32, 1250 writel(((u64) desc->txd.phys) >> 32,
1241 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); 1251 chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
1242 1252
1243 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base 1253 writeb(IOAT_CHANCMD_START, chan->reg_base
1244 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); 1254 + IOAT_CHANCMD_OFFSET(chan->device->version));
1245 break; 1255 break;
1246 case IOAT_VER_2_0: 1256 case IOAT_VER_2_0:
1247 case IOAT_VER_3_0: 1257 case IOAT_VER_3_0:
1248 writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, 1258 writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
1249 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); 1259 chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
1250 writel(((u64) desc->txd.phys) >> 32, 1260 writel(((u64) desc->txd.phys) >> 32,
1251 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); 1261 chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
1252 1262
1253 ioat_chan->dmacount++; 1263 ioat->dmacount++;
1254 __ioat2_dma_memcpy_issue_pending(ioat_chan); 1264 __ioat2_dma_memcpy_issue_pending(ioat);
1255 break; 1265 break;
1256 } 1266 }
1257 spin_unlock_bh(&ioat_chan->desc_lock); 1267 spin_unlock_bh(&ioat->desc_lock);
1258} 1268}
1259 1269
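Both the v1 and v2/v3 paths above program the 64-bit physical address of the first descriptor as two 32-bit register writes, low half then high half. A hedged sketch of that split; the DEMO_* offsets are placeholders, not the real IOAT register map:

	#include <linux/io.h>
	#include <linux/types.h>

	#define DEMO_CHAINADDR_LOW	0x0	/* placeholder offsets */
	#define DEMO_CHAINADDR_HIGH	0x4

	static void demo_write_chainaddr(void __iomem *base, dma_addr_t phys)
	{
		/* widen to u64 first so the shift is well defined on 32-bit dma_addr_t */
		writel(((u64)phys) & 0xffffffff, base + DEMO_CHAINADDR_LOW);
		writel(((u64)phys) >> 32, base + DEMO_CHAINADDR_HIGH);
	}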
1260/* 1270/*
@@ -1371,7 +1381,7 @@ MODULE_PARM_DESC(ioat_interrupt_style,
1371 */ 1381 */
1372static int ioat_dma_setup_interrupts(struct ioatdma_device *device) 1382static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
1373{ 1383{
1374 struct ioat_dma_chan *ioat_chan; 1384 struct ioat_chan_common *chan;
1375 struct pci_dev *pdev = device->pdev; 1385 struct pci_dev *pdev = device->pdev;
1376 struct device *dev = &pdev->dev; 1386 struct device *dev = &pdev->dev;
1377 struct msix_entry *msix; 1387 struct msix_entry *msix;
@@ -1404,15 +1414,15 @@ msix:
1404 1414
1405 for (i = 0; i < msixcnt; i++) { 1415 for (i = 0; i < msixcnt; i++) {
1406 msix = &device->msix_entries[i]; 1416 msix = &device->msix_entries[i];
1407 ioat_chan = ioat_chan_by_index(device, i); 1417 chan = ioat_chan_by_index(device, i);
1408 err = devm_request_irq(dev, msix->vector, 1418 err = devm_request_irq(dev, msix->vector,
1409 ioat_dma_do_interrupt_msix, 0, 1419 ioat_dma_do_interrupt_msix, 0,
1410 "ioat-msix", ioat_chan); 1420 "ioat-msix", chan);
1411 if (err) { 1421 if (err) {
1412 for (j = 0; j < i; j++) { 1422 for (j = 0; j < i; j++) {
1413 msix = &device->msix_entries[j]; 1423 msix = &device->msix_entries[j];
1414 ioat_chan = ioat_chan_by_index(device, j); 1424 chan = ioat_chan_by_index(device, j);
1415 devm_free_irq(dev, msix->vector, ioat_chan); 1425 devm_free_irq(dev, msix->vector, chan);
1416 } 1426 }
1417 goto msix_single_vector; 1427 goto msix_single_vector;
1418 } 1428 }
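The MSI-X path above requests one vector per channel and, if any request fails, releases the vectors it has already claimed before falling back to a single shared vector. The same request/unwind shape in isolation, with demo_* placeholders for the driver-specific pieces:

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	static int demo_request_vectors(struct device *dev, struct msix_entry *ent,
					int count, irq_handler_t handler,
					void **per_vector_cookie)
	{
		int i, j, err;

		for (i = 0; i < count; i++) {
			err = devm_request_irq(dev, ent[i].vector, handler, 0,
					       "demo-msix", per_vector_cookie[i]);
			if (err) {
				/* unwind the vectors already grabbed, then bail out */
				for (j = 0; j < i; j++)
					devm_free_irq(dev, ent[j].vector,
						      per_vector_cookie[j]);
				return err;
			}
		}
		return 0;
	}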
@@ -1594,8 +1604,8 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca)
1594{ 1604{
1595 struct pci_dev *pdev = device->pdev; 1605 struct pci_dev *pdev = device->pdev;
1596 struct dma_device *dma; 1606 struct dma_device *dma;
1597 struct dma_chan *chan; 1607 struct dma_chan *c;
1598 struct ioat_dma_chan *ioat_chan; 1608 struct ioat_chan_common *chan;
1599 int err; 1609 int err;
1600 1610
1601 dma = &device->common; 1611 dma = &device->common;
@@ -1607,10 +1617,10 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca)
1607 return err; 1617 return err;
1608 ioat_set_tcp_copy_break(2048); 1618 ioat_set_tcp_copy_break(2048);
1609 1619
1610 list_for_each_entry(chan, &dma->channels, device_node) { 1620 list_for_each_entry(c, &dma->channels, device_node) {
1611 ioat_chan = to_ioat_chan(chan); 1621 chan = to_chan_common(c);
1612 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU, 1622 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
1613 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); 1623 chan->reg_base + IOAT_DCACTRL_OFFSET);
1614 } 1624 }
1615 1625
1616 err = ioat_register(device); 1626 err = ioat_register(device);
@@ -1629,8 +1639,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
1629{ 1639{
1630 struct pci_dev *pdev = device->pdev; 1640 struct pci_dev *pdev = device->pdev;
1631 struct dma_device *dma; 1641 struct dma_device *dma;
1632 struct dma_chan *chan; 1642 struct dma_chan *c;
1633 struct ioat_dma_chan *ioat_chan; 1643 struct ioat_chan_common *chan;
1634 int err; 1644 int err;
1635 u16 dev_id; 1645 u16 dev_id;
1636 1646
@@ -1656,10 +1666,10 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
1656 return err; 1666 return err;
1657 ioat_set_tcp_copy_break(262144); 1667 ioat_set_tcp_copy_break(262144);
1658 1668
1659 list_for_each_entry(chan, &dma->channels, device_node) { 1669 list_for_each_entry(c, &dma->channels, device_node) {
1660 ioat_chan = to_ioat_chan(chan); 1670 chan = to_chan_common(c);
1661 writel(IOAT_DMA_DCA_ANY_CPU, 1671 writel(IOAT_DMA_DCA_ANY_CPU,
1662 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); 1672 chan->reg_base + IOAT_DCACTRL_OFFSET);
1663 } 1673 }
1664 1674
1665 err = ioat_register(device); 1675 err = ioat_register(device);
@@ -1673,8 +1683,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
1673 1683
1674void ioat_dma_remove(struct ioatdma_device *device) 1684void ioat_dma_remove(struct ioatdma_device *device)
1675{ 1685{
1676 struct dma_chan *chan, *_chan;
1677 struct ioat_dma_chan *ioat_chan;
1678 struct dma_device *dma = &device->common; 1686 struct dma_device *dma = &device->common;
1679 1687
1680 if (device->version != IOAT_VER_3_0) 1688 if (device->version != IOAT_VER_3_0)
@@ -1687,9 +1695,6 @@ void ioat_dma_remove(struct ioatdma_device *device)
1687 pci_pool_destroy(device->dma_pool); 1695 pci_pool_destroy(device->dma_pool);
1688 pci_pool_destroy(device->completion_pool); 1696 pci_pool_destroy(device->completion_pool);
1689 1697
1690 list_for_each_entry_safe(chan, _chan, &dma->channels, device_node) { 1698 INIT_LIST_HEAD(&dma->channels);
1691 ioat_chan = to_ioat_chan(chan);
1692 list_del(&chan->device_node);
1693 }
1694} 1699}
1695 1700
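The removal path now simply reinitializes dma->channels instead of list_del()-ing each channel: INIT_LIST_HEAD() points the head back at itself, so the list reads as empty without walking the soon-to-be-freed channel structures. A tiny illustration (the demo_ name is a placeholder):

	#include <linux/list.h>

	static void demo_forget_entries(struct list_head *head)
	{
		INIT_LIST_HEAD(head);	/* head->next = head->prev = head */
		/* list_empty(head) is now true; old entries are simply forgotten */
	}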
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 9f0c853b6a77..5b31db73ad8e 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -35,7 +35,6 @@
35#define IOAT_DMA_DCA_ANY_CPU ~0 35#define IOAT_DMA_DCA_ANY_CPU ~0
36#define IOAT_WATCHDOG_PERIOD (2 * HZ) 36#define IOAT_WATCHDOG_PERIOD (2 * HZ)
37 37
38#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
39#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) 38#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
40#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) 39#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
41#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) 40#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
@@ -74,37 +73,24 @@ struct ioatdma_device {
74 u8 version; 73 u8 version;
75 struct delayed_work work; 74 struct delayed_work work;
76 struct msix_entry msix_entries[4]; 75 struct msix_entry msix_entries[4];
77 struct ioat_dma_chan *idx[4]; 76 struct ioat_chan_common *idx[4];
78 struct dca_provider *dca; 77 struct dca_provider *dca;
79 void (*intr_quirk)(struct ioatdma_device *device); 78 void (*intr_quirk)(struct ioatdma_device *device);
80}; 79};
81 80
82/** 81struct ioat_chan_common {
83 * struct ioat_dma_chan - internal representation of a DMA channel
84 */
85struct ioat_dma_chan {
86
87 void __iomem *reg_base; 82 void __iomem *reg_base;
88 83
89 dma_cookie_t completed_cookie;
90 unsigned long last_completion; 84 unsigned long last_completion;
91 unsigned long last_completion_time; 85 unsigned long last_completion_time;
92 86
93 size_t xfercap; /* XFERCAP register value expanded out */
94
95 spinlock_t cleanup_lock; 87 spinlock_t cleanup_lock;
96 spinlock_t desc_lock; 88 dma_cookie_t completed_cookie;
97 struct list_head free_desc;
98 struct list_head used_desc;
99 unsigned long watchdog_completion; 89 unsigned long watchdog_completion;
100 int watchdog_tcp_cookie; 90 int watchdog_tcp_cookie;
101 u32 watchdog_last_tcp_cookie; 91 u32 watchdog_last_tcp_cookie;
102 struct delayed_work work; 92 struct delayed_work work;
103 93
104 int pending;
105 u16 dmacount;
106 u16 desccount;
107
108 struct ioatdma_device *device; 94 struct ioatdma_device *device;
109 struct dma_chan common; 95 struct dma_chan common;
110 96
@@ -120,6 +106,35 @@ struct ioat_dma_chan {
120 struct tasklet_struct cleanup_task; 106 struct tasklet_struct cleanup_task;
121}; 107};
122 108
109/**
110 * struct ioat_dma_chan - internal representation of a DMA channel
111 */
112struct ioat_dma_chan {
113 struct ioat_chan_common base;
114
115 size_t xfercap; /* XFERCAP register value expanded out */
116
117 spinlock_t desc_lock;
118 struct list_head free_desc;
119 struct list_head used_desc;
120
121 int pending;
122 u16 dmacount;
123 u16 desccount;
124};
125
126static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
127{
128 return container_of(c, struct ioat_chan_common, common);
129}
130
131static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
132{
133 struct ioat_chan_common *chan = to_chan_common(c);
134
135 return container_of(chan, struct ioat_dma_chan, base);
136}
137
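The two inline helpers above implement the usual embed-a-base-struct pattern: struct ioat_chan_common is embedded as the base member of struct ioat_dma_chan, and container_of() recovers the outer structure from a pointer to the embedded one. A hedged, self-contained sketch of the same idiom with placeholder demo_* types:

	#include <linux/kernel.h>	/* container_of() */

	struct demo_base {
		int reg;		/* fields shared by every channel flavor */
	};

	struct demo_chan {
		struct demo_base base;	/* must be embedded, not pointed to */
		int depth;		/* flavor-specific state */
	};

	static inline struct demo_chan *demo_base_to_chan(struct demo_base *b)
	{
		/* only valid when b is known to live inside a demo_chan */
		return container_of(b, struct demo_chan, base);
	}

The downcast is safe only because to_ioat_chan() is called on channels this driver created, so the embedded base is guaranteed to sit inside an ioat_dma_chan.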
123/* wrapper around hardware descriptor format + additional software fields */ 138/* wrapper around hardware descriptor format + additional software fields */
124 139
125/** 140/**