| author | Dan Williams <dan.j.williams@intel.com> | 2009-09-08 20:52:57 -0400 |
|---|---|---|
| committer | Dan Williams <dan.j.williams@intel.com> | 2009-09-08 20:52:57 -0400 |
| commit | e12c4fa377ffda2490476caae17f24daaf9c9bd7 (patch) | |
| tree | aa7a19d8f7f931709a8a835a4b0d441f83d759fd /drivers/dma | |
| parent | a348a7e6fdbcd2d5192a09719a479bb238fde727 (diff) | |
| parent | 4b652f0db3be891c7b76b109c3b55003b920fc96 (diff) | |
Merge branch 'ioat' into dmaengine
Diffstat (limited to 'drivers/dma')
| -rw-r--r-- | drivers/dma/Makefile | 3 |
| -rw-r--r-- | drivers/dma/ioat/Makefile | 2 |
| -rw-r--r-- | drivers/dma/ioat/dca.c (renamed from drivers/dma/ioat_dca.c) | 13 |
| -rw-r--r-- | drivers/dma/ioat/dma.c | 1139 |
| -rw-r--r-- | drivers/dma/ioat/dma.h | 306 |
| -rw-r--r-- | drivers/dma/ioat/dma_v2.c | 881 |
| -rw-r--r-- | drivers/dma/ioat/dma_v2.h | 146 |
| -rw-r--r-- | drivers/dma/ioat/hw.h (renamed from drivers/dma/ioatdma_hw.h) | 45 |
| -rw-r--r-- | drivers/dma/ioat/pci.c (renamed from drivers/dma/ioat.c) | 147 |
| -rw-r--r-- | drivers/dma/ioat/registers.h (renamed from drivers/dma/ioatdma_registers.h) | 34 |
| -rw-r--r-- | drivers/dma/ioat_dma.c | 1741 |
| -rw-r--r-- | drivers/dma/ioatdma.h | 165 |
| -rw-r--r-- | drivers/dma/iovlock.c | 10 |
13 files changed, 2598 insertions, 2034 deletions
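
This merge reorganizes the ioat driver under drivers/dma/ioat/ and, as the diffs below show, exposes the hardware through the generic dmaengine memcpy interface (device_prep_dma_memcpy, tx_submit, device_issue_pending, wired up in ioat1_dma_probe). For orientation, here is a minimal, illustrative sketch of how a kernel client of this era drives such a channel; it mirrors the driver's own ioat_dma_self_test. The function name example_dma_copy and the busy-poll loop are assumptions for illustration only, not part of this patch.

```c
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Illustrative client (not from this patch): copy len bytes from src to dst
 * (kernel virtual addresses) over any channel advertising DMA_MEMCPY,
 * e.g. an ioat channel registered by the driver below. */
static int example_dma_copy(void *dst, void *src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	struct device *dev;
	dma_addr_t dma_src, dma_dst;
	dma_cookie_t cookie;
	unsigned long flags;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL); /* any memcpy-capable channel */
	if (!chan)
		return -ENODEV;
	dev = chan->device->dev;

	dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
	dma_dst = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);

	/* in this era the driver unmaps single mappings at completion time
	 * unless the DMA_COMPL_SKIP_*_UNMAP flags are set */
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = chan->device->device_prep_dma_memcpy(chan, dma_dst, dma_src,
						  len, flags);
	if (!tx) {
		/* nothing was submitted, so clean up the mappings ourselves */
		dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE);
		dma_unmap_single(dev, dma_src, len, DMA_TO_DEVICE);
		ret = -ENOMEM;
		goto release;
	}

	cookie = tx->tx_submit(tx);	/* lands in ioat1_tx_submit below */
	dma_async_issue_pending(chan);	/* flush regardless of ioat_pending_level */

	/* busy-poll for completion; a real client would use tx->callback */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS)
		cpu_relax();

release:
	dma_release_channel(chan);
	return ret;
}
```

The same sequence, with explicit test buffers and a struct completion instead of polling, is what ioat_dma_self_test() in dma.c performs against the first channel at probe time.
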
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 40e1e008357..8f115e93b4a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
| @@ -1,8 +1,7 @@ | |||
| 1 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o | 1 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o |
| 2 | obj-$(CONFIG_NET_DMA) += iovlock.o | 2 | obj-$(CONFIG_NET_DMA) += iovlock.o |
| 3 | obj-$(CONFIG_DMATEST) += dmatest.o | 3 | obj-$(CONFIG_DMATEST) += dmatest.o |
| 4 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 4 | obj-$(CONFIG_INTEL_IOATDMA) += ioat/ |
| 5 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o | ||
| 6 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o | 5 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o |
| 7 | obj-$(CONFIG_FSL_DMA) += fsldma.o | 6 | obj-$(CONFIG_FSL_DMA) += fsldma.o |
| 8 | obj-$(CONFIG_MV_XOR) += mv_xor.o | 7 | obj-$(CONFIG_MV_XOR) += mv_xor.o |
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile
new file mode 100644
index 00000000000..205a639e84d
--- /dev/null
+++ b/drivers/dma/ioat/Makefile
| @@ -0,0 +1,2 @@ | |||
| 1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | ||
| 2 | ioatdma-objs := pci.o dma.o dma_v2.o dca.o | ||
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat/dca.c
index c012a1e1504..69d02615c4d 100644
--- a/drivers/dma/ioat_dca.c
+++ b/drivers/dma/ioat/dca.c
| @@ -33,8 +33,8 @@ | |||
| 33 | #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24) | 33 | #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24) |
| 34 | #endif | 34 | #endif |
| 35 | 35 | ||
| 36 | #include "ioatdma.h" | 36 | #include "dma.h" |
| 37 | #include "ioatdma_registers.h" | 37 | #include "registers.h" |
| 38 | 38 | ||
| 39 | /* | 39 | /* |
| 40 | * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 | 40 | * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 |
| @@ -242,7 +242,8 @@ static struct dca_ops ioat_dca_ops = { | |||
| 242 | }; | 242 | }; |
| 243 | 243 | ||
| 244 | 244 | ||
| 245 | struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) | 245 | struct dca_provider * __devinit |
| 246 | ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) | ||
| 246 | { | 247 | { |
| 247 | struct dca_provider *dca; | 248 | struct dca_provider *dca; |
| 248 | struct ioat_dca_priv *ioatdca; | 249 | struct ioat_dca_priv *ioatdca; |
| @@ -407,7 +408,8 @@ static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) | |||
| 407 | return slots; | 408 | return slots; |
| 408 | } | 409 | } |
| 409 | 410 | ||
| 410 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) | 411 | struct dca_provider * __devinit |
| 412 | ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) | ||
| 411 | { | 413 | { |
| 412 | struct dca_provider *dca; | 414 | struct dca_provider *dca; |
| 413 | struct ioat_dca_priv *ioatdca; | 415 | struct ioat_dca_priv *ioatdca; |
| @@ -602,7 +604,8 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) | |||
| 602 | return slots; | 604 | return slots; |
| 603 | } | 605 | } |
| 604 | 606 | ||
| 605 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) | 607 | struct dca_provider * __devinit |
| 608 | ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) | ||
| 606 | { | 609 | { |
| 607 | struct dca_provider *dca; | 610 | struct dca_provider *dca; |
| 608 | struct ioat_dca_priv *ioatdca; | 611 | struct ioat_dca_priv *ioatdca; |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
new file mode 100644
index 00000000000..17a518d0386
--- /dev/null
+++ b/drivers/dma/ioat/dma.c
| @@ -0,0 +1,1139 @@ | |||
| 1 | /* | ||
| 2 | * Intel I/OAT DMA Linux driver | ||
| 3 | * Copyright(c) 2004 - 2009 Intel Corporation. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms and conditions of the GNU General Public License, | ||
| 7 | * version 2, as published by the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in | ||
| 19 | * the file called "COPYING". | ||
| 20 | * | ||
| 21 | */ | ||
| 22 | |||
| 23 | /* | ||
| 24 | * This driver supports an Intel I/OAT DMA engine, which does asynchronous | ||
| 25 | * copy operations. | ||
| 26 | */ | ||
| 27 | |||
| 28 | #include <linux/init.h> | ||
| 29 | #include <linux/module.h> | ||
| 30 | #include <linux/pci.h> | ||
| 31 | #include <linux/interrupt.h> | ||
| 32 | #include <linux/dmaengine.h> | ||
| 33 | #include <linux/delay.h> | ||
| 34 | #include <linux/dma-mapping.h> | ||
| 35 | #include <linux/workqueue.h> | ||
| 36 | #include <linux/i7300_idle.h> | ||
| 37 | #include "dma.h" | ||
| 38 | #include "registers.h" | ||
| 39 | #include "hw.h" | ||
| 40 | |||
| 41 | int ioat_pending_level = 4; | ||
| 42 | module_param(ioat_pending_level, int, 0644); | ||
| 43 | MODULE_PARM_DESC(ioat_pending_level, | ||
| 44 | "high-water mark for pushing ioat descriptors (default: 4)"); | ||
| 45 | |||
| 46 | /* internal functions */ | ||
| 47 | static void ioat1_cleanup(struct ioat_dma_chan *ioat); | ||
| 48 | static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat); | ||
| 49 | |||
| 50 | /** | ||
| 51 | * ioat_dma_do_interrupt - handler used for single vector interrupt mode | ||
| 52 | * @irq: interrupt id | ||
| 53 | * @data: interrupt data | ||
| 54 | */ | ||
| 55 | static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) | ||
| 56 | { | ||
| 57 | struct ioatdma_device *instance = data; | ||
| 58 | struct ioat_chan_common *chan; | ||
| 59 | unsigned long attnstatus; | ||
| 60 | int bit; | ||
| 61 | u8 intrctrl; | ||
| 62 | |||
| 63 | intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET); | ||
| 64 | |||
| 65 | if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN)) | ||
| 66 | return IRQ_NONE; | ||
| 67 | |||
| 68 | if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) { | ||
| 69 | writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); | ||
| 70 | return IRQ_NONE; | ||
| 71 | } | ||
| 72 | |||
| 73 | attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); | ||
| 74 | for_each_bit(bit, &attnstatus, BITS_PER_LONG) { | ||
| 75 | chan = ioat_chan_by_index(instance, bit); | ||
| 76 | tasklet_schedule(&chan->cleanup_task); | ||
| 77 | } | ||
| 78 | |||
| 79 | writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); | ||
| 80 | return IRQ_HANDLED; | ||
| 81 | } | ||
| 82 | |||
| 83 | /** | ||
| 84 | * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode | ||
| 85 | * @irq: interrupt id | ||
| 86 | * @data: interrupt data | ||
| 87 | */ | ||
| 88 | static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) | ||
| 89 | { | ||
| 90 | struct ioat_chan_common *chan = data; | ||
| 91 | |||
| 92 | tasklet_schedule(&chan->cleanup_task); | ||
| 93 | |||
| 94 | return IRQ_HANDLED; | ||
| 95 | } | ||
| 96 | |||
| 97 | static void ioat1_cleanup_tasklet(unsigned long data); | ||
| 98 | |||
| 99 | /* common channel initialization */ | ||
| 100 | void ioat_init_channel(struct ioatdma_device *device, | ||
| 101 | struct ioat_chan_common *chan, int idx, | ||
| 102 | void (*timer_fn)(unsigned long), | ||
| 103 | void (*tasklet)(unsigned long), | ||
| 104 | unsigned long ioat) | ||
| 105 | { | ||
| 106 | struct dma_device *dma = &device->common; | ||
| 107 | |||
| 108 | chan->device = device; | ||
| 109 | chan->reg_base = device->reg_base + (0x80 * (idx + 1)); | ||
| 110 | spin_lock_init(&chan->cleanup_lock); | ||
| 111 | chan->common.device = dma; | ||
| 112 | list_add_tail(&chan->common.device_node, &dma->channels); | ||
| 113 | device->idx[idx] = chan; | ||
| 114 | init_timer(&chan->timer); | ||
| 115 | chan->timer.function = timer_fn; | ||
| 116 | chan->timer.data = ioat; | ||
| 117 | tasklet_init(&chan->cleanup_task, tasklet, ioat); | ||
| 118 | tasklet_disable(&chan->cleanup_task); | ||
| 119 | } | ||
| 120 | |||
| 121 | static void ioat1_timer_event(unsigned long data); | ||
| 122 | |||
| 123 | /** | ||
| 124 | * ioat1_enumerate_channels - find and initialize the device's channels | ||
| 125 | * @device: the device to be enumerated | ||
| 126 | */ | ||
| 127 | static int ioat1_enumerate_channels(struct ioatdma_device *device) | ||
| 128 | { | ||
| 129 | u8 xfercap_scale; | ||
| 130 | u32 xfercap; | ||
| 131 | int i; | ||
| 132 | struct ioat_dma_chan *ioat; | ||
| 133 | struct device *dev = &device->pdev->dev; | ||
| 134 | struct dma_device *dma = &device->common; | ||
| 135 | |||
| 136 | INIT_LIST_HEAD(&dma->channels); | ||
| 137 | dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); | ||
| 138 | dma->chancnt &= 0x1f; /* bits [4:0] valid */ | ||
| 139 | if (dma->chancnt > ARRAY_SIZE(device->idx)) { | ||
| 140 | dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", | ||
| 141 | dma->chancnt, ARRAY_SIZE(device->idx)); | ||
| 142 | dma->chancnt = ARRAY_SIZE(device->idx); | ||
| 143 | } | ||
| 144 | xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); | ||
| 145 | xfercap_scale &= 0x1f; /* bits [4:0] valid */ | ||
| 146 | xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); | ||
| 147 | dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap); | ||
| 148 | |||
| 149 | #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL | ||
| 150 | if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) | ||
| 151 | dma->chancnt--; | ||
| 152 | #endif | ||
| 153 | for (i = 0; i < dma->chancnt; i++) { | ||
| 154 | ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); | ||
| 155 | if (!ioat) | ||
| 156 | break; | ||
| 157 | |||
| 158 | ioat_init_channel(device, &ioat->base, i, | ||
| 159 | ioat1_timer_event, | ||
| 160 | ioat1_cleanup_tasklet, | ||
| 161 | (unsigned long) ioat); | ||
| 162 | ioat->xfercap = xfercap; | ||
| 163 | spin_lock_init(&ioat->desc_lock); | ||
| 164 | INIT_LIST_HEAD(&ioat->free_desc); | ||
| 165 | INIT_LIST_HEAD(&ioat->used_desc); | ||
| 166 | } | ||
| 167 | dma->chancnt = i; | ||
| 168 | return i; | ||
| 169 | } | ||
| 170 | |||
| 171 | /** | ||
| 172 | * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended | ||
| 173 | * descriptors to hw | ||
| 174 | * @chan: DMA channel handle | ||
| 175 | */ | ||
| 176 | static inline void | ||
| 177 | __ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat) | ||
| 178 | { | ||
| 179 | void __iomem *reg_base = ioat->base.reg_base; | ||
| 180 | |||
| 181 | dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n", | ||
| 182 | __func__, ioat->pending); | ||
| 183 | ioat->pending = 0; | ||
| 184 | writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET); | ||
| 185 | } | ||
| 186 | |||
| 187 | static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) | ||
| 188 | { | ||
| 189 | struct ioat_dma_chan *ioat = to_ioat_chan(chan); | ||
| 190 | |||
| 191 | if (ioat->pending > 0) { | ||
| 192 | spin_lock_bh(&ioat->desc_lock); | ||
| 193 | __ioat1_dma_memcpy_issue_pending(ioat); | ||
| 194 | spin_unlock_bh(&ioat->desc_lock); | ||
| 195 | } | ||
| 196 | } | ||
| 197 | |||
| 198 | /** | ||
| 199 | * ioat1_reset_channel - restart a channel | ||
| 200 | * @ioat: IOAT DMA channel handle | ||
| 201 | */ | ||
| 202 | static void ioat1_reset_channel(struct ioat_dma_chan *ioat) | ||
| 203 | { | ||
| 204 | struct ioat_chan_common *chan = &ioat->base; | ||
| 205 | void __iomem *reg_base = chan->reg_base; | ||
| 206 | u32 chansts, chanerr; | ||
| 207 | |||
| 208 | dev_warn(to_dev(chan), "reset\n"); | ||
| 209 | chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); | ||
| 210 | chansts = *chan->completion & IOAT_CHANSTS_STATUS; | ||
| 211 | if (chanerr) { | ||
| 212 | dev_err(to_dev(chan), | ||
| 213 | "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", | ||
| 214 | chan_num(chan), chansts, chanerr); | ||
| 215 | writel(chanerr, reg_base + IOAT_CHANERR_OFFSET); | ||
| 216 | } | ||
| 217 | |||
| 218 | /* | ||
| 219 | * whack it upside the head with a reset | ||
| 220 | * and wait for things to settle out. | ||
| 221 | * force the pending count to a really big negative | ||
| 222 | * to make sure no one forces an issue_pending | ||
| 223 | * while we're waiting. | ||
| 224 | */ | ||
| 225 | |||
| 226 | ioat->pending = INT_MIN; | ||
| 227 | writeb(IOAT_CHANCMD_RESET, | ||
| 228 | reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); | ||
| 229 | set_bit(IOAT_RESET_PENDING, &chan->state); | ||
| 230 | mod_timer(&chan->timer, jiffies + RESET_DELAY); | ||
| 231 | } | ||
| 232 | |||
| 233 | static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) | ||
| 234 | { | ||
| 235 | struct dma_chan *c = tx->chan; | ||
| 236 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | ||
| 237 | struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); | ||
| 238 | struct ioat_chan_common *chan = &ioat->base; | ||
| 239 | struct ioat_desc_sw *first; | ||
| 240 | struct ioat_desc_sw *chain_tail; | ||
| 241 | dma_cookie_t cookie; | ||
| 242 | |||
| 243 | spin_lock_bh(&ioat->desc_lock); | ||
| 244 | /* cookie incr and addition to used_list must be atomic */ | ||
| 245 | cookie = c->cookie; | ||
| 246 | cookie++; | ||
| 247 | if (cookie < 0) | ||
| 248 | cookie = 1; | ||
| 249 | c->cookie = cookie; | ||
| 250 | tx->cookie = cookie; | ||
| 251 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); | ||
| 252 | |||
| 253 | /* write address into NextDescriptor field of last desc in chain */ | ||
| 254 | first = to_ioat_desc(tx->tx_list.next); | ||
| 255 | chain_tail = to_ioat_desc(ioat->used_desc.prev); | ||
| 256 | /* make descriptor updates globally visible before chaining */ | ||
| 257 | wmb(); | ||
| 258 | chain_tail->hw->next = first->txd.phys; | ||
| 259 | list_splice_tail_init(&tx->tx_list, &ioat->used_desc); | ||
| 260 | dump_desc_dbg(ioat, chain_tail); | ||
| 261 | dump_desc_dbg(ioat, first); | ||
| 262 | |||
| 263 | if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) | ||
| 264 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
| 265 | |||
| 266 | ioat->pending += desc->hw->tx_cnt; | ||
| 267 | if (ioat->pending >= ioat_pending_level) | ||
| 268 | __ioat1_dma_memcpy_issue_pending(ioat); | ||
| 269 | spin_unlock_bh(&ioat->desc_lock); | ||
| 270 | |||
| 271 | return cookie; | ||
| 272 | } | ||
| 273 | |||
| 274 | /** | ||
| 275 | * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair | ||
| 276 | * @ioat: the channel supplying the memory pool for the descriptors | ||
| 277 | * @flags: allocation flags | ||
| 278 | */ | ||
| 279 | static struct ioat_desc_sw * | ||
| 280 | ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) | ||
| 281 | { | ||
| 282 | struct ioat_dma_descriptor *desc; | ||
| 283 | struct ioat_desc_sw *desc_sw; | ||
| 284 | struct ioatdma_device *ioatdma_device; | ||
| 285 | dma_addr_t phys; | ||
| 286 | |||
| 287 | ioatdma_device = ioat->base.device; | ||
| 288 | desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys); | ||
| 289 | if (unlikely(!desc)) | ||
| 290 | return NULL; | ||
| 291 | |||
| 292 | desc_sw = kzalloc(sizeof(*desc_sw), flags); | ||
| 293 | if (unlikely(!desc_sw)) { | ||
| 294 | pci_pool_free(ioatdma_device->dma_pool, desc, phys); | ||
| 295 | return NULL; | ||
| 296 | } | ||
| 297 | |||
| 298 | memset(desc, 0, sizeof(*desc)); | ||
| 299 | |||
| 300 | dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); | ||
| 301 | desc_sw->txd.tx_submit = ioat1_tx_submit; | ||
| 302 | desc_sw->hw = desc; | ||
| 303 | desc_sw->txd.phys = phys; | ||
| 304 | set_desc_id(desc_sw, -1); | ||
| 305 | |||
| 306 | return desc_sw; | ||
| 307 | } | ||
| 308 | |||
| 309 | static int ioat_initial_desc_count = 256; | ||
| 310 | module_param(ioat_initial_desc_count, int, 0644); | ||
| 311 | MODULE_PARM_DESC(ioat_initial_desc_count, | ||
| 312 | "ioat1: initial descriptors per channel (default: 256)"); | ||
| 313 | /** | ||
| 314 | * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors | ||
| 315 | * @chan: the channel to be filled out | ||
| 316 | */ | ||
| 317 | static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) | ||
| 318 | { | ||
| 319 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | ||
| 320 | struct ioat_chan_common *chan = &ioat->base; | ||
| 321 | struct ioat_desc_sw *desc; | ||
| 322 | u32 chanerr; | ||
| 323 | int i; | ||
| 324 | LIST_HEAD(tmp_list); | ||
| 325 | |||
| 326 | /* have we already been set up? */ | ||
| 327 | if (!list_empty(&ioat->free_desc)) | ||
| 328 | return ioat->desccount; | ||
| 329 | |||
| 330 | /* Setup register to interrupt and write completion status on error */ | ||
| 331 | writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); | ||
| 332 | |||
| 333 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 334 | if (chanerr) { | ||
| 335 | dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); | ||
| 336 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 337 | } | ||
| 338 | |||
| 339 | /* Allocate descriptors */ | ||
| 340 | for (i = 0; i < ioat_initial_desc_count; i++) { | ||
| 341 | desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL); | ||
| 342 | if (!desc) { | ||
| 343 | dev_err(to_dev(chan), "Only %d initial descriptors\n", i); | ||
| 344 | break; | ||
| 345 | } | ||
| 346 | set_desc_id(desc, i); | ||
| 347 | list_add_tail(&desc->node, &tmp_list); | ||
| 348 | } | ||
| 349 | spin_lock_bh(&ioat->desc_lock); | ||
| 350 | ioat->desccount = i; | ||
| 351 | list_splice(&tmp_list, &ioat->free_desc); | ||
| 352 | spin_unlock_bh(&ioat->desc_lock); | ||
| 353 | |||
| 354 | /* allocate a completion writeback area */ | ||
| 355 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ | ||
| 356 | chan->completion = pci_pool_alloc(chan->device->completion_pool, | ||
| 357 | GFP_KERNEL, &chan->completion_dma); | ||
| 358 | memset(chan->completion, 0, sizeof(*chan->completion)); | ||
| 359 | writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF, | ||
| 360 | chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); | ||
| 361 | writel(((u64) chan->completion_dma) >> 32, | ||
| 362 | chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); | ||
| 363 | |||
| 364 | tasklet_enable(&chan->cleanup_task); | ||
| 365 | ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ | ||
| 366 | dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", | ||
| 367 | __func__, ioat->desccount); | ||
| 368 | return ioat->desccount; | ||
| 369 | } | ||
| 370 | |||
| 371 | /** | ||
| 372 | * ioat1_dma_free_chan_resources - release all the descriptors | ||
| 373 | * @chan: the channel to be cleaned | ||
| 374 | */ | ||
| 375 | static void ioat1_dma_free_chan_resources(struct dma_chan *c) | ||
| 376 | { | ||
| 377 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | ||
| 378 | struct ioat_chan_common *chan = &ioat->base; | ||
| 379 | struct ioatdma_device *ioatdma_device = chan->device; | ||
| 380 | struct ioat_desc_sw *desc, *_desc; | ||
| 381 | int in_use_descs = 0; | ||
| 382 | |||
| 383 | /* Before freeing channel resources first check | ||
| 384 | * if they have been previously allocated for this channel. | ||
| 385 | */ | ||
| 386 | if (ioat->desccount == 0) | ||
| 387 | return; | ||
| 388 | |||
| 389 | tasklet_disable(&chan->cleanup_task); | ||
| 390 | del_timer_sync(&chan->timer); | ||
| 391 | ioat1_cleanup(ioat); | ||
| 392 | |||
| 393 | /* Delay 100ms after reset to allow internal DMA logic to quiesce | ||
| 394 | * before removing DMA descriptor resources. | ||
| 395 | */ | ||
| 396 | writeb(IOAT_CHANCMD_RESET, | ||
| 397 | chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); | ||
| 398 | mdelay(100); | ||
| 399 | |||
| 400 | spin_lock_bh(&ioat->desc_lock); | ||
| 401 | list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { | ||
| 402 | dev_dbg(to_dev(chan), "%s: freeing %d from used list\n", | ||
| 403 | __func__, desc_id(desc)); | ||
| 404 | dump_desc_dbg(ioat, desc); | ||
| 405 | in_use_descs++; | ||
| 406 | list_del(&desc->node); | ||
| 407 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | ||
| 408 | desc->txd.phys); | ||
| 409 | kfree(desc); | ||
| 410 | } | ||
| 411 | list_for_each_entry_safe(desc, _desc, | ||
| 412 | &ioat->free_desc, node) { | ||
| 413 | list_del(&desc->node); | ||
| 414 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | ||
| 415 | desc->txd.phys); | ||
| 416 | kfree(desc); | ||
| 417 | } | ||
| 418 | spin_unlock_bh(&ioat->desc_lock); | ||
| 419 | |||
| 420 | pci_pool_free(ioatdma_device->completion_pool, | ||
| 421 | chan->completion, | ||
| 422 | chan->completion_dma); | ||
| 423 | |||
| 424 | /* one is ok since we left it on there on purpose */ | ||
| 425 | if (in_use_descs > 1) | ||
| 426 | dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", | ||
| 427 | in_use_descs - 1); | ||
| 428 | |||
| 429 | chan->last_completion = 0; | ||
| 430 | chan->completion_dma = 0; | ||
| 431 | ioat->pending = 0; | ||
| 432 | ioat->desccount = 0; | ||
| 433 | } | ||
| 434 | |||
| 435 | /** | ||
| 436 | * ioat1_dma_get_next_descriptor - return the next available descriptor | ||
| 437 | * @ioat: IOAT DMA channel handle | ||
| 438 | * | ||
| 439 | * Gets the next descriptor from the chain, and must be called with the | ||
| 440 | * channel's desc_lock held. Allocates more descriptors if the channel | ||
| 441 | * has run out. | ||
| 442 | */ | ||
| 443 | static struct ioat_desc_sw * | ||
| 444 | ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat) | ||
| 445 | { | ||
| 446 | struct ioat_desc_sw *new; | ||
| 447 | |||
| 448 | if (!list_empty(&ioat->free_desc)) { | ||
| 449 | new = to_ioat_desc(ioat->free_desc.next); | ||
| 450 | list_del(&new->node); | ||
| 451 | } else { | ||
| 452 | /* try to get another desc */ | ||
| 453 | new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC); | ||
| 454 | if (!new) { | ||
| 455 | dev_err(to_dev(&ioat->base), "alloc failed\n"); | ||
| 456 | return NULL; | ||
| 457 | } | ||
| 458 | } | ||
| 459 | dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n", | ||
| 460 | __func__, desc_id(new)); | ||
| 461 | prefetch(new->hw); | ||
| 462 | return new; | ||
| 463 | } | ||
| 464 | |||
| 465 | static struct dma_async_tx_descriptor * | ||
| 466 | ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, | ||
| 467 | dma_addr_t dma_src, size_t len, unsigned long flags) | ||
| 468 | { | ||
| 469 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | ||
| 470 | struct ioat_desc_sw *desc; | ||
| 471 | size_t copy; | ||
| 472 | LIST_HEAD(chain); | ||
| 473 | dma_addr_t src = dma_src; | ||
| 474 | dma_addr_t dest = dma_dest; | ||
| 475 | size_t total_len = len; | ||
| 476 | struct ioat_dma_descriptor *hw = NULL; | ||
| 477 | int tx_cnt = 0; | ||
| 478 | |||
| 479 | spin_lock_bh(&ioat->desc_lock); | ||
| 480 | desc = ioat1_dma_get_next_descriptor(ioat); | ||
| 481 | do { | ||
| 482 | if (!desc) | ||
| 483 | break; | ||
| 484 | |||
| 485 | tx_cnt++; | ||
| 486 | copy = min_t(size_t, len, ioat->xfercap); | ||
| 487 | |||
| 488 | hw = desc->hw; | ||
| 489 | hw->size = copy; | ||
| 490 | hw->ctl = 0; | ||
| 491 | hw->src_addr = src; | ||
| 492 | hw->dst_addr = dest; | ||
| 493 | |||
| 494 | list_add_tail(&desc->node, &chain); | ||
| 495 | |||
| 496 | len -= copy; | ||
| 497 | dest += copy; | ||
| 498 | src += copy; | ||
| 499 | if (len) { | ||
| 500 | struct ioat_desc_sw *next; | ||
| 501 | |||
| 502 | async_tx_ack(&desc->txd); | ||
| 503 | next = ioat1_dma_get_next_descriptor(ioat); | ||
| 504 | hw->next = next ? next->txd.phys : 0; | ||
| 505 | dump_desc_dbg(ioat, desc); | ||
| 506 | desc = next; | ||
| 507 | } else | ||
| 508 | hw->next = 0; | ||
| 509 | } while (len); | ||
| 510 | |||
| 511 | if (!desc) { | ||
| 512 | struct ioat_chan_common *chan = &ioat->base; | ||
| 513 | |||
| 514 | dev_err(to_dev(chan), | ||
| 515 | "chan%d - get_next_desc failed\n", chan_num(chan)); | ||
| 516 | list_splice(&chain, &ioat->free_desc); | ||
| 517 | spin_unlock_bh(&ioat->desc_lock); | ||
| 518 | return NULL; | ||
| 519 | } | ||
| 520 | spin_unlock_bh(&ioat->desc_lock); | ||
| 521 | |||
| 522 | desc->txd.flags = flags; | ||
| 523 | desc->len = total_len; | ||
| 524 | list_splice(&chain, &desc->txd.tx_list); | ||
| 525 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
| 526 | hw->ctl_f.compl_write = 1; | ||
| 527 | hw->tx_cnt = tx_cnt; | ||
| 528 | dump_desc_dbg(ioat, desc); | ||
| 529 | |||
| 530 | return &desc->txd; | ||
| 531 | } | ||
| 532 | |||
| 533 | static void ioat1_cleanup_tasklet(unsigned long data) | ||
| 534 | { | ||
| 535 | struct ioat_dma_chan *chan = (void *)data; | ||
| 536 | |||
| 537 | ioat1_cleanup(chan); | ||
| 538 | writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET); | ||
| 539 | } | ||
| 540 | |||
| 541 | static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, | ||
| 542 | int direction, enum dma_ctrl_flags flags, bool dst) | ||
| 543 | { | ||
| 544 | if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) || | ||
| 545 | (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE))) | ||
| 546 | pci_unmap_single(pdev, addr, len, direction); | ||
| 547 | else | ||
| 548 | pci_unmap_page(pdev, addr, len, direction); | ||
| 549 | } | ||
| 550 | |||
| 551 | |||
| 552 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, | ||
| 553 | size_t len, struct ioat_dma_descriptor *hw) | ||
| 554 | { | ||
| 555 | struct pci_dev *pdev = chan->device->pdev; | ||
| 556 | size_t offset = len - hw->size; | ||
| 557 | |||
| 558 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) | ||
| 559 | ioat_unmap(pdev, hw->dst_addr - offset, len, | ||
| 560 | PCI_DMA_FROMDEVICE, flags, 1); | ||
| 561 | |||
| 562 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) | ||
| 563 | ioat_unmap(pdev, hw->src_addr - offset, len, | ||
| 564 | PCI_DMA_TODEVICE, flags, 0); | ||
| 565 | } | ||
| 566 | |||
| 567 | unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) | ||
| 568 | { | ||
| 569 | unsigned long phys_complete; | ||
| 570 | u64 completion; | ||
| 571 | |||
| 572 | completion = *chan->completion; | ||
| 573 | phys_complete = ioat_chansts_to_addr(completion); | ||
| 574 | |||
| 575 | dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, | ||
| 576 | (unsigned long long) phys_complete); | ||
| 577 | |||
| 578 | if (is_ioat_halted(completion)) { | ||
| 579 | u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 580 | dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", | ||
| 581 | chanerr); | ||
| 582 | |||
| 583 | /* TODO do something to salvage the situation */ | ||
| 584 | } | ||
| 585 | |||
| 586 | return phys_complete; | ||
| 587 | } | ||
| 588 | |||
| 589 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, | ||
| 590 | unsigned long *phys_complete) | ||
| 591 | { | ||
| 592 | *phys_complete = ioat_get_current_completion(chan); | ||
| 593 | if (*phys_complete == chan->last_completion) | ||
| 594 | return false; | ||
| 595 | clear_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
| 596 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
| 597 | |||
| 598 | return true; | ||
| 599 | } | ||
| 600 | |||
| 601 | static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete) | ||
| 602 | { | ||
| 603 | struct ioat_chan_common *chan = &ioat->base; | ||
| 604 | struct list_head *_desc, *n; | ||
| 605 | struct dma_async_tx_descriptor *tx; | ||
| 606 | |||
| 607 | dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n", | ||
| 608 | __func__, phys_complete); | ||
| 609 | list_for_each_safe(_desc, n, &ioat->used_desc) { | ||
| 610 | struct ioat_desc_sw *desc; | ||
| 611 | |||
| 612 | prefetch(n); | ||
| 613 | desc = list_entry(_desc, typeof(*desc), node); | ||
| 614 | tx = &desc->txd; | ||
| 615 | /* | ||
| 616 | * Incoming DMA requests may use multiple descriptors, | ||
| 617 | * due to exceeding xfercap, perhaps. If so, only the | ||
| 618 | * last one will have a cookie, and require unmapping. | ||
| 619 | */ | ||
| 620 | dump_desc_dbg(ioat, desc); | ||
| 621 | if (tx->cookie) { | ||
| 622 | chan->completed_cookie = tx->cookie; | ||
| 623 | tx->cookie = 0; | ||
| 624 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); | ||
| 625 | if (tx->callback) { | ||
| 626 | tx->callback(tx->callback_param); | ||
| 627 | tx->callback = NULL; | ||
| 628 | } | ||
| 629 | } | ||
| 630 | |||
| 631 | if (tx->phys != phys_complete) { | ||
| 632 | /* | ||
| 633 | * a completed entry, but not the last, so clean | ||
| 634 | * up if the client is done with the descriptor | ||
| 635 | */ | ||
| 636 | if (async_tx_test_ack(tx)) | ||
| 637 | list_move_tail(&desc->node, &ioat->free_desc); | ||
| 638 | } else { | ||
| 639 | /* | ||
| 640 | * last used desc. Do not remove, so we can | ||
| 641 | * append from it. | ||
| 642 | */ | ||
| 643 | |||
| 644 | /* if nothing else is pending, cancel the | ||
| 645 | * completion timeout | ||
| 646 | */ | ||
| 647 | if (n == &ioat->used_desc) { | ||
| 648 | dev_dbg(to_dev(chan), | ||
| 649 | "%s cancel completion timeout\n", | ||
| 650 | __func__); | ||
| 651 | clear_bit(IOAT_COMPLETION_PENDING, &chan->state); | ||
| 652 | } | ||
| 653 | |||
| 654 | /* TODO check status bits? */ | ||
| 655 | break; | ||
| 656 | } | ||
| 657 | } | ||
| 658 | |||
| 659 | chan->last_completion = phys_complete; | ||
| 660 | } | ||
| 661 | |||
| 662 | /** | ||
| 663 | * ioat1_cleanup - clean up finished descriptors | ||
| 664 | * @chan: ioat channel to be cleaned up | ||
| 665 | * | ||
| 666 | * To prevent lock contention we defer cleanup when the locks are | ||
| 667 | * contended with a terminal timeout that forces cleanup and catches | ||
| 668 | * completion notification errors. | ||
| 669 | */ | ||
| 670 | static void ioat1_cleanup(struct ioat_dma_chan *ioat) | ||
| 671 | { | ||
| 672 | struct ioat_chan_common *chan = &ioat->base; | ||
| 673 | unsigned long phys_complete; | ||
| 674 | |||
| 675 | prefetch(chan->completion); | ||
| 676 | |||
| 677 | if (!spin_trylock_bh(&chan->cleanup_lock)) | ||
| 678 | return; | ||
| 679 | |||
| 680 | if (!ioat_cleanup_preamble(chan, &phys_complete)) { | ||
| 681 | spin_unlock_bh(&chan->cleanup_lock); | ||
| 682 | return; | ||
| 683 | } | ||
| 684 | |||
| 685 | if (!spin_trylock_bh(&ioat->desc_lock)) { | ||
| 686 | spin_unlock_bh(&chan->cleanup_lock); | ||
| 687 | return; | ||
| 688 | } | ||
| 689 | |||
| 690 | __cleanup(ioat, phys_complete); | ||
| 691 | |||
| 692 | spin_unlock_bh(&ioat->desc_lock); | ||
| 693 | spin_unlock_bh(&chan->cleanup_lock); | ||
| 694 | } | ||
| 695 | |||
| 696 | static void ioat1_timer_event(unsigned long data) | ||
| 697 | { | ||
| 698 | struct ioat_dma_chan *ioat = (void *) data; | ||
| 699 | struct ioat_chan_common *chan = &ioat->base; | ||
| 700 | |||
| 701 | dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); | ||
| 702 | |||
| 703 | spin_lock_bh(&chan->cleanup_lock); | ||
| 704 | if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) { | ||
| 705 | struct ioat_desc_sw *desc; | ||
| 706 | |||
| 707 | spin_lock_bh(&ioat->desc_lock); | ||
| 708 | |||
| 709 | /* restart active descriptors */ | ||
| 710 | desc = to_ioat_desc(ioat->used_desc.prev); | ||
| 711 | ioat_set_chainaddr(ioat, desc->txd.phys); | ||
| 712 | ioat_start(chan); | ||
| 713 | |||
| 714 | ioat->pending = 0; | ||
| 715 | set_bit(IOAT_COMPLETION_PENDING, &chan->state); | ||
| 716 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
| 717 | spin_unlock_bh(&ioat->desc_lock); | ||
| 718 | } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { | ||
| 719 | unsigned long phys_complete; | ||
| 720 | |||
| 721 | spin_lock_bh(&ioat->desc_lock); | ||
| 722 | /* if we haven't made progress and we have already | ||
| 723 | * acknowledged a pending completion once, then be more | ||
| 724 | * forceful with a restart | ||
| 725 | */ | ||
| 726 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
| 727 | __cleanup(ioat, phys_complete); | ||
| 728 | else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) | ||
| 729 | ioat1_reset_channel(ioat); | ||
| 730 | else { | ||
| 731 | u64 status = ioat_chansts(chan); | ||
| 732 | |||
| 733 | /* manually update the last completion address */ | ||
| 734 | if (ioat_chansts_to_addr(status) != 0) | ||
| 735 | *chan->completion = status; | ||
| 736 | |||
| 737 | set_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
| 738 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
| 739 | } | ||
| 740 | spin_unlock_bh(&ioat->desc_lock); | ||
| 741 | } | ||
| 742 | spin_unlock_bh(&chan->cleanup_lock); | ||
| 743 | } | ||
| 744 | |||
| 745 | static enum dma_status | ||
| 746 | ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie, | ||
| 747 | dma_cookie_t *done, dma_cookie_t *used) | ||
| 748 | { | ||
| 749 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | ||
| 750 | |||
| 751 | if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) | ||
| 752 | return DMA_SUCCESS; | ||
| 753 | |||
| 754 | ioat1_cleanup(ioat); | ||
| 755 | |||
| 756 | return ioat_is_complete(c, cookie, done, used); | ||
| 757 | } | ||
| 758 | |||
| 759 | static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) | ||
| 760 | { | ||
| 761 | struct ioat_chan_common *chan = &ioat->base; | ||
| 762 | struct ioat_desc_sw *desc; | ||
| 763 | struct ioat_dma_descriptor *hw; | ||
| 764 | |||
| 765 | spin_lock_bh(&ioat->desc_lock); | ||
| 766 | |||
| 767 | desc = ioat1_dma_get_next_descriptor(ioat); | ||
| 768 | |||
| 769 | if (!desc) { | ||
| 770 | dev_err(to_dev(chan), | ||
| 771 | "Unable to start null desc - get next desc failed\n"); | ||
| 772 | spin_unlock_bh(&ioat->desc_lock); | ||
| 773 | return; | ||
| 774 | } | ||
| 775 | |||
| 776 | hw = desc->hw; | ||
| 777 | hw->ctl = 0; | ||
| 778 | hw->ctl_f.null = 1; | ||
| 779 | hw->ctl_f.int_en = 1; | ||
| 780 | hw->ctl_f.compl_write = 1; | ||
| 781 | /* set size to non-zero value (channel returns error when size is 0) */ | ||
| 782 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
| 783 | hw->src_addr = 0; | ||
| 784 | hw->dst_addr = 0; | ||
| 785 | async_tx_ack(&desc->txd); | ||
| 786 | hw->next = 0; | ||
| 787 | list_add_tail(&desc->node, &ioat->used_desc); | ||
| 788 | dump_desc_dbg(ioat, desc); | ||
| 789 | |||
| 790 | ioat_set_chainaddr(ioat, desc->txd.phys); | ||
| 791 | ioat_start(chan); | ||
| 792 | spin_unlock_bh(&ioat->desc_lock); | ||
| 793 | } | ||
| 794 | |||
| 795 | /* | ||
| 796 | * Perform an IOAT transaction to verify the HW works. | ||
| 797 | */ | ||
| 798 | #define IOAT_TEST_SIZE 2000 | ||
| 799 | |||
| 800 | static void __devinit ioat_dma_test_callback(void *dma_async_param) | ||
| 801 | { | ||
| 802 | struct completion *cmp = dma_async_param; | ||
| 803 | |||
| 804 | complete(cmp); | ||
| 805 | } | ||
| 806 | |||
| 807 | /** | ||
| 808 | * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works. | ||
| 809 | * @device: device to be tested | ||
| 810 | */ | ||
| 811 | static int __devinit ioat_dma_self_test(struct ioatdma_device *device) | ||
| 812 | { | ||
| 813 | int i; | ||
| 814 | u8 *src; | ||
| 815 | u8 *dest; | ||
| 816 | struct dma_device *dma = &device->common; | ||
| 817 | struct device *dev = &device->pdev->dev; | ||
| 818 | struct dma_chan *dma_chan; | ||
| 819 | struct dma_async_tx_descriptor *tx; | ||
| 820 | dma_addr_t dma_dest, dma_src; | ||
| 821 | dma_cookie_t cookie; | ||
| 822 | int err = 0; | ||
| 823 | struct completion cmp; | ||
| 824 | unsigned long tmo; | ||
| 825 | unsigned long flags; | ||
| 826 | |||
| 827 | src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | ||
| 828 | if (!src) | ||
| 829 | return -ENOMEM; | ||
| 830 | dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | ||
| 831 | if (!dest) { | ||
| 832 | kfree(src); | ||
| 833 | return -ENOMEM; | ||
| 834 | } | ||
| 835 | |||
| 836 | /* Fill in src buffer */ | ||
| 837 | for (i = 0; i < IOAT_TEST_SIZE; i++) | ||
| 838 | src[i] = (u8)i; | ||
| 839 | |||
| 840 | /* Start copy, using first DMA channel */ | ||
| 841 | dma_chan = container_of(dma->channels.next, struct dma_chan, | ||
| 842 | device_node); | ||
| 843 | if (dma->device_alloc_chan_resources(dma_chan) < 1) { | ||
| 844 | dev_err(dev, "selftest cannot allocate chan resource\n"); | ||
| 845 | err = -ENODEV; | ||
| 846 | goto out; | ||
| 847 | } | ||
| 848 | |||
| 849 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | ||
| 850 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | ||
| 851 | flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE | | ||
| 852 | DMA_PREP_INTERRUPT; | ||
| 853 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, | ||
| 854 | IOAT_TEST_SIZE, flags); | ||
| 855 | if (!tx) { | ||
| 856 | dev_err(dev, "Self-test prep failed, disabling\n"); | ||
| 857 | err = -ENODEV; | ||
| 858 | goto free_resources; | ||
| 859 | } | ||
| 860 | |||
| 861 | async_tx_ack(tx); | ||
| 862 | init_completion(&cmp); | ||
| 863 | tx->callback = ioat_dma_test_callback; | ||
| 864 | tx->callback_param = &cmp; | ||
| 865 | cookie = tx->tx_submit(tx); | ||
| 866 | if (cookie < 0) { | ||
| 867 | dev_err(dev, "Self-test setup failed, disabling\n"); | ||
| 868 | err = -ENODEV; | ||
| 869 | goto free_resources; | ||
| 870 | } | ||
| 871 | dma->device_issue_pending(dma_chan); | ||
| 872 | |||
| 873 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
| 874 | |||
| 875 | if (tmo == 0 || | ||
| 876 | dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) | ||
| 877 | != DMA_SUCCESS) { | ||
| 878 | dev_err(dev, "Self-test copy timed out, disabling\n"); | ||
| 879 | err = -ENODEV; | ||
| 880 | goto free_resources; | ||
| 881 | } | ||
| 882 | if (memcmp(src, dest, IOAT_TEST_SIZE)) { | ||
| 883 | dev_err(dev, "Self-test copy failed compare, disabling\n"); | ||
| 884 | err = -ENODEV; | ||
| 885 | goto free_resources; | ||
| 886 | } | ||
| 887 | |||
| 888 | free_resources: | ||
| 889 | dma->device_free_chan_resources(dma_chan); | ||
| 890 | out: | ||
| 891 | kfree(src); | ||
| 892 | kfree(dest); | ||
| 893 | return err; | ||
| 894 | } | ||
| 895 | |||
| 896 | static char ioat_interrupt_style[32] = "msix"; | ||
| 897 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, | ||
| 898 | sizeof(ioat_interrupt_style), 0644); | ||
| 899 | MODULE_PARM_DESC(ioat_interrupt_style, | ||
| 900 | "set ioat interrupt style: msix (default), " | ||
| 901 | "msix-single-vector, msi, intx)"); | ||
| 902 | |||
| 903 | /** | ||
| 904 | * ioat_dma_setup_interrupts - setup interrupt handler | ||
| 905 | * @device: ioat device | ||
| 906 | */ | ||
| 907 | static int ioat_dma_setup_interrupts(struct ioatdma_device *device) | ||
| 908 | { | ||
| 909 | struct ioat_chan_common *chan; | ||
| 910 | struct pci_dev *pdev = device->pdev; | ||
| 911 | struct device *dev = &pdev->dev; | ||
| 912 | struct msix_entry *msix; | ||
| 913 | int i, j, msixcnt; | ||
| 914 | int err = -EINVAL; | ||
| 915 | u8 intrctrl = 0; | ||
| 916 | |||
| 917 | if (!strcmp(ioat_interrupt_style, "msix")) | ||
| 918 | goto msix; | ||
| 919 | if (!strcmp(ioat_interrupt_style, "msix-single-vector")) | ||
| 920 | goto msix_single_vector; | ||
| 921 | if (!strcmp(ioat_interrupt_style, "msi")) | ||
| 922 | goto msi; | ||
| 923 | if (!strcmp(ioat_interrupt_style, "intx")) | ||
| 924 | goto intx; | ||
| 925 | dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style); | ||
| 926 | goto err_no_irq; | ||
| 927 | |||
| 928 | msix: | ||
| 929 | /* The number of MSI-X vectors should equal the number of channels */ | ||
| 930 | msixcnt = device->common.chancnt; | ||
| 931 | for (i = 0; i < msixcnt; i++) | ||
| 932 | device->msix_entries[i].entry = i; | ||
| 933 | |||
| 934 | err = pci_enable_msix(pdev, device->msix_entries, msixcnt); | ||
| 935 | if (err < 0) | ||
| 936 | goto msi; | ||
| 937 | if (err > 0) | ||
| 938 | goto msix_single_vector; | ||
| 939 | |||
| 940 | for (i = 0; i < msixcnt; i++) { | ||
| 941 | msix = &device->msix_entries[i]; | ||
| 942 | chan = ioat_chan_by_index(device, i); | ||
| 943 | err = devm_request_irq(dev, msix->vector, | ||
| 944 | ioat_dma_do_interrupt_msix, 0, | ||
| 945 | "ioat-msix", chan); | ||
| 946 | if (err) { | ||
| 947 | for (j = 0; j < i; j++) { | ||
| 948 | msix = &device->msix_entries[j]; | ||
| 949 | chan = ioat_chan_by_index(device, j); | ||
| 950 | devm_free_irq(dev, msix->vector, chan); | ||
| 951 | } | ||
| 952 | goto msix_single_vector; | ||
| 953 | } | ||
| 954 | } | ||
| 955 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; | ||
| 956 | goto done; | ||
| 957 | |||
| 958 | msix_single_vector: | ||
| 959 | msix = &device->msix_entries[0]; | ||
| 960 | msix->entry = 0; | ||
| 961 | err = pci_enable_msix(pdev, device->msix_entries, 1); | ||
| 962 | if (err) | ||
| 963 | goto msi; | ||
| 964 | |||
| 965 | err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0, | ||
| 966 | "ioat-msix", device); | ||
| 967 | if (err) { | ||
| 968 | pci_disable_msix(pdev); | ||
| 969 | goto msi; | ||
| 970 | } | ||
| 971 | goto done; | ||
| 972 | |||
| 973 | msi: | ||
| 974 | err = pci_enable_msi(pdev); | ||
| 975 | if (err) | ||
| 976 | goto intx; | ||
| 977 | |||
| 978 | err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0, | ||
| 979 | "ioat-msi", device); | ||
| 980 | if (err) { | ||
| 981 | pci_disable_msi(pdev); | ||
| 982 | goto intx; | ||
| 983 | } | ||
| 984 | goto done; | ||
| 985 | |||
| 986 | intx: | ||
| 987 | err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, | ||
| 988 | IRQF_SHARED, "ioat-intx", device); | ||
| 989 | if (err) | ||
| 990 | goto err_no_irq; | ||
| 991 | |||
| 992 | done: | ||
| 993 | if (device->intr_quirk) | ||
| 994 | device->intr_quirk(device); | ||
| 995 | intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; | ||
| 996 | writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); | ||
| 997 | return 0; | ||
| 998 | |||
| 999 | err_no_irq: | ||
| 1000 | /* Disable all interrupt generation */ | ||
| 1001 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | ||
| 1002 | dev_err(dev, "no usable interrupts\n"); | ||
| 1003 | return err; | ||
| 1004 | } | ||
| 1005 | |||
| 1006 | static void ioat_disable_interrupts(struct ioatdma_device *device) | ||
| 1007 | { | ||
| 1008 | /* Disable all interrupt generation */ | ||
| 1009 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | ||
| 1010 | } | ||
| 1011 | |||
| 1012 | int __devinit ioat_probe(struct ioatdma_device *device) | ||
| 1013 | { | ||
| 1014 | int err = -ENODEV; | ||
| 1015 | struct dma_device *dma = &device->common; | ||
| 1016 | struct pci_dev *pdev = device->pdev; | ||
| 1017 | struct device *dev = &pdev->dev; | ||
| 1018 | |||
| 1019 | /* DMA coherent memory pool for DMA descriptor allocations */ | ||
| 1020 | device->dma_pool = pci_pool_create("dma_desc_pool", pdev, | ||
| 1021 | sizeof(struct ioat_dma_descriptor), | ||
| 1022 | 64, 0); | ||
| 1023 | if (!device->dma_pool) { | ||
| 1024 | err = -ENOMEM; | ||
| 1025 | goto err_dma_pool; | ||
| 1026 | } | ||
| 1027 | |||
| 1028 | device->completion_pool = pci_pool_create("completion_pool", pdev, | ||
| 1029 | sizeof(u64), SMP_CACHE_BYTES, | ||
| 1030 | SMP_CACHE_BYTES); | ||
| 1031 | |||
| 1032 | if (!device->completion_pool) { | ||
| 1033 | err = -ENOMEM; | ||
| 1034 | goto err_completion_pool; | ||
| 1035 | } | ||
| 1036 | |||
| 1037 | device->enumerate_channels(device); | ||
| 1038 | |||
| 1039 | dma_cap_set(DMA_MEMCPY, dma->cap_mask); | ||
| 1040 | dma->dev = &pdev->dev; | ||
| 1041 | |||
| 1042 | dev_err(dev, "Intel(R) I/OAT DMA Engine found," | ||
| 1043 | " %d channels, device version 0x%02x, driver version %s\n", | ||
| 1044 | dma->chancnt, device->version, IOAT_DMA_VERSION); | ||
| 1045 | |||
| 1046 | if (!dma->chancnt) { | ||
| 1047 | dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: " | ||
| 1048 | "zero channels detected\n"); | ||
| 1049 | goto err_setup_interrupts; | ||
| 1050 | } | ||
| 1051 | |||
| 1052 | err = ioat_dma_setup_interrupts(device); | ||
| 1053 | if (err) | ||
| 1054 | goto err_setup_interrupts; | ||
| 1055 | |||
| 1056 | err = ioat_dma_self_test(device); | ||
| 1057 | if (err) | ||
| 1058 | goto err_self_test; | ||
| 1059 | |||
| 1060 | return 0; | ||
| 1061 | |||
| 1062 | err_self_test: | ||
| 1063 | ioat_disable_interrupts(device); | ||
| 1064 | err_setup_interrupts: | ||
| 1065 | pci_pool_destroy(device->completion_pool); | ||
| 1066 | err_completion_pool: | ||
| 1067 | pci_pool_destroy(device->dma_pool); | ||
| 1068 | err_dma_pool: | ||
| 1069 | return err; | ||
| 1070 | } | ||
| 1071 | |||
| 1072 | int __devinit ioat_register(struct ioatdma_device *device) | ||
| 1073 | { | ||
| 1074 | int err = dma_async_device_register(&device->common); | ||
| 1075 | |||
| 1076 | if (err) { | ||
| 1077 | ioat_disable_interrupts(device); | ||
| 1078 | pci_pool_destroy(device->completion_pool); | ||
| 1079 | pci_pool_destroy(device->dma_pool); | ||
| 1080 | } | ||
| 1081 | |||
| 1082 | return err; | ||
| 1083 | } | ||
| 1084 | |||
| 1085 | /* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */ | ||
| 1086 | static void ioat1_intr_quirk(struct ioatdma_device *device) | ||
| 1087 | { | ||
| 1088 | struct pci_dev *pdev = device->pdev; | ||
| 1089 | u32 dmactrl; | ||
| 1090 | |||
| 1091 | pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl); | ||
| 1092 | if (pdev->msi_enabled) | ||
| 1093 | dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; | ||
| 1094 | else | ||
| 1095 | dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN; | ||
| 1096 | pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) | ||
| 1100 | { | ||
| 1101 | struct pci_dev *pdev = device->pdev; | ||
| 1102 | struct dma_device *dma; | ||
| 1103 | int err; | ||
| 1104 | |||
| 1105 | device->intr_quirk = ioat1_intr_quirk; | ||
| 1106 | device->enumerate_channels = ioat1_enumerate_channels; | ||
| 1107 | dma = &device->common; | ||
| 1108 | dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; | ||
| 1109 | dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; | ||
| 1110 | dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; | ||
| 1111 | dma->device_free_chan_resources = ioat1_dma_free_chan_resources; | ||
| 1112 | dma->device_is_tx_complete = ioat1_dma_is_complete; | ||
| 1113 | |||
| 1114 | err = ioat_probe(device); | ||
| 1115 | if (err) | ||
| 1116 | return err; | ||
| 1117 | ioat_set_tcp_copy_break(4096); | ||
| 1118 | err = ioat_register(device); | ||
| 1119 | if (err) | ||
| 1120 | return err; | ||
| 1121 | if (dca) | ||
| 1122 | device->dca = ioat_dca_init(pdev, device->reg_base); | ||
| 1123 | |||
| 1124 | return err; | ||
| 1125 | } | ||
| 1126 | |||
| 1127 | void __devexit ioat_dma_remove(struct ioatdma_device *device) | ||
| 1128 | { | ||
| 1129 | struct dma_device *dma = &device->common; | ||
| 1130 | |||
| 1131 | ioat_disable_interrupts(device); | ||
| 1132 | |||
| 1133 | dma_async_device_unregister(dma); | ||
| 1134 | |||
| 1135 | pci_pool_destroy(device->dma_pool); | ||
| 1136 | pci_pool_destroy(device->completion_pool); | ||
| 1137 | |||
| 1138 | INIT_LIST_HEAD(&dma->channels); | ||
| 1139 | } | ||
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
new file mode 100644
index 00000000000..d9d6a7e3cd7
--- /dev/null
+++ b/drivers/dma/ioat/dma.h
| @@ -0,0 +1,306 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License as published by the Free | ||
| 6 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 7 | * any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in the | ||
| 19 | * file called COPYING. | ||
| 20 | */ | ||
| 21 | #ifndef IOATDMA_H | ||
| 22 | #define IOATDMA_H | ||
| 23 | |||
| 24 | #include <linux/dmaengine.h> | ||
| 25 | #include "hw.h" | ||
| 26 | #include "registers.h" | ||
| 27 | #include <linux/init.h> | ||
| 28 | #include <linux/dmapool.h> | ||
| 29 | #include <linux/cache.h> | ||
| 30 | #include <linux/pci_ids.h> | ||
| 31 | #include <net/tcp.h> | ||
| 32 | |||
| 33 | #define IOAT_DMA_VERSION "3.64" | ||
| 34 | |||
| 35 | #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 | ||
| 36 | #define IOAT_DMA_DCA_ANY_CPU ~0 | ||
| 37 | |||
| 38 | #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) | ||
| 39 | #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) | ||
| 40 | #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) | ||
| 41 | #define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev) | ||
| 42 | |||
| 43 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) | ||
| 44 | |||
| 45 | /* | ||
| 46 | * workaround for IOAT ver.3.0 null descriptor issue | ||
| 47 | * (channel returns error when size is 0) | ||
| 48 | */ | ||
| 49 | #define NULL_DESC_BUFFER_SIZE 1 | ||
| 50 | |||
| 51 | /** | ||
| 52 | * struct ioatdma_device - internal representation of an IOAT device | ||
| 53 | * @pdev: PCI-Express device | ||
| 54 | * @reg_base: MMIO register space base address | ||
| 55 | * @dma_pool: for allocating DMA descriptors | ||
| 56 | * @common: embedded struct dma_device | ||
| 57 | * @version: version of ioatdma device | ||
| 58 | * @msix_entries: irq handlers | ||
| 59 | * @idx: per channel data | ||
| 60 | * @dca: direct cache access context | ||
| 61 | * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) | ||
| 62 | * @enumerate_channels: hw version specific channel enumeration | ||
| 63 | */ | ||
| 64 | |||
| 65 | struct ioatdma_device { | ||
| 66 | struct pci_dev *pdev; | ||
| 67 | void __iomem *reg_base; | ||
| 68 | struct pci_pool *dma_pool; | ||
| 69 | struct pci_pool *completion_pool; | ||
| 70 | struct dma_device common; | ||
| 71 | u8 version; | ||
| 72 | struct msix_entry msix_entries[4]; | ||
| 73 | struct ioat_chan_common *idx[4]; | ||
| 74 | struct dca_provider *dca; | ||
| 75 | void (*intr_quirk)(struct ioatdma_device *device); | ||
| 76 | int (*enumerate_channels)(struct ioatdma_device *device); | ||
| 77 | }; | ||
| 78 | |||
| 79 | struct ioat_chan_common { | ||
| 80 | struct dma_chan common; | ||
| 81 | void __iomem *reg_base; | ||
| 82 | unsigned long last_completion; | ||
| 83 | spinlock_t cleanup_lock; | ||
| 84 | dma_cookie_t completed_cookie; | ||
| 85 | unsigned long state; | ||
| 86 | #define IOAT_COMPLETION_PENDING 0 | ||
| 87 | #define IOAT_COMPLETION_ACK 1 | ||
| 88 | #define IOAT_RESET_PENDING 2 | ||
| 89 | struct timer_list timer; | ||
| 90 | #define COMPLETION_TIMEOUT msecs_to_jiffies(100) | ||
| 91 | #define IDLE_TIMEOUT msecs_to_jiffies(2000) | ||
| 92 | #define RESET_DELAY msecs_to_jiffies(100) | ||
| 93 | struct ioatdma_device *device; | ||
| 94 | dma_addr_t completion_dma; | ||
| 95 | u64 *completion; | ||
| 96 | struct tasklet_struct cleanup_task; | ||
| 97 | }; | ||
| 98 | |||
| 99 | |||
| 100 | /** | ||
| 101 | * struct ioat_dma_chan - internal representation of a DMA channel | ||
| 102 | */ | ||
| 103 | struct ioat_dma_chan { | ||
| 104 | struct ioat_chan_common base; | ||
| 105 | |||
| 106 | size_t xfercap; /* XFERCAP register value expanded out */ | ||
| 107 | |||
| 108 | spinlock_t desc_lock; | ||
| 109 | struct list_head free_desc; | ||
| 110 | struct list_head used_desc; | ||
| 111 | |||
| 112 | int pending; | ||
| 113 | u16 desccount; | ||
| 114 | }; | ||
| 115 | |||
| 116 | static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c) | ||
| 117 | { | ||
| 118 | return container_of(c, struct ioat_chan_common, common); | ||
| 119 | } | ||
| 120 | |||
| 121 | static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c) | ||
| 122 | { | ||
| 123 | struct ioat_chan_common *chan = to_chan_common(c); | ||
| 124 | |||
| 125 | return container_of(chan, struct ioat_dma_chan, base); | ||
| 126 | } | ||
| 127 | |||
| 128 | /** | ||
| 129 | * ioat_is_complete - poll the status of an ioat transaction | ||
| 130 | * @c: channel handle | ||
| 131 | * @cookie: transaction identifier | ||
| 132 | * @done: if set, updated with last completed transaction | ||
| 133 | * @used: if set, updated with last used transaction | ||
| 134 | */ | ||
| 135 | static inline enum dma_status | ||
| 136 | ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, | ||
| 137 | dma_cookie_t *done, dma_cookie_t *used) | ||
| 138 | { | ||
| 139 | struct ioat_chan_common *chan = to_chan_common(c); | ||
| 140 | dma_cookie_t last_used; | ||
| 141 | dma_cookie_t last_complete; | ||
| 142 | |||
| 143 | last_used = c->cookie; | ||
| 144 | last_complete = chan->completed_cookie; | ||
| 145 | |||
| 146 | if (done) | ||
| 147 | *done = last_complete; | ||
| 148 | if (used) | ||
| 149 | *used = last_used; | ||
| 150 | |||
| 151 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
| 152 | } | ||
| 153 | |||
| 154 | /* wrapper around hardware descriptor format + additional software fields */ | ||
| 155 | |||
| 156 | /** | ||
| 157 | * struct ioat_desc_sw - wrapper around hardware descriptor | ||
| 158 | * @hw: hardware DMA descriptor | ||
| 159 | * @node: this descriptor will either be on the free list, | ||
| 160 | * or attached to a transaction list (async_tx.tx_list) | ||
| 161 | * @txd: the generic software descriptor for all engines | ||
| 162 | * @id: identifier for debug | ||
| 163 | */ | ||
| 164 | struct ioat_desc_sw { | ||
| 165 | struct ioat_dma_descriptor *hw; | ||
| 166 | struct list_head node; | ||
| 167 | size_t len; | ||
| 168 | struct dma_async_tx_descriptor txd; | ||
| 169 | #ifdef DEBUG | ||
| 170 | int id; | ||
| 171 | #endif | ||
| 172 | }; | ||
| 173 | |||
| 174 | #ifdef DEBUG | ||
| 175 | #define set_desc_id(desc, i) ((desc)->id = (i)) | ||
| 176 | #define desc_id(desc) ((desc)->id) | ||
| 177 | #else | ||
| 178 | #define set_desc_id(desc, i) | ||
| 179 | #define desc_id(desc) (0) | ||
| 180 | #endif | ||
| 181 | |||
| 182 | static inline void | ||
| 183 | __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw, | ||
| 184 | struct dma_async_tx_descriptor *tx, int id) | ||
| 185 | { | ||
| 186 | struct device *dev = to_dev(chan); | ||
| 187 | |||
| 188 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x" | ||
| 189 | " ctl: %#x (op: %d int_en: %d compl: %d)\n", id, | ||
| 190 | (unsigned long long) tx->phys, | ||
| 191 | (unsigned long long) hw->next, tx->cookie, tx->flags, | ||
| 192 | hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write); | ||
| 193 | } | ||
| 194 | |||
| 195 | #define dump_desc_dbg(c, d) \ | ||
| 196 | ({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; }) | ||
| 197 | |||
| 198 | static inline void ioat_set_tcp_copy_break(unsigned long copybreak) | ||
| 199 | { | ||
| 200 | #ifdef CONFIG_NET_DMA | ||
| 201 | sysctl_tcp_dma_copybreak = copybreak; | ||
| 202 | #endif | ||
| 203 | } | ||
| 204 | |||
| 205 | static inline struct ioat_chan_common * | ||
| 206 | ioat_chan_by_index(struct ioatdma_device *device, int index) | ||
| 207 | { | ||
| 208 | return device->idx[index]; | ||
| 209 | } | ||
| 210 | |||
| 211 | static inline u64 ioat_chansts(struct ioat_chan_common *chan) | ||
| 212 | { | ||
| 213 | u8 ver = chan->device->version; | ||
| 214 | u64 status; | ||
| 215 | u32 status_lo; | ||
| 216 | |||
| 217 | /* We need to read the low address first as this causes the | ||
| 218 | * chipset to latch the upper bits for the subsequent read | ||
| 219 | */ | ||
| 220 | status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver)); | ||
| 221 | status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver)); | ||
| 222 | status <<= 32; | ||
| 223 | status |= status_lo; | ||
| 224 | |||
| 225 | return status; | ||
| 226 | } | ||
| 227 | |||
| 228 | static inline void ioat_start(struct ioat_chan_common *chan) | ||
| 229 | { | ||
| 230 | u8 ver = chan->device->version; | ||
| 231 | |||
| 232 | writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | ||
| 233 | } | ||
| 234 | |||
| 235 | static inline u64 ioat_chansts_to_addr(u64 status) | ||
| 236 | { | ||
| 237 | return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | ||
| 238 | } | ||
| 239 | |||
| 240 | static inline u32 ioat_chanerr(struct ioat_chan_common *chan) | ||
| 241 | { | ||
| 242 | return readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 243 | } | ||
| 244 | |||
| 245 | static inline void ioat_suspend(struct ioat_chan_common *chan) | ||
| 246 | { | ||
| 247 | u8 ver = chan->device->version; | ||
| 248 | |||
| 249 | writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | ||
| 250 | } | ||
| 251 | |||
| 252 | static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr) | ||
| 253 | { | ||
| 254 | struct ioat_chan_common *chan = &ioat->base; | ||
| 255 | |||
| 256 | writel(addr & 0x00000000FFFFFFFF, | ||
| 257 | chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); | ||
| 258 | writel(addr >> 32, | ||
| 259 | chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); | ||
| 260 | } | ||
| 261 | |||
| 262 | static inline bool is_ioat_active(unsigned long status) | ||
| 263 | { | ||
| 264 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE); | ||
| 265 | } | ||
| 266 | |||
| 267 | static inline bool is_ioat_idle(unsigned long status) | ||
| 268 | { | ||
| 269 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE); | ||
| 270 | } | ||
| 271 | |||
| 272 | static inline bool is_ioat_halted(unsigned long status) | ||
| 273 | { | ||
| 274 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED); | ||
| 275 | } | ||
| 276 | |||
| 277 | static inline bool is_ioat_suspended(unsigned long status) | ||
| 278 | { | ||
| 279 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED); | ||
| 280 | } | ||
| 281 | |||
| 282 | /* channel was fatally mis-programmed */ | ||
| 283 | static inline bool is_ioat_bug(unsigned long err) | ||
| 284 | { | ||
| 285 | return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR| | ||
| 286 | IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR| | ||
| 287 | IOAT_CHANERR_LENGTH_ERR)); | ||
| 288 | } | ||
| 289 | |||
| 290 | int __devinit ioat_probe(struct ioatdma_device *device); | ||
| 291 | int __devinit ioat_register(struct ioatdma_device *device); | ||
| 292 | int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca); | ||
| 293 | void __devexit ioat_dma_remove(struct ioatdma_device *device); | ||
| 294 | struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, | ||
| 295 | void __iomem *iobase); | ||
| 296 | unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); | ||
| 297 | void ioat_init_channel(struct ioatdma_device *device, | ||
| 298 | struct ioat_chan_common *chan, int idx, | ||
| 299 | void (*timer_fn)(unsigned long), | ||
| 300 | void (*tasklet)(unsigned long), | ||
| 301 | unsigned long ioat); | ||
| 302 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, | ||
| 303 | size_t len, struct ioat_dma_descriptor *hw); | ||
| 304 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, | ||
| 305 | unsigned long *phys_complete); | ||
| 306 | #endif /* IOATDMA_H */ | ||
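A minimal user-space sketch of the split 64-bit status read that ioat_chansts() above performs is shown below: the low half is read first so the chipset latches the upper bits, then the two halves are recombined. The mmio_read32() helper and the fake register array are stand-ins for readl() and the channel MMIO window, not kernel API.

```c
/* Sketch only: models the read-low-then-high sequence of ioat_chansts(). */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[2] = { 0xdeadbf40u, 0x00000001u }; /* [0]=low, [1]=high */

static uint32_t mmio_read32(unsigned int idx)
{
    return fake_regs[idx]; /* a real driver would use readl() on MMIO here */
}

static uint64_t chansts_read(void)
{
    uint64_t status;
    uint32_t status_lo;

    status_lo = mmio_read32(0); /* low half first: latches the high half */
    status = mmio_read32(1);    /* high half */
    status <<= 32;
    status |= status_lo;
    return status;
}

int main(void)
{
    uint64_t sts = chansts_read();

    /* bits [63:6] are the completed descriptor address, bits [2:0] the state */
    printf("completion addr: %#llx state: %llu\n",
           (unsigned long long)(sts & ~0x3fULL),
           (unsigned long long)(sts & 0x7ULL));
    return 0;
}
```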
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c new file mode 100644 index 00000000000..460b7730133 --- /dev/null +++ b/drivers/dma/ioat/dma_v2.c | |||
| @@ -0,0 +1,881 @@ | |||
| 1 | /* | ||
| 2 | * Intel I/OAT DMA Linux driver | ||
| 3 | * Copyright(c) 2004 - 2009 Intel Corporation. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms and conditions of the GNU General Public License, | ||
| 7 | * version 2, as published by the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in | ||
| 19 | * the file called "COPYING". | ||
| 20 | * | ||
| 21 | */ | ||
| 22 | |||
| 23 | /* | ||
| 24 | * This driver supports an Intel I/OAT DMA engine (versions >= 2), which | ||
| 25 | * does asynchronous data movement and checksumming operations. | ||
| 26 | */ | ||
| 27 | |||
| 28 | #include <linux/init.h> | ||
| 29 | #include <linux/module.h> | ||
| 30 | #include <linux/pci.h> | ||
| 31 | #include <linux/interrupt.h> | ||
| 32 | #include <linux/dmaengine.h> | ||
| 33 | #include <linux/delay.h> | ||
| 34 | #include <linux/dma-mapping.h> | ||
| 35 | #include <linux/workqueue.h> | ||
| 36 | #include <linux/i7300_idle.h> | ||
| 37 | #include "dma.h" | ||
| 38 | #include "dma_v2.h" | ||
| 39 | #include "registers.h" | ||
| 40 | #include "hw.h" | ||
| 41 | |||
| 42 | static int ioat_ring_alloc_order = 8; | ||
| 43 | module_param(ioat_ring_alloc_order, int, 0644); | ||
| 44 | MODULE_PARM_DESC(ioat_ring_alloc_order, | ||
| 45 | "ioat2+: allocate 2^n descriptors per channel (default: n=8)"); | ||
| 46 | static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER; | ||
| 47 | module_param(ioat_ring_max_alloc_order, int, 0644); | ||
| 48 | MODULE_PARM_DESC(ioat_ring_max_alloc_order, | ||
| 49 | "ioat2+: upper limit for dynamic ring resizing (default: n=16)"); | ||
| 50 | |||
| 51 | static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) | ||
| 52 | { | ||
| 53 | void __iomem *reg_base = ioat->base.reg_base; | ||
| 54 | |||
| 55 | ioat->pending = 0; | ||
| 56 | ioat->dmacount += ioat2_ring_pending(ioat); | ||
| 57 | ioat->issued = ioat->head; | ||
| 58 | /* make descriptor updates globally visible before notifying channel */ | ||
| 59 | wmb(); | ||
| 60 | writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); | ||
| 61 | dev_dbg(to_dev(&ioat->base), | ||
| 62 | "%s: head: %#x tail: %#x issued: %#x count: %#x\n", | ||
| 63 | __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); | ||
| 64 | } | ||
| 65 | |||
| 66 | static void ioat2_issue_pending(struct dma_chan *chan) | ||
| 67 | { | ||
| 68 | struct ioat2_dma_chan *ioat = to_ioat2_chan(chan); | ||
| 69 | |||
| 70 | spin_lock_bh(&ioat->ring_lock); | ||
| 71 | if (ioat->pending == 1) | ||
| 72 | __ioat2_issue_pending(ioat); | ||
| 73 | spin_unlock_bh(&ioat->ring_lock); | ||
| 74 | } | ||
| 75 | |||
| 76 | /** | ||
| 77 | * ioat2_update_pending - log pending descriptors | ||
| 78 | * @ioat: ioat2+ channel | ||
| 79 | * | ||
| 80 | * Set pending to '1' unless pending is already set to '2'; pending == 2 | ||
| 81 | * indicates that submission is temporarily blocked due to an in-flight | ||
| 82 | * reset. If we are already above the ioat_pending_level threshold, then | ||
| 83 | * just issue pending. | ||
| 84 | * | ||
| 85 | * called with ring_lock held | ||
| 86 | */ | ||
| 87 | static void ioat2_update_pending(struct ioat2_dma_chan *ioat) | ||
| 88 | { | ||
| 89 | if (unlikely(ioat->pending == 2)) | ||
| 90 | return; | ||
| 91 | else if (ioat2_ring_pending(ioat) > ioat_pending_level) | ||
| 92 | __ioat2_issue_pending(ioat); | ||
| 93 | else | ||
| 94 | ioat->pending = 1; | ||
| 95 | } | ||
| 96 | |||
| 97 | static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) | ||
| 98 | { | ||
| 99 | struct ioat_ring_ent *desc; | ||
| 100 | struct ioat_dma_descriptor *hw; | ||
| 101 | int idx; | ||
| 102 | |||
| 103 | if (ioat2_ring_space(ioat) < 1) { | ||
| 104 | dev_err(to_dev(&ioat->base), | ||
| 105 | "Unable to start null desc - ring full\n"); | ||
| 106 | return; | ||
| 107 | } | ||
| 108 | |||
| 109 | dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n", | ||
| 110 | __func__, ioat->head, ioat->tail, ioat->issued); | ||
| 111 | idx = ioat2_desc_alloc(ioat, 1); | ||
| 112 | desc = ioat2_get_ring_ent(ioat, idx); | ||
| 113 | |||
| 114 | hw = desc->hw; | ||
| 115 | hw->ctl = 0; | ||
| 116 | hw->ctl_f.null = 1; | ||
| 117 | hw->ctl_f.int_en = 1; | ||
| 118 | hw->ctl_f.compl_write = 1; | ||
| 119 | /* set size to non-zero value (channel returns error when size is 0) */ | ||
| 120 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
| 121 | hw->src_addr = 0; | ||
| 122 | hw->dst_addr = 0; | ||
| 123 | async_tx_ack(&desc->txd); | ||
| 124 | ioat2_set_chainaddr(ioat, desc->txd.phys); | ||
| 125 | dump_desc_dbg(ioat, desc); | ||
| 126 | __ioat2_issue_pending(ioat); | ||
| 127 | } | ||
| 128 | |||
| 129 | static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat) | ||
| 130 | { | ||
| 131 | spin_lock_bh(&ioat->ring_lock); | ||
| 132 | __ioat2_start_null_desc(ioat); | ||
| 133 | spin_unlock_bh(&ioat->ring_lock); | ||
| 134 | } | ||
| 135 | |||
| 136 | static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) | ||
| 137 | { | ||
| 138 | struct ioat_chan_common *chan = &ioat->base; | ||
| 139 | struct dma_async_tx_descriptor *tx; | ||
| 140 | struct ioat_ring_ent *desc; | ||
| 141 | bool seen_current = false; | ||
| 142 | u16 active; | ||
| 143 | int i; | ||
| 144 | |||
| 145 | dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", | ||
| 146 | __func__, ioat->head, ioat->tail, ioat->issued); | ||
| 147 | |||
| 148 | active = ioat2_ring_active(ioat); | ||
| 149 | for (i = 0; i < active && !seen_current; i++) { | ||
| 150 | prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); | ||
| 151 | desc = ioat2_get_ring_ent(ioat, ioat->tail + i); | ||
| 152 | tx = &desc->txd; | ||
| 153 | dump_desc_dbg(ioat, desc); | ||
| 154 | if (tx->cookie) { | ||
| 155 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); | ||
| 156 | chan->completed_cookie = tx->cookie; | ||
| 157 | tx->cookie = 0; | ||
| 158 | if (tx->callback) { | ||
| 159 | tx->callback(tx->callback_param); | ||
| 160 | tx->callback = NULL; | ||
| 161 | } | ||
| 162 | } | ||
| 163 | |||
| 164 | if (tx->phys == phys_complete) | ||
| 165 | seen_current = true; | ||
| 166 | } | ||
| 167 | ioat->tail += i; | ||
| 168 | BUG_ON(!seen_current); /* no active descs have written a completion? */ | ||
| 169 | |||
| 170 | chan->last_completion = phys_complete; | ||
| 171 | if (ioat->head == ioat->tail) { | ||
| 172 | dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", | ||
| 173 | __func__); | ||
| 174 | clear_bit(IOAT_COMPLETION_PENDING, &chan->state); | ||
| 175 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); | ||
| 176 | } | ||
| 177 | } | ||
| 178 | |||
| 179 | /** | ||
| 180 | * ioat2_cleanup - clean finished descriptors (advance tail pointer) | ||
| 181 | * @ioat: ioat2+ channel to be cleaned up | ||
| 182 | */ | ||
| 183 | static void ioat2_cleanup(struct ioat2_dma_chan *ioat) | ||
| 184 | { | ||
| 185 | struct ioat_chan_common *chan = &ioat->base; | ||
| 186 | unsigned long phys_complete; | ||
| 187 | |||
| 188 | prefetch(chan->completion); | ||
| 189 | |||
| 190 | if (!spin_trylock_bh(&chan->cleanup_lock)) | ||
| 191 | return; | ||
| 192 | |||
| 193 | if (!ioat_cleanup_preamble(chan, &phys_complete)) { | ||
| 194 | spin_unlock_bh(&chan->cleanup_lock); | ||
| 195 | return; | ||
| 196 | } | ||
| 197 | |||
| 198 | if (!spin_trylock_bh(&ioat->ring_lock)) { | ||
| 199 | spin_unlock_bh(&chan->cleanup_lock); | ||
| 200 | return; | ||
| 201 | } | ||
| 202 | |||
| 203 | __cleanup(ioat, phys_complete); | ||
| 204 | |||
| 205 | spin_unlock_bh(&ioat->ring_lock); | ||
| 206 | spin_unlock_bh(&chan->cleanup_lock); | ||
| 207 | } | ||
| 208 | |||
| 209 | static void ioat2_cleanup_tasklet(unsigned long data) | ||
| 210 | { | ||
| 211 | struct ioat2_dma_chan *ioat = (void *) data; | ||
| 212 | |||
| 213 | ioat2_cleanup(ioat); | ||
| 214 | writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); | ||
| 215 | } | ||
| 216 | |||
| 217 | static void __restart_chan(struct ioat2_dma_chan *ioat) | ||
| 218 | { | ||
| 219 | struct ioat_chan_common *chan = &ioat->base; | ||
| 220 | |||
| 221 | /* set the tail to be re-issued */ | ||
| 222 | ioat->issued = ioat->tail; | ||
| 223 | ioat->dmacount = 0; | ||
| 224 | set_bit(IOAT_COMPLETION_PENDING, &chan->state); | ||
| 225 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
| 226 | |||
| 227 | dev_dbg(to_dev(chan), | ||
| 228 | "%s: head: %#x tail: %#x issued: %#x count: %#x\n", | ||
| 229 | __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); | ||
| 230 | |||
| 231 | if (ioat2_ring_pending(ioat)) { | ||
| 232 | struct ioat_ring_ent *desc; | ||
| 233 | |||
| 234 | desc = ioat2_get_ring_ent(ioat, ioat->tail); | ||
| 235 | ioat2_set_chainaddr(ioat, desc->txd.phys); | ||
| 236 | __ioat2_issue_pending(ioat); | ||
| 237 | } else | ||
| 238 | __ioat2_start_null_desc(ioat); | ||
| 239 | } | ||
| 240 | |||
| 241 | static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) | ||
| 242 | { | ||
| 243 | struct ioat_chan_common *chan = &ioat->base; | ||
| 244 | unsigned long phys_complete; | ||
| 245 | u32 status; | ||
| 246 | |||
| 247 | status = ioat_chansts(chan); | ||
| 248 | if (is_ioat_active(status) || is_ioat_idle(status)) | ||
| 249 | ioat_suspend(chan); | ||
| 250 | while (is_ioat_active(status) || is_ioat_idle(status)) { | ||
| 251 | status = ioat_chansts(chan); | ||
| 252 | cpu_relax(); | ||
| 253 | } | ||
| 254 | |||
| 255 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
| 256 | __cleanup(ioat, phys_complete); | ||
| 257 | |||
| 258 | __restart_chan(ioat); | ||
| 259 | } | ||
| 260 | |||
| 261 | static bool reshape_ring(struct ioat2_dma_chan *ioat, int order); | ||
| 262 | |||
| 263 | static void ioat2_timer_event(unsigned long data) | ||
| 264 | { | ||
| 265 | struct ioat2_dma_chan *ioat = (void *) data; | ||
| 266 | struct ioat_chan_common *chan = &ioat->base; | ||
| 267 | |||
| 268 | spin_lock_bh(&chan->cleanup_lock); | ||
| 269 | if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { | ||
| 270 | unsigned long phys_complete; | ||
| 271 | u64 status; | ||
| 272 | |||
| 273 | spin_lock_bh(&ioat->ring_lock); | ||
| 274 | status = ioat_chansts(chan); | ||
| 275 | |||
| 276 | /* when halted due to errors, check for channel | ||
| 277 | * programming errors before advancing the completion state | ||
| 278 | */ | ||
| 279 | if (is_ioat_halted(status)) { | ||
| 280 | u32 chanerr; | ||
| 281 | |||
| 282 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 283 | BUG_ON(is_ioat_bug(chanerr)); | ||
| 284 | } | ||
| 285 | |||
| 286 | /* if we haven't made progress and we have already | ||
| 287 | * acknowledged a pending completion once, then be more | ||
| 288 | * forceful with a restart | ||
| 289 | */ | ||
| 290 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
| 291 | __cleanup(ioat, phys_complete); | ||
| 292 | else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) | ||
| 293 | ioat2_restart_channel(ioat); | ||
| 294 | else { | ||
| 295 | set_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
| 296 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
| 297 | } | ||
| 298 | spin_unlock_bh(&ioat->ring_lock); | ||
| 299 | } else { | ||
| 300 | u16 active; | ||
| 301 | |||
| 302 | /* if the ring is idle, empty, and oversized, try to step | ||
| 303 | * down the size | ||
| 304 | */ | ||
| 305 | spin_lock_bh(&ioat->ring_lock); | ||
| 306 | active = ioat2_ring_active(ioat); | ||
| 307 | if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) | ||
| 308 | reshape_ring(ioat, ioat->alloc_order-1); | ||
| 309 | spin_unlock_bh(&ioat->ring_lock); | ||
| 310 | |||
| 311 | /* keep shrinking until we get back to our minimum | ||
| 312 | * default size | ||
| 313 | */ | ||
| 314 | if (ioat->alloc_order > ioat_get_alloc_order()) | ||
| 315 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); | ||
| 316 | } | ||
| 317 | spin_unlock_bh(&chan->cleanup_lock); | ||
| 318 | } | ||
| 319 | |||
| 320 | /** | ||
| 321 | * ioat2_enumerate_channels - find and initialize the device's channels | ||
| 322 | * @device: the device to be enumerated | ||
| 323 | */ | ||
| 324 | static int ioat2_enumerate_channels(struct ioatdma_device *device) | ||
| 325 | { | ||
| 326 | struct ioat2_dma_chan *ioat; | ||
| 327 | struct device *dev = &device->pdev->dev; | ||
| 328 | struct dma_device *dma = &device->common; | ||
| 329 | u8 xfercap_log; | ||
| 330 | int i; | ||
| 331 | |||
| 332 | INIT_LIST_HEAD(&dma->channels); | ||
| 333 | dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); | ||
| 334 | dma->chancnt &= 0x1f; /* bits [4:0] valid */ | ||
| 335 | if (dma->chancnt > ARRAY_SIZE(device->idx)) { | ||
| 336 | dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", | ||
| 337 | dma->chancnt, ARRAY_SIZE(device->idx)); | ||
| 338 | dma->chancnt = ARRAY_SIZE(device->idx); | ||
| 339 | } | ||
| 340 | xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET); | ||
| 341 | xfercap_log &= 0x1f; /* bits [4:0] valid */ | ||
| 342 | if (xfercap_log == 0) | ||
| 343 | return 0; | ||
| 344 | dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); | ||
| 345 | |||
| 346 | /* FIXME which i/oat version is i7300? */ | ||
| 347 | #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL | ||
| 348 | if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) | ||
| 349 | dma->chancnt--; | ||
| 350 | #endif | ||
| 351 | for (i = 0; i < dma->chancnt; i++) { | ||
| 352 | ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); | ||
| 353 | if (!ioat) | ||
| 354 | break; | ||
| 355 | |||
| 356 | ioat_init_channel(device, &ioat->base, i, | ||
| 357 | ioat2_timer_event, | ||
| 358 | ioat2_cleanup_tasklet, | ||
| 359 | (unsigned long) ioat); | ||
| 360 | ioat->xfercap_log = xfercap_log; | ||
| 361 | spin_lock_init(&ioat->ring_lock); | ||
| 362 | } | ||
| 363 | dma->chancnt = i; | ||
| 364 | return i; | ||
| 365 | } | ||
| 366 | |||
| 367 | static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) | ||
| 368 | { | ||
| 369 | struct dma_chan *c = tx->chan; | ||
| 370 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
| 371 | struct ioat_chan_common *chan = &ioat->base; | ||
| 372 | dma_cookie_t cookie = c->cookie; | ||
| 373 | |||
| 374 | cookie++; | ||
| 375 | if (cookie < 0) | ||
| 376 | cookie = 1; | ||
| 377 | tx->cookie = cookie; | ||
| 378 | c->cookie = cookie; | ||
| 379 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); | ||
| 380 | |||
| 381 | if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) | ||
| 382 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
| 383 | ioat2_update_pending(ioat); | ||
| 384 | spin_unlock_bh(&ioat->ring_lock); | ||
| 385 | |||
| 386 | return cookie; | ||
| 387 | } | ||
| 388 | |||
| 389 | static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags) | ||
| 390 | { | ||
| 391 | struct ioat_dma_descriptor *hw; | ||
| 392 | struct ioat_ring_ent *desc; | ||
| 393 | struct ioatdma_device *dma; | ||
| 394 | dma_addr_t phys; | ||
| 395 | |||
| 396 | dma = to_ioatdma_device(chan->device); | ||
| 397 | hw = pci_pool_alloc(dma->dma_pool, flags, &phys); | ||
| 398 | if (!hw) | ||
| 399 | return NULL; | ||
| 400 | memset(hw, 0, sizeof(*hw)); | ||
| 401 | |||
| 402 | desc = kzalloc(sizeof(*desc), flags); | ||
| 403 | if (!desc) { | ||
| 404 | pci_pool_free(dma->dma_pool, hw, phys); | ||
| 405 | return NULL; | ||
| 406 | } | ||
| 407 | |||
| 408 | dma_async_tx_descriptor_init(&desc->txd, chan); | ||
| 409 | desc->txd.tx_submit = ioat2_tx_submit_unlock; | ||
| 410 | desc->hw = hw; | ||
| 411 | desc->txd.phys = phys; | ||
| 412 | return desc; | ||
| 413 | } | ||
| 414 | |||
| 415 | static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) | ||
| 416 | { | ||
| 417 | struct ioatdma_device *dma; | ||
| 418 | |||
| 419 | dma = to_ioatdma_device(chan->device); | ||
| 420 | pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys); | ||
| 421 | kfree(desc); | ||
| 422 | } | ||
| 423 | |||
| 424 | static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags) | ||
| 425 | { | ||
| 426 | struct ioat_ring_ent **ring; | ||
| 427 | int descs = 1 << order; | ||
| 428 | int i; | ||
| 429 | |||
| 430 | if (order > ioat_get_max_alloc_order()) | ||
| 431 | return NULL; | ||
| 432 | |||
| 433 | /* allocate the array to hold the software ring */ | ||
| 434 | ring = kcalloc(descs, sizeof(*ring), flags); | ||
| 435 | if (!ring) | ||
| 436 | return NULL; | ||
| 437 | for (i = 0; i < descs; i++) { | ||
| 438 | ring[i] = ioat2_alloc_ring_ent(c, flags); | ||
| 439 | if (!ring[i]) { | ||
| 440 | while (i--) | ||
| 441 | ioat2_free_ring_ent(ring[i], c); | ||
| 442 | kfree(ring); | ||
| 443 | return NULL; | ||
| 444 | } | ||
| 445 | set_desc_id(ring[i], i); | ||
| 446 | } | ||
| 447 | |||
| 448 | /* link descs */ | ||
| 449 | for (i = 0; i < descs-1; i++) { | ||
| 450 | struct ioat_ring_ent *next = ring[i+1]; | ||
| 451 | struct ioat_dma_descriptor *hw = ring[i]->hw; | ||
| 452 | |||
| 453 | hw->next = next->txd.phys; | ||
| 454 | } | ||
| 455 | ring[i]->hw->next = ring[0]->txd.phys; | ||
| 456 | |||
| 457 | return ring; | ||
| 458 | } | ||
| 459 | |||
| 460 | /* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring | ||
| 461 | * @c: channel to be initialized | ||
| 462 | */ | ||
| 463 | static int ioat2_alloc_chan_resources(struct dma_chan *c) | ||
| 464 | { | ||
| 465 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
| 466 | struct ioat_chan_common *chan = &ioat->base; | ||
| 467 | struct ioat_ring_ent **ring; | ||
| 468 | u32 chanerr; | ||
| 469 | int order; | ||
| 470 | |||
| 471 | /* have we already been set up? */ | ||
| 472 | if (ioat->ring) | ||
| 473 | return 1 << ioat->alloc_order; | ||
| 474 | |||
| 475 | /* Set up the channel control register to interrupt and write completion status on error */ | ||
| 476 | writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); | ||
| 477 | |||
| 478 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 479 | if (chanerr) { | ||
| 480 | dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); | ||
| 481 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 482 | } | ||
| 483 | |||
| 484 | /* allocate a completion writeback area */ | ||
| 485 | /* doing two 32-bit writes to MMIO since one 64-bit write doesn't work */ | ||
| 486 | chan->completion = pci_pool_alloc(chan->device->completion_pool, | ||
| 487 | GFP_KERNEL, &chan->completion_dma); | ||
| 488 | if (!chan->completion) | ||
| 489 | return -ENOMEM; | ||
| 490 | |||
| 491 | memset(chan->completion, 0, sizeof(*chan->completion)); | ||
| 492 | writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF, | ||
| 493 | chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); | ||
| 494 | writel(((u64) chan->completion_dma) >> 32, | ||
| 495 | chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); | ||
| 496 | |||
| 497 | order = ioat_get_alloc_order(); | ||
| 498 | ring = ioat2_alloc_ring(c, order, GFP_KERNEL); | ||
| 499 | if (!ring) | ||
| 500 | return -ENOMEM; | ||
| 501 | |||
| 502 | spin_lock_bh(&ioat->ring_lock); | ||
| 503 | ioat->ring = ring; | ||
| 504 | ioat->head = 0; | ||
| 505 | ioat->issued = 0; | ||
| 506 | ioat->tail = 0; | ||
| 507 | ioat->pending = 0; | ||
| 508 | ioat->alloc_order = order; | ||
| 509 | spin_unlock_bh(&ioat->ring_lock); | ||
| 510 | |||
| 511 | tasklet_enable(&chan->cleanup_task); | ||
| 512 | ioat2_start_null_desc(ioat); | ||
| 513 | |||
| 514 | return 1 << ioat->alloc_order; | ||
| 515 | } | ||
| 516 | |||
| 517 | static bool reshape_ring(struct ioat2_dma_chan *ioat, int order) | ||
| 518 | { | ||
| 519 | /* reshape differs from normal ring allocation in that we want | ||
| 520 | * to allocate a new software ring while only | ||
| 521 | * extending/truncating the hardware ring | ||
| 522 | */ | ||
| 523 | struct ioat_chan_common *chan = &ioat->base; | ||
| 524 | struct dma_chan *c = &chan->common; | ||
| 525 | const u16 curr_size = ioat2_ring_mask(ioat) + 1; | ||
| 526 | const u16 active = ioat2_ring_active(ioat); | ||
| 527 | const u16 new_size = 1 << order; | ||
| 528 | struct ioat_ring_ent **ring; | ||
| 529 | u16 i; | ||
| 530 | |||
| 531 | if (order > ioat_get_max_alloc_order()) | ||
| 532 | return false; | ||
| 533 | |||
| 534 | /* double check that we have at least 1 free descriptor */ | ||
| 535 | if (active == curr_size) | ||
| 536 | return false; | ||
| 537 | |||
| 538 | /* when shrinking, verify that we can hold the current active | ||
| 539 | * set in the new ring | ||
| 540 | */ | ||
| 541 | if (active >= new_size) | ||
| 542 | return false; | ||
| 543 | |||
| 544 | /* allocate the array to hold the software ring */ | ||
| 545 | ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT); | ||
| 546 | if (!ring) | ||
| 547 | return false; | ||
| 548 | |||
| 549 | /* allocate/trim descriptors as needed */ | ||
| 550 | if (new_size > curr_size) { | ||
| 551 | /* copy current descriptors to the new ring */ | ||
| 552 | for (i = 0; i < curr_size; i++) { | ||
| 553 | u16 curr_idx = (ioat->tail+i) & (curr_size-1); | ||
| 554 | u16 new_idx = (ioat->tail+i) & (new_size-1); | ||
| 555 | |||
| 556 | ring[new_idx] = ioat->ring[curr_idx]; | ||
| 557 | set_desc_id(ring[new_idx], new_idx); | ||
| 558 | } | ||
| 559 | |||
| 560 | /* add new descriptors to the ring */ | ||
| 561 | for (i = curr_size; i < new_size; i++) { | ||
| 562 | u16 new_idx = (ioat->tail+i) & (new_size-1); | ||
| 563 | |||
| 564 | ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT); | ||
| 565 | if (!ring[new_idx]) { | ||
| 566 | while (i--) { | ||
| 567 | u16 new_idx = (ioat->tail+i) & (new_size-1); | ||
| 568 | |||
| 569 | ioat2_free_ring_ent(ring[new_idx], c); | ||
| 570 | } | ||
| 571 | kfree(ring); | ||
| 572 | return false; | ||
| 573 | } | ||
| 574 | set_desc_id(ring[new_idx], new_idx); | ||
| 575 | } | ||
| 576 | |||
| 577 | /* hw link new descriptors */ | ||
| 578 | for (i = curr_size-1; i < new_size; i++) { | ||
| 579 | u16 new_idx = (ioat->tail+i) & (new_size-1); | ||
| 580 | struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)]; | ||
| 581 | struct ioat_dma_descriptor *hw = ring[new_idx]->hw; | ||
| 582 | |||
| 583 | hw->next = next->txd.phys; | ||
| 584 | } | ||
| 585 | } else { | ||
| 586 | struct ioat_dma_descriptor *hw; | ||
| 587 | struct ioat_ring_ent *next; | ||
| 588 | |||
| 589 | /* copy current descriptors to the new ring, dropping the | ||
| 590 | * removed descriptors | ||
| 591 | */ | ||
| 592 | for (i = 0; i < new_size; i++) { | ||
| 593 | u16 curr_idx = (ioat->tail+i) & (curr_size-1); | ||
| 594 | u16 new_idx = (ioat->tail+i) & (new_size-1); | ||
| 595 | |||
| 596 | ring[new_idx] = ioat->ring[curr_idx]; | ||
| 597 | set_desc_id(ring[new_idx], new_idx); | ||
| 598 | } | ||
| 599 | |||
| 600 | /* free deleted descriptors */ | ||
| 601 | for (i = new_size; i < curr_size; i++) { | ||
| 602 | struct ioat_ring_ent *ent; | ||
| 603 | |||
| 604 | ent = ioat2_get_ring_ent(ioat, ioat->tail+i); | ||
| 605 | ioat2_free_ring_ent(ent, c); | ||
| 606 | } | ||
| 607 | |||
| 608 | /* fix up hardware ring */ | ||
| 609 | hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw; | ||
| 610 | next = ring[(ioat->tail+new_size) & (new_size-1)]; | ||
| 611 | hw->next = next->txd.phys; | ||
| 612 | } | ||
| 613 | |||
| 614 | dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", | ||
| 615 | __func__, new_size); | ||
| 616 | |||
| 617 | kfree(ioat->ring); | ||
| 618 | ioat->ring = ring; | ||
| 619 | ioat->alloc_order = order; | ||
| 620 | |||
| 621 | return true; | ||
| 622 | } | ||
| 623 | |||
| 624 | /** | ||
| 625 | * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops | ||
| 626 | * @idx: gets starting descriptor index on successful allocation | ||
| 627 | * @ioat: ioat2,3 channel (ring) to operate on | ||
| 628 | * @num_descs: allocation length | ||
| 629 | */ | ||
| 630 | static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) | ||
| 631 | { | ||
| 632 | struct ioat_chan_common *chan = &ioat->base; | ||
| 633 | |||
| 634 | spin_lock_bh(&ioat->ring_lock); | ||
| 635 | /* never allow the last descriptor to be consumed; we need at | ||
| 636 | * least one free at all times to allow for on-the-fly ring | ||
| 637 | * resizing. | ||
| 638 | */ | ||
| 639 | while (unlikely(ioat2_ring_space(ioat) <= num_descs)) { | ||
| 640 | if (reshape_ring(ioat, ioat->alloc_order + 1) && | ||
| 641 | ioat2_ring_space(ioat) > num_descs) | ||
| 642 | break; | ||
| 643 | |||
| 644 | if (printk_ratelimit()) | ||
| 645 | dev_dbg(to_dev(chan), | ||
| 646 | "%s: ring full! num_descs: %d (%x:%x:%x)\n", | ||
| 647 | __func__, num_descs, ioat->head, ioat->tail, | ||
| 648 | ioat->issued); | ||
| 649 | spin_unlock_bh(&ioat->ring_lock); | ||
| 650 | |||
| 651 | /* make progress on reclaim in the allocation failure case; we | ||
| 652 | * may be called with bottom halves disabled, so we need to trigger | ||
| 653 | * the timer event directly | ||
| 654 | */ | ||
| 655 | spin_lock_bh(&chan->cleanup_lock); | ||
| 656 | if (jiffies > chan->timer.expires && | ||
| 657 | timer_pending(&chan->timer)) { | ||
| 658 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
| 659 | spin_unlock_bh(&chan->cleanup_lock); | ||
| 660 | ioat2_timer_event((unsigned long) ioat); | ||
| 661 | } else | ||
| 662 | spin_unlock_bh(&chan->cleanup_lock); | ||
| 663 | return -ENOMEM; | ||
| 664 | } | ||
| 665 | |||
| 666 | dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n", | ||
| 667 | __func__, num_descs, ioat->head, ioat->tail, ioat->issued); | ||
| 668 | |||
| 669 | *idx = ioat2_desc_alloc(ioat, num_descs); | ||
| 670 | return 0; /* with ioat->ring_lock held */ | ||
| 671 | } | ||
| 672 | |||
| 673 | static struct dma_async_tx_descriptor * | ||
| 674 | ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, | ||
| 675 | dma_addr_t dma_src, size_t len, unsigned long flags) | ||
| 676 | { | ||
| 677 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
| 678 | struct ioat_dma_descriptor *hw; | ||
| 679 | struct ioat_ring_ent *desc; | ||
| 680 | dma_addr_t dst = dma_dest; | ||
| 681 | dma_addr_t src = dma_src; | ||
| 682 | size_t total_len = len; | ||
| 683 | int num_descs; | ||
| 684 | u16 idx; | ||
| 685 | int i; | ||
| 686 | |||
| 687 | num_descs = ioat2_xferlen_to_descs(ioat, len); | ||
| 688 | if (likely(num_descs) && | ||
| 689 | ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0) | ||
| 690 | /* pass */; | ||
| 691 | else | ||
| 692 | return NULL; | ||
| 693 | for (i = 0; i < num_descs; i++) { | ||
| 694 | size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log); | ||
| 695 | |||
| 696 | desc = ioat2_get_ring_ent(ioat, idx + i); | ||
| 697 | hw = desc->hw; | ||
| 698 | |||
| 699 | hw->size = copy; | ||
| 700 | hw->ctl = 0; | ||
| 701 | hw->src_addr = src; | ||
| 702 | hw->dst_addr = dst; | ||
| 703 | |||
| 704 | len -= copy; | ||
| 705 | dst += copy; | ||
| 706 | src += copy; | ||
| 707 | dump_desc_dbg(ioat, desc); | ||
| 708 | } | ||
| 709 | |||
| 710 | desc->txd.flags = flags; | ||
| 711 | desc->len = total_len; | ||
| 712 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
| 713 | hw->ctl_f.compl_write = 1; | ||
| 714 | dump_desc_dbg(ioat, desc); | ||
| 715 | /* we leave the channel locked to ensure in order submission */ | ||
| 716 | |||
| 717 | return &desc->txd; | ||
| 718 | } | ||
| 719 | |||
| 720 | /** | ||
| 721 | * ioat2_free_chan_resources - release all the descriptors | ||
| 722 | * @c: the channel to be cleaned | ||
| 723 | */ | ||
| 724 | static void ioat2_free_chan_resources(struct dma_chan *c) | ||
| 725 | { | ||
| 726 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
| 727 | struct ioat_chan_common *chan = &ioat->base; | ||
| 728 | struct ioatdma_device *ioatdma_device = chan->device; | ||
| 729 | struct ioat_ring_ent *desc; | ||
| 730 | const u16 total_descs = 1 << ioat->alloc_order; | ||
| 731 | int descs; | ||
| 732 | int i; | ||
| 733 | |||
| 734 | /* Before freeing channel resources, first check | ||
| 735 | * whether they have been previously allocated for this channel. | ||
| 736 | */ | ||
| 737 | if (!ioat->ring) | ||
| 738 | return; | ||
| 739 | |||
| 740 | tasklet_disable(&chan->cleanup_task); | ||
| 741 | del_timer_sync(&chan->timer); | ||
| 742 | ioat2_cleanup(ioat); | ||
| 743 | |||
| 744 | /* Delay 100ms after reset to allow internal DMA logic to quiesce | ||
| 745 | * before removing DMA descriptor resources. | ||
| 746 | */ | ||
| 747 | writeb(IOAT_CHANCMD_RESET, | ||
| 748 | chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); | ||
| 749 | mdelay(100); | ||
| 750 | |||
| 751 | spin_lock_bh(&ioat->ring_lock); | ||
| 752 | descs = ioat2_ring_space(ioat); | ||
| 753 | dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs); | ||
| 754 | for (i = 0; i < descs; i++) { | ||
| 755 | desc = ioat2_get_ring_ent(ioat, ioat->head + i); | ||
| 756 | ioat2_free_ring_ent(desc, c); | ||
| 757 | } | ||
| 758 | |||
| 759 | if (descs < total_descs) | ||
| 760 | dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", | ||
| 761 | total_descs - descs); | ||
| 762 | |||
| 763 | for (i = 0; i < total_descs - descs; i++) { | ||
| 764 | desc = ioat2_get_ring_ent(ioat, ioat->tail + i); | ||
| 765 | dump_desc_dbg(ioat, desc); | ||
| 766 | ioat2_free_ring_ent(desc, c); | ||
| 767 | } | ||
| 768 | |||
| 769 | kfree(ioat->ring); | ||
| 770 | ioat->ring = NULL; | ||
| 771 | ioat->alloc_order = 0; | ||
| 772 | pci_pool_free(ioatdma_device->completion_pool, | ||
| 773 | chan->completion, | ||
| 774 | chan->completion_dma); | ||
| 775 | spin_unlock_bh(&ioat->ring_lock); | ||
| 776 | |||
| 777 | chan->last_completion = 0; | ||
| 778 | chan->completion_dma = 0; | ||
| 779 | ioat->pending = 0; | ||
| 780 | ioat->dmacount = 0; | ||
| 781 | } | ||
| 782 | |||
| 783 | static enum dma_status | ||
| 784 | ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, | ||
| 785 | dma_cookie_t *done, dma_cookie_t *used) | ||
| 786 | { | ||
| 787 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
| 788 | |||
| 789 | if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) | ||
| 790 | return DMA_SUCCESS; | ||
| 791 | |||
| 792 | ioat2_cleanup(ioat); | ||
| 793 | |||
| 794 | return ioat_is_complete(c, cookie, done, used); | ||
| 795 | } | ||
| 796 | |||
| 797 | int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) | ||
| 798 | { | ||
| 799 | struct pci_dev *pdev = device->pdev; | ||
| 800 | struct dma_device *dma; | ||
| 801 | struct dma_chan *c; | ||
| 802 | struct ioat_chan_common *chan; | ||
| 803 | int err; | ||
| 804 | |||
| 805 | device->enumerate_channels = ioat2_enumerate_channels; | ||
| 806 | dma = &device->common; | ||
| 807 | dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; | ||
| 808 | dma->device_issue_pending = ioat2_issue_pending; | ||
| 809 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; | ||
| 810 | dma->device_free_chan_resources = ioat2_free_chan_resources; | ||
| 811 | dma->device_is_tx_complete = ioat2_is_complete; | ||
| 812 | |||
| 813 | err = ioat_probe(device); | ||
| 814 | if (err) | ||
| 815 | return err; | ||
| 816 | ioat_set_tcp_copy_break(2048); | ||
| 817 | |||
| 818 | list_for_each_entry(c, &dma->channels, device_node) { | ||
| 819 | chan = to_chan_common(c); | ||
| 820 | writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU, | ||
| 821 | chan->reg_base + IOAT_DCACTRL_OFFSET); | ||
| 822 | } | ||
| 823 | |||
| 824 | err = ioat_register(device); | ||
| 825 | if (err) | ||
| 826 | return err; | ||
| 827 | if (dca) | ||
| 828 | device->dca = ioat2_dca_init(pdev, device->reg_base); | ||
| 829 | |||
| 830 | return err; | ||
| 831 | } | ||
| 832 | |||
| 833 | int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) | ||
| 834 | { | ||
| 835 | struct pci_dev *pdev = device->pdev; | ||
| 836 | struct dma_device *dma; | ||
| 837 | struct dma_chan *c; | ||
| 838 | struct ioat_chan_common *chan; | ||
| 839 | int err; | ||
| 840 | u16 dev_id; | ||
| 841 | |||
| 842 | device->enumerate_channels = ioat2_enumerate_channels; | ||
| 843 | dma = &device->common; | ||
| 844 | dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; | ||
| 845 | dma->device_issue_pending = ioat2_issue_pending; | ||
| 846 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; | ||
| 847 | dma->device_free_chan_resources = ioat2_free_chan_resources; | ||
| 848 | dma->device_is_tx_complete = ioat2_is_complete; | ||
| 849 | |||
| 850 | /* -= IOAT ver.3 workarounds =- */ | ||
| 851 | /* Write CHANERRMSK_INT with 3E07h to mask out the errors | ||
| 852 | * that can cause stability issues for IOAT ver.3 | ||
| 853 | */ | ||
| 854 | pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); | ||
| 855 | |||
| 856 | /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | ||
| 857 | * (workaround for spurious config parity error after restart) | ||
| 858 | */ | ||
| 859 | pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); | ||
| 860 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) | ||
| 861 | pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); | ||
| 862 | |||
| 863 | err = ioat_probe(device); | ||
| 864 | if (err) | ||
| 865 | return err; | ||
| 866 | ioat_set_tcp_copy_break(262144); | ||
| 867 | |||
| 868 | list_for_each_entry(c, &dma->channels, device_node) { | ||
| 869 | chan = to_chan_common(c); | ||
| 870 | writel(IOAT_DMA_DCA_ANY_CPU, | ||
| 871 | chan->reg_base + IOAT_DCACTRL_OFFSET); | ||
| 872 | } | ||
| 873 | |||
| 874 | err = ioat_register(device); | ||
| 875 | if (err) | ||
| 876 | return err; | ||
| 877 | if (dca) | ||
| 878 | device->dca = ioat3_dca_init(pdev, device->reg_base); | ||
| 879 | |||
| 880 | return err; | ||
| 881 | } | ||
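The growing branch of reshape_ring() above copies live descriptors from the old software ring into a larger one by re-masking the same free-running, tail-relative indices. The stand-alone sketch below reproduces just that index remapping, with plain integers in place of struct ioat_ring_ent pointers; the ring sizes and contents are made-up example values.

```c
/* Sketch only: the (tail + i) & (size - 1) remapping used when the ring grows. */
#include <stdio.h>

#define OLD_SIZE 4
#define NEW_SIZE 8

int main(void)
{
    int old_ring[OLD_SIZE] = { 100, 101, 102, 103 }; /* pretend descriptor ids */
    int new_ring[NEW_SIZE] = { 0 };
    unsigned int tail = 3; /* free-running cleanup index */
    unsigned int i;

    /* copy current descriptors to the new ring, same loop shape as reshape_ring() */
    for (i = 0; i < OLD_SIZE; i++) {
        unsigned int curr_idx = (tail + i) & (OLD_SIZE - 1);
        unsigned int new_idx = (tail + i) & (NEW_SIZE - 1);

        new_ring[new_idx] = old_ring[curr_idx];
    }

    for (i = 0; i < NEW_SIZE; i++)
        printf("new[%u] = %d\n", i, new_ring[i]);
    return 0;
}
```

The active window keeps its order after the copy; only the slot positions change, which is why the hardware chain links are re-stitched immediately afterwards.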
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h new file mode 100644 index 00000000000..9baa3d6065f --- /dev/null +++ b/drivers/dma/ioat/dma_v2.h | |||
| @@ -0,0 +1,146 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License as published by the Free | ||
| 6 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 7 | * any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in the | ||
| 19 | * file called COPYING. | ||
| 20 | */ | ||
| 21 | #ifndef IOATDMA_V2_H | ||
| 22 | #define IOATDMA_V2_H | ||
| 23 | |||
| 24 | #include <linux/dmaengine.h> | ||
| 25 | #include "dma.h" | ||
| 26 | #include "hw.h" | ||
| 27 | |||
| 28 | |||
| 29 | extern int ioat_pending_level; | ||
| 30 | |||
| 31 | /* | ||
| 32 | * workaround for IOAT ver.3.0 null descriptor issue | ||
| 33 | * (channel returns error when size is 0) | ||
| 34 | */ | ||
| 35 | #define NULL_DESC_BUFFER_SIZE 1 | ||
| 36 | |||
| 37 | #define IOAT_MAX_ORDER 16 | ||
| 38 | #define ioat_get_alloc_order() \ | ||
| 39 | (min(ioat_ring_alloc_order, IOAT_MAX_ORDER)) | ||
| 40 | #define ioat_get_max_alloc_order() \ | ||
| 41 | (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER)) | ||
| 42 | |||
| 43 | /* struct ioat2_dma_chan - ioat v2 / v3 channel attributes | ||
| 44 | * @base: common ioat channel parameters | ||
| 45 | * @xfercap_log: log2 of channel max transfer length (for fast division) | ||
| 46 | * @head: allocated index | ||
| 47 | * @issued: hardware notification point | ||
| 48 | * @tail: cleanup index | ||
| 49 | * @pending: lock free indicator for issued != head | ||
| 50 | * @dmacount: identical to 'head' except for occasionally resetting to zero | ||
| 51 | * @alloc_order: log2 of the number of allocated descriptors | ||
| 52 | * @ring: software ring buffer implementation of hardware ring | ||
| 53 | * @ring_lock: protects ring attributes | ||
| 54 | */ | ||
| 55 | struct ioat2_dma_chan { | ||
| 56 | struct ioat_chan_common base; | ||
| 57 | size_t xfercap_log; | ||
| 58 | u16 head; | ||
| 59 | u16 issued; | ||
| 60 | u16 tail; | ||
| 61 | u16 dmacount; | ||
| 62 | u16 alloc_order; | ||
| 63 | int pending; | ||
| 64 | struct ioat_ring_ent **ring; | ||
| 65 | spinlock_t ring_lock; | ||
| 66 | }; | ||
| 67 | |||
| 68 | static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) | ||
| 69 | { | ||
| 70 | struct ioat_chan_common *chan = to_chan_common(c); | ||
| 71 | |||
| 72 | return container_of(chan, struct ioat2_dma_chan, base); | ||
| 73 | } | ||
| 74 | |||
| 75 | static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat) | ||
| 76 | { | ||
| 77 | return (1 << ioat->alloc_order) - 1; | ||
| 78 | } | ||
| 79 | |||
| 80 | /* count of descriptors in flight with the engine */ | ||
| 81 | static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat) | ||
| 82 | { | ||
| 83 | return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat); | ||
| 84 | } | ||
| 85 | |||
| 86 | /* count of descriptors pending submission to hardware */ | ||
| 87 | static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat) | ||
| 88 | { | ||
| 89 | return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat); | ||
| 90 | } | ||
| 91 | |||
| 92 | static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat) | ||
| 93 | { | ||
| 94 | u16 num_descs = ioat2_ring_mask(ioat) + 1; | ||
| 95 | u16 active = ioat2_ring_active(ioat); | ||
| 96 | |||
| 97 | BUG_ON(active > num_descs); | ||
| 98 | |||
| 99 | return num_descs - active; | ||
| 100 | } | ||
| 101 | |||
| 102 | /* assumes caller already checked space */ | ||
| 103 | static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len) | ||
| 104 | { | ||
| 105 | ioat->head += len; | ||
| 106 | return ioat->head - len; | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len) | ||
| 110 | { | ||
| 111 | u16 num_descs = len >> ioat->xfercap_log; | ||
| 112 | |||
| 113 | num_descs += !!(len & ((1 << ioat->xfercap_log) - 1)); | ||
| 114 | return num_descs; | ||
| 115 | } | ||
| 116 | |||
| 117 | struct ioat_ring_ent { | ||
| 118 | struct ioat_dma_descriptor *hw; | ||
| 119 | struct dma_async_tx_descriptor txd; | ||
| 120 | size_t len; | ||
| 121 | #ifdef DEBUG | ||
| 122 | int id; | ||
| 123 | #endif | ||
| 124 | }; | ||
| 125 | |||
| 126 | static inline struct ioat_ring_ent * | ||
| 127 | ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) | ||
| 128 | { | ||
| 129 | return ioat->ring[idx & ioat2_ring_mask(ioat)]; | ||
| 130 | } | ||
| 131 | |||
| 132 | static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) | ||
| 133 | { | ||
| 134 | struct ioat_chan_common *chan = &ioat->base; | ||
| 135 | |||
| 136 | writel(addr & 0x00000000FFFFFFFF, | ||
| 137 | chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | ||
| 138 | writel(addr >> 32, | ||
| 139 | chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | ||
| 140 | } | ||
| 141 | |||
| 142 | int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); | ||
| 143 | int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); | ||
| 144 | struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
| 145 | struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
| 146 | #endif /* IOATDMA_V2_H */ | ||
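The inline helpers above rely on u16 head/issued/tail indices that are never wrapped explicitly; masking with (2^alloc_order - 1) maps them onto slots, and active/pending/space fall out of modular subtraction. The self-contained model below mirrors that arithmetic; the names echo the driver's helpers, but this is illustrative code, not the kernel implementation.

```c
/* Sketch only: models the head/issued/tail bookkeeping of struct ioat2_dma_chan. */
#include <stdint.h>
#include <stdio.h>

struct ring {
    uint16_t head;       /* next slot to allocate (prep) */
    uint16_t issued;     /* last slot the hardware has been told about */
    uint16_t tail;       /* next slot to clean up */
    unsigned int order;  /* ring holds 2^order descriptors */
};

static uint16_t ring_mask(const struct ring *r)    { return (1 << r->order) - 1; }
static uint16_t ring_active(const struct ring *r)  { return (r->head - r->tail) & ring_mask(r); }
static uint16_t ring_pending(const struct ring *r) { return (r->head - r->issued) & ring_mask(r); }
static uint16_t ring_space(const struct ring *r)   { return ring_mask(r) + 1 - ring_active(r); }

int main(void)
{
    struct ring r = { .order = 4 }; /* 16 slots, like a small alloc_order */

    r.head += 5;       /* prep five descriptors */
    printf("pending %u active %u space %u\n",
           ring_pending(&r), ring_active(&r), ring_space(&r));

    r.issued = r.head; /* issue_pending: notify the hardware */
    r.tail += 3;       /* cleanup: three completions observed */
    printf("pending %u active %u space %u\n",
           ring_pending(&r), ring_active(&r), ring_space(&r));
    return 0;
}
```

Transfer sizing uses the same power-of-two trick: ioat2_xferlen_to_descs() shifts by xfercap_log for the whole descriptors and adds one more if any remainder bits are set.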
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioat/hw.h index afa57eef86c..7481fb13ce0 100644 --- a/drivers/dma/ioatdma_hw.h +++ b/drivers/dma/ioat/hw.h | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | 23 | ||
| 24 | /* PCI Configuration Space Values */ | 24 | /* PCI Configuration Space Values */ |
| 25 | #define IOAT_PCI_VID 0x8086 | 25 | #define IOAT_PCI_VID 0x8086 |
| 26 | #define IOAT_MMIO_BAR 0 | ||
| 26 | 27 | ||
| 27 | /* CB device ID's */ | 28 | /* CB device ID's */ |
| 28 | #define IOAT_PCI_DID_5000 0x1A38 | 29 | #define IOAT_PCI_DID_5000 0x1A38 |
| @@ -39,32 +40,34 @@ | |||
| 39 | 40 | ||
| 40 | struct ioat_dma_descriptor { | 41 | struct ioat_dma_descriptor { |
| 41 | uint32_t size; | 42 | uint32_t size; |
| 42 | uint32_t ctl; | 43 | union { |
| 44 | uint32_t ctl; | ||
| 45 | struct { | ||
| 46 | unsigned int int_en:1; | ||
| 47 | unsigned int src_snoop_dis:1; | ||
| 48 | unsigned int dest_snoop_dis:1; | ||
| 49 | unsigned int compl_write:1; | ||
| 50 | unsigned int fence:1; | ||
| 51 | unsigned int null:1; | ||
| 52 | unsigned int src_brk:1; | ||
| 53 | unsigned int dest_brk:1; | ||
| 54 | unsigned int bundle:1; | ||
| 55 | unsigned int dest_dca:1; | ||
| 56 | unsigned int hint:1; | ||
| 57 | unsigned int rsvd2:13; | ||
| 58 | unsigned int op:8; | ||
| 59 | } ctl_f; | ||
| 60 | }; | ||
| 43 | uint64_t src_addr; | 61 | uint64_t src_addr; |
| 44 | uint64_t dst_addr; | 62 | uint64_t dst_addr; |
| 45 | uint64_t next; | 63 | uint64_t next; |
| 46 | uint64_t rsv1; | 64 | uint64_t rsv1; |
| 47 | uint64_t rsv2; | 65 | uint64_t rsv2; |
| 48 | uint64_t user1; | 66 | /* store some driver data in an unused portion of the descriptor */ |
| 67 | union { | ||
| 68 | uint64_t user1; | ||
| 69 | uint64_t tx_cnt; | ||
| 70 | }; | ||
| 49 | uint64_t user2; | 71 | uint64_t user2; |
| 50 | }; | 72 | }; |
| 51 | |||
| 52 | #define IOAT_DMA_DESCRIPTOR_CTL_INT_GN 0x00000001 | ||
| 53 | #define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN 0x00000002 | ||
| 54 | #define IOAT_DMA_DESCRIPTOR_CTL_DST_SN 0x00000004 | ||
| 55 | #define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008 | ||
| 56 | #define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010 | ||
| 57 | #define IOAT_DMA_DESCRIPTOR_NUL 0x00000020 | ||
| 58 | #define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040 | ||
| 59 | #define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080 | ||
| 60 | #define IOAT_DMA_DESCRIPTOR_CTL_BNDL 0x00000100 | ||
| 61 | #define IOAT_DMA_DESCRIPTOR_CTL_DCA 0x00000200 | ||
| 62 | #define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT 0x00000400 | ||
| 63 | |||
| 64 | #define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000 | ||
| 65 | #define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA 0x00000000 | ||
| 66 | |||
| 67 | #define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA 0x00000001 | ||
| 68 | #define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK 0xFF000000 | ||
| 69 | |||
| 70 | #endif | 73 | #endif |
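The hw.h hunk above swaps the old IOAT_DMA_DESCRIPTOR_CTL_* mask macros for an anonymous union that overlays the 32-bit ctl word with named bitfields. A condensed sketch of that pattern follows; it keeps only the first few flag bits, and it assumes the little-endian GCC bitfield layout used on the x86 platforms this hardware ships in, which is why int_en plus compl_write reads back as 0x9 (bits 0 and 3).

```c
/* Sketch only: a trimmed version of the ctl/ctl_f union idea from hw.h. */
#include <stdint.h>
#include <stdio.h>

struct desc_ctl {
    union {
        uint32_t ctl;                        /* raw register image */
        struct {
            unsigned int int_en:1;           /* bit 0, was ..._CTL_INT_GN */
            unsigned int src_snoop_dis:1;
            unsigned int dest_snoop_dis:1;
            unsigned int compl_write:1;      /* bit 3, was ..._CTL_CP_STS */
            unsigned int rsvd:20;
            unsigned int op:8;               /* bits 31:24, the opcode field */
        } ctl_f;
    };
};

int main(void)
{
    struct desc_ctl d = { .ctl = 0 };

    d.ctl_f.int_en = 1;
    d.ctl_f.compl_write = 1;
    printf("raw ctl = %#x\n", d.ctl); /* 0x9 with little-endian bitfield layout */
    return 0;
}
```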
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat/pci.c index 2225bb6ba3d..c4e43226925 100644 --- a/drivers/dma/ioat.c +++ b/drivers/dma/ioat/pci.c | |||
| @@ -30,9 +30,10 @@ | |||
| 30 | #include <linux/pci.h> | 30 | #include <linux/pci.h> |
| 31 | #include <linux/interrupt.h> | 31 | #include <linux/interrupt.h> |
| 32 | #include <linux/dca.h> | 32 | #include <linux/dca.h> |
| 33 | #include "ioatdma.h" | 33 | #include "dma.h" |
| 34 | #include "ioatdma_registers.h" | 34 | #include "dma_v2.h" |
| 35 | #include "ioatdma_hw.h" | 35 | #include "registers.h" |
| 36 | #include "hw.h" | ||
| 36 | 37 | ||
| 37 | MODULE_VERSION(IOAT_DMA_VERSION); | 38 | MODULE_VERSION(IOAT_DMA_VERSION); |
| 38 | MODULE_LICENSE("GPL"); | 39 | MODULE_LICENSE("GPL"); |
| @@ -60,119 +61,101 @@ static struct pci_device_id ioat_pci_tbl[] = { | |||
| 60 | { 0, } | 61 | { 0, } |
| 61 | }; | 62 | }; |
| 62 | 63 | ||
| 63 | struct ioat_device { | 64 | static int __devinit ioat_pci_probe(struct pci_dev *pdev, |
| 64 | struct pci_dev *pdev; | 65 | const struct pci_device_id *id); |
| 65 | void __iomem *iobase; | ||
| 66 | struct ioatdma_device *dma; | ||
| 67 | struct dca_provider *dca; | ||
| 68 | }; | ||
| 69 | |||
| 70 | static int __devinit ioat_probe(struct pci_dev *pdev, | ||
| 71 | const struct pci_device_id *id); | ||
| 72 | static void __devexit ioat_remove(struct pci_dev *pdev); | 66 | static void __devexit ioat_remove(struct pci_dev *pdev); |
| 73 | 67 | ||
| 74 | static int ioat_dca_enabled = 1; | 68 | static int ioat_dca_enabled = 1; |
| 75 | module_param(ioat_dca_enabled, int, 0644); | 69 | module_param(ioat_dca_enabled, int, 0644); |
| 76 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); | 70 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); |
| 77 | 71 | ||
| 72 | #define DRV_NAME "ioatdma" | ||
| 73 | |||
| 78 | static struct pci_driver ioat_pci_driver = { | 74 | static struct pci_driver ioat_pci_driver = { |
| 79 | .name = "ioatdma", | 75 | .name = DRV_NAME, |
| 80 | .id_table = ioat_pci_tbl, | 76 | .id_table = ioat_pci_tbl, |
| 81 | .probe = ioat_probe, | 77 | .probe = ioat_pci_probe, |
| 82 | .remove = __devexit_p(ioat_remove), | 78 | .remove = __devexit_p(ioat_remove), |
| 83 | }; | 79 | }; |
| 84 | 80 | ||
| 85 | static int __devinit ioat_probe(struct pci_dev *pdev, | 81 | static struct ioatdma_device * |
| 86 | const struct pci_device_id *id) | 82 | alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) |
| 87 | { | 83 | { |
| 88 | void __iomem *iobase; | 84 | struct device *dev = &pdev->dev; |
| 89 | struct ioat_device *device; | 85 | struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); |
| 90 | unsigned long mmio_start, mmio_len; | 86 | |
| 87 | if (!d) | ||
| 88 | return NULL; | ||
| 89 | d->pdev = pdev; | ||
| 90 | d->reg_base = iobase; | ||
| 91 | return d; | ||
| 92 | } | ||
| 93 | |||
| 94 | static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
| 95 | { | ||
| 96 | void __iomem * const *iomap; | ||
| 97 | struct device *dev = &pdev->dev; | ||
| 98 | struct ioatdma_device *device; | ||
| 91 | int err; | 99 | int err; |
| 92 | 100 | ||
| 93 | err = pci_enable_device(pdev); | 101 | err = pcim_enable_device(pdev); |
| 94 | if (err) | 102 | if (err) |
| 95 | goto err_enable_device; | 103 | return err; |
| 96 | 104 | ||
| 97 | err = pci_request_regions(pdev, ioat_pci_driver.name); | 105 | err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME); |
| 98 | if (err) | 106 | if (err) |
| 99 | goto err_request_regions; | 107 | return err; |
| 108 | iomap = pcim_iomap_table(pdev); | ||
| 109 | if (!iomap) | ||
| 110 | return -ENOMEM; | ||
| 100 | 111 | ||
| 101 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | 112 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
| 102 | if (err) | 113 | if (err) |
| 103 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 114 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
| 104 | if (err) | 115 | if (err) |
| 105 | goto err_set_dma_mask; | 116 | return err; |
| 106 | 117 | ||
| 107 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 118 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
| 108 | if (err) | 119 | if (err) |
| 109 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 120 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
| 110 | if (err) | 121 | if (err) |
| 111 | goto err_set_dma_mask; | 122 | return err; |
| 112 | |||
| 113 | mmio_start = pci_resource_start(pdev, 0); | ||
| 114 | mmio_len = pci_resource_len(pdev, 0); | ||
| 115 | iobase = ioremap(mmio_start, mmio_len); | ||
| 116 | if (!iobase) { | ||
| 117 | err = -ENOMEM; | ||
| 118 | goto err_ioremap; | ||
| 119 | } | ||
| 120 | 123 | ||
| 121 | device = kzalloc(sizeof(*device), GFP_KERNEL); | 124 | device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL); |
| 122 | if (!device) { | 125 | if (!device) |
| 123 | err = -ENOMEM; | 126 | return -ENOMEM; |
| 124 | goto err_kzalloc; | ||
| 125 | } | ||
| 126 | device->pdev = pdev; | ||
| 127 | pci_set_drvdata(pdev, device); | ||
| 128 | device->iobase = iobase; | ||
| 129 | 127 | ||
| 130 | pci_set_master(pdev); | 128 | pci_set_master(pdev); |
| 131 | 129 | ||
| 132 | switch (readb(iobase + IOAT_VER_OFFSET)) { | 130 | device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); |
| 133 | case IOAT_VER_1_2: | 131 | if (!device) |
| 134 | device->dma = ioat_dma_probe(pdev, iobase); | 132 | return -ENOMEM; |
| 135 | if (device->dma && ioat_dca_enabled) | 133 | pci_set_drvdata(pdev, device); |
| 136 | device->dca = ioat_dca_init(pdev, iobase); | ||
| 137 | break; | ||
| 138 | case IOAT_VER_2_0: | ||
| 139 | device->dma = ioat_dma_probe(pdev, iobase); | ||
| 140 | if (device->dma && ioat_dca_enabled) | ||
| 141 | device->dca = ioat2_dca_init(pdev, iobase); | ||
| 142 | break; | ||
| 143 | case IOAT_VER_3_0: | ||
| 144 | device->dma = ioat_dma_probe(pdev, iobase); | ||
| 145 | if (device->dma && ioat_dca_enabled) | ||
| 146 | device->dca = ioat3_dca_init(pdev, iobase); | ||
| 147 | break; | ||
| 148 | default: | ||
| 149 | err = -ENODEV; | ||
| 150 | break; | ||
| 151 | } | ||
| 152 | if (!device->dma) | ||
| 153 | err = -ENODEV; | ||
| 154 | 134 | ||
| 155 | if (err) | 135 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); |
| 156 | goto err_version; | 136 | if (device->version == IOAT_VER_1_2) |
| 137 | err = ioat1_dma_probe(device, ioat_dca_enabled); | ||
| 138 | else if (device->version == IOAT_VER_2_0) | ||
| 139 | err = ioat2_dma_probe(device, ioat_dca_enabled); | ||
| 140 | else if (device->version >= IOAT_VER_3_0) | ||
| 141 | err = ioat3_dma_probe(device, ioat_dca_enabled); | ||
| 142 | else | ||
| 143 | return -ENODEV; | ||
| 144 | |||
| 145 | if (err) { | ||
| 146 | dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); | ||
| 147 | return -ENODEV; | ||
| 148 | } | ||
| 157 | 149 | ||
| 158 | return 0; | 150 | return 0; |
| 159 | |||
| 160 | err_version: | ||
| 161 | kfree(device); | ||
| 162 | err_kzalloc: | ||
| 163 | iounmap(iobase); | ||
| 164 | err_ioremap: | ||
| 165 | err_set_dma_mask: | ||
| 166 | pci_release_regions(pdev); | ||
| 167 | pci_disable_device(pdev); | ||
| 168 | err_request_regions: | ||
| 169 | err_enable_device: | ||
| 170 | return err; | ||
| 171 | } | 151 | } |
| 172 | 152 | ||
| 173 | static void __devexit ioat_remove(struct pci_dev *pdev) | 153 | static void __devexit ioat_remove(struct pci_dev *pdev) |
| 174 | { | 154 | { |
| 175 | struct ioat_device *device = pci_get_drvdata(pdev); | 155 | struct ioatdma_device *device = pci_get_drvdata(pdev); |
| 156 | |||
| 157 | if (!device) | ||
| 158 | return; | ||
| 176 | 159 | ||
| 177 | dev_err(&pdev->dev, "Removing dma and dca services\n"); | 160 | dev_err(&pdev->dev, "Removing dma and dca services\n"); |
| 178 | if (device->dca) { | 161 | if (device->dca) { |
| @@ -180,13 +163,7 @@ static void __devexit ioat_remove(struct pci_dev *pdev) | |||
| 180 | free_dca_provider(device->dca); | 163 | free_dca_provider(device->dca); |
| 181 | device->dca = NULL; | 164 | device->dca = NULL; |
| 182 | } | 165 | } |
| 183 | 166 | ioat_dma_remove(device); | |
| 184 | if (device->dma) { | ||
| 185 | ioat_dma_remove(device->dma); | ||
| 186 | device->dma = NULL; | ||
| 187 | } | ||
| 188 | |||
| 189 | kfree(device); | ||
| 190 | } | 167 | } |
| 191 | 168 | ||
| 192 | static int __init ioat_init_module(void) | 169 | static int __init ioat_init_module(void) |
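The rewritten probe path above reads a single version byte and hands off to the matching per-generation probe routine, with anything at or above 3.0 taking the ioat3 path. The toy dispatcher below mirrors only that control flow; the 0x12/0x20/0x30 values follow the IOAT_VER_* convention of encoding major.minor in one byte, and the probe functions are placeholders, not the driver's.

```c
/* Sketch only: the version-based dispatch shape of the new ioat_pci_probe(). */
#include <stdio.h>

#define VER_1_2 0x12  /* mirrors the IOAT_VER_* major.minor-in-a-byte convention */
#define VER_2_0 0x20
#define VER_3_0 0x30

static int probe_v1(int dca) { printf("ioat1 probe, dca=%d\n", dca); return 0; }
static int probe_v2(int dca) { printf("ioat2 probe, dca=%d\n", dca); return 0; }
static int probe_v3(int dca) { printf("ioat3 probe, dca=%d\n", dca); return 0; }

static int dispatch(unsigned char version, int dca_enabled)
{
    if (version == VER_1_2)
        return probe_v1(dca_enabled);
    else if (version == VER_2_0)
        return probe_v2(dca_enabled);
    else if (version >= VER_3_0)
        return probe_v3(dca_enabled);
    return -19; /* -ENODEV: unknown hardware generation */
}

int main(void)
{
    return dispatch(VER_3_0, 1);
}
```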
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioat/registers.h index 49bc277424f..e4334a19538 100644 --- a/drivers/dma/ioatdma_registers.h +++ b/drivers/dma/ioat/registers.h | |||
| @@ -75,7 +75,11 @@ | |||
| 75 | #define IOAT_CHANCTRL_ERR_INT_EN 0x0010 | 75 | #define IOAT_CHANCTRL_ERR_INT_EN 0x0010 |
| 76 | #define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008 | 76 | #define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008 |
| 77 | #define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 | 77 | #define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 |
| 78 | #define IOAT_CHANCTRL_INT_DISABLE 0x0001 | 78 | #define IOAT_CHANCTRL_INT_REARM 0x0001 |
| 79 | #define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\ | ||
| 80 | IOAT_CHANCTRL_ERR_COMPLETION_EN |\ | ||
| 81 | IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\ | ||
| 82 | IOAT_CHANCTRL_ERR_INT_EN) | ||
| 79 | 83 | ||
| 80 | #define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ | 84 | #define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ |
| 81 | #define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ | 85 | #define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ |
| @@ -94,14 +98,14 @@ | |||
| 94 | #define IOAT2_CHANSTS_OFFSET_HIGH 0x0C | 98 | #define IOAT2_CHANSTS_OFFSET_HIGH 0x0C |
| 95 | #define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ | 99 | #define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ |
| 96 | ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH) | 100 | ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH) |
| 97 | #define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F | 101 | #define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL) |
| 98 | #define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010 | 102 | #define IOAT_CHANSTS_SOFT_ERR 0x10ULL |
| 99 | #define IOAT_CHANSTS_UNAFFILIATED_ERR 0x0000000000000008 | 103 | #define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL |
| 100 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007 | 104 | #define IOAT_CHANSTS_STATUS 0x7ULL |
| 101 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0 | 105 | #define IOAT_CHANSTS_ACTIVE 0x0 |
| 102 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1 | 106 | #define IOAT_CHANSTS_DONE 0x1 |
| 103 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2 | 107 | #define IOAT_CHANSTS_SUSPENDED 0x2 |
| 104 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3 | 108 | #define IOAT_CHANSTS_HALTED 0x3 |
| 105 | 109 | ||
| 106 | 110 | ||
| 107 | 111 | ||
| @@ -204,18 +208,18 @@ | |||
| 204 | #define IOAT_CDAR_OFFSET_HIGH 0x24 | 208 | #define IOAT_CDAR_OFFSET_HIGH 0x24 |
| 205 | 209 | ||
| 206 | #define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */ | 210 | #define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */ |
| 207 | #define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR 0x0001 | 211 | #define IOAT_CHANERR_SRC_ADDR_ERR 0x0001 |
| 208 | #define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR 0x0002 | 212 | #define IOAT_CHANERR_DEST_ADDR_ERR 0x0002 |
| 209 | #define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR 0x0004 | 213 | #define IOAT_CHANERR_NEXT_ADDR_ERR 0x0004 |
| 210 | #define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR 0x0008 | 214 | #define IOAT_CHANERR_NEXT_DESC_ALIGN_ERR 0x0008 |
| 211 | #define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010 | 215 | #define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010 |
| 212 | #define IOAT_CHANERR_CHANCMD_ERR 0x0020 | 216 | #define IOAT_CHANERR_CHANCMD_ERR 0x0020 |
| 213 | #define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040 | 217 | #define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040 |
| 214 | #define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080 | 218 | #define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080 |
| 215 | #define IOAT_CHANERR_READ_DATA_ERR 0x0100 | 219 | #define IOAT_CHANERR_READ_DATA_ERR 0x0100 |
| 216 | #define IOAT_CHANERR_WRITE_DATA_ERR 0x0200 | 220 | #define IOAT_CHANERR_WRITE_DATA_ERR 0x0200 |
| 217 | #define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR 0x0400 | 221 | #define IOAT_CHANERR_CONTROL_ERR 0x0400 |
| 218 | #define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR 0x0800 | 222 | #define IOAT_CHANERR_LENGTH_ERR 0x0800 |
| 219 | #define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000 | 223 | #define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000 |
| 220 | #define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 | 224 | #define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 |
| 221 | #define IOAT_CHANERR_SOFT_ERR 0x4000 | 225 | #define IOAT_CHANERR_SOFT_ERR 0x4000 |
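The registers.h hunks above shorten the CHANSTS and CHANERR names and make the completed-descriptor mask explicitly 64-bit. A small sketch of how those masks are typically applied to the 64-bit status/completion value follows; only the three #defines come from the header, the rest is illustrative.

```c
/*
 * Sketch of decoding the 64-bit channel status / completion writeback:
 * the low three bits carry the transfer state and the 64-byte-aligned
 * remainder is the address of the last completed descriptor.
 */
#include <stdio.h>
#include <stdint.h>

#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR	(~0x3fULL)
#define IOAT_CHANSTS_STATUS			0x7ULL
#define IOAT_CHANSTS_HALTED			0x3

int main(void)
{
	uint64_t chansts = 0x00000000deadbec1ULL;	/* example writeback value */
	uint64_t compl_addr = chansts & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
	unsigned int state = chansts & IOAT_CHANSTS_STATUS;

	printf("last completed descriptor @ %#llx, state %u%s\n",
	       (unsigned long long)compl_addr, state,
	       state == IOAT_CHANSTS_HALTED ? " (halted)" : "");
	return 0;
}
```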
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c deleted file mode 100644 index a600fc0f796..00000000000 --- a/drivers/dma/ioat_dma.c +++ /dev/null | |||
| @@ -1,1741 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Intel I/OAT DMA Linux driver | ||
| 3 | * Copyright(c) 2004 - 2009 Intel Corporation. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms and conditions of the GNU General Public License, | ||
| 7 | * version 2, as published by the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in | ||
| 19 | * the file called "COPYING". | ||
| 20 | * | ||
| 21 | */ | ||
| 22 | |||
| 23 | /* | ||
| 24 | * This driver supports an Intel I/OAT DMA engine, which does asynchronous | ||
| 25 | * copy operations. | ||
| 26 | */ | ||
| 27 | |||
| 28 | #include <linux/init.h> | ||
| 29 | #include <linux/module.h> | ||
| 30 | #include <linux/pci.h> | ||
| 31 | #include <linux/interrupt.h> | ||
| 32 | #include <linux/dmaengine.h> | ||
| 33 | #include <linux/delay.h> | ||
| 34 | #include <linux/dma-mapping.h> | ||
| 35 | #include <linux/workqueue.h> | ||
| 36 | #include <linux/i7300_idle.h> | ||
| 37 | #include "ioatdma.h" | ||
| 38 | #include "ioatdma_registers.h" | ||
| 39 | #include "ioatdma_hw.h" | ||
| 40 | |||
| 41 | #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) | ||
| 42 | #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) | ||
| 43 | #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) | ||
| 44 | #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) | ||
| 45 | |||
| 46 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) | ||
| 47 | static int ioat_pending_level = 4; | ||
| 48 | module_param(ioat_pending_level, int, 0644); | ||
| 49 | MODULE_PARM_DESC(ioat_pending_level, | ||
| 50 | "high-water mark for pushing ioat descriptors (default: 4)"); | ||
| 51 | |||
| 52 | #define RESET_DELAY msecs_to_jiffies(100) | ||
| 53 | #define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) | ||
| 54 | static void ioat_dma_chan_reset_part2(struct work_struct *work); | ||
| 55 | static void ioat_dma_chan_watchdog(struct work_struct *work); | ||
| 56 | |||
| 57 | /* | ||
| 58 | * workaround for IOAT ver.3.0 null descriptor issue | ||
| 59 | * (channel returns error when size is 0) | ||
| 60 | */ | ||
| 61 | #define NULL_DESC_BUFFER_SIZE 1 | ||
| 62 | |||
| 63 | /* internal functions */ | ||
| 64 | static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); | ||
| 65 | static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); | ||
| 66 | |||
| 67 | static struct ioat_desc_sw * | ||
| 68 | ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); | ||
| 69 | static struct ioat_desc_sw * | ||
| 70 | ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); | ||
| 71 | |||
| 72 | static inline struct ioat_dma_chan *ioat_lookup_chan_by_index( | ||
| 73 | struct ioatdma_device *device, | ||
| 74 | int index) | ||
| 75 | { | ||
| 76 | return device->idx[index]; | ||
| 77 | } | ||
| 78 | |||
| 79 | /** | ||
| 80 | * ioat_dma_do_interrupt - handler used for single vector interrupt mode | ||
| 81 | * @irq: interrupt id | ||
| 82 | * @data: interrupt data | ||
| 83 | */ | ||
| 84 | static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) | ||
| 85 | { | ||
| 86 | struct ioatdma_device *instance = data; | ||
| 87 | struct ioat_dma_chan *ioat_chan; | ||
| 88 | unsigned long attnstatus; | ||
| 89 | int bit; | ||
| 90 | u8 intrctrl; | ||
| 91 | |||
| 92 | intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET); | ||
| 93 | |||
| 94 | if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN)) | ||
| 95 | return IRQ_NONE; | ||
| 96 | |||
| 97 | if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) { | ||
| 98 | writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); | ||
| 99 | return IRQ_NONE; | ||
| 100 | } | ||
| 101 | |||
| 102 | attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); | ||
| 103 | for_each_bit(bit, &attnstatus, BITS_PER_LONG) { | ||
| 104 | ioat_chan = ioat_lookup_chan_by_index(instance, bit); | ||
| 105 | tasklet_schedule(&ioat_chan->cleanup_task); | ||
| 106 | } | ||
| 107 | |||
| 108 | writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); | ||
| 109 | return IRQ_HANDLED; | ||
| 110 | } | ||
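For reference, a user-space sketch of the fan-out that ioat_dma_do_interrupt() performs: the attention status register is treated as a bitmap and each set bit selects one channel whose cleanup work is scheduled. schedule_cleanup() is a stand-in for tasklet_schedule(); nothing here touches hardware.

```c
/* Sketch of the single-vector interrupt fan-out over a channel bitmap. */
#include <stdio.h>

#define MAX_CHANNELS 32

static void schedule_cleanup(int chan)
{
	printf("schedule cleanup for channel %d\n", chan);
}

static void fan_out(unsigned long attnstatus)
{
	for (int bit = 0; bit < MAX_CHANNELS; bit++)
		if (attnstatus & (1UL << bit))
			schedule_cleanup(bit);
}

int main(void)
{
	fan_out(0x0000000aUL);	/* channels 1 and 3 raised the interrupt */
	return 0;
}
```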
| 111 | |||
| 112 | /** | ||
| 113 | * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode | ||
| 114 | * @irq: interrupt id | ||
| 115 | * @data: interrupt data | ||
| 116 | */ | ||
| 117 | static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) | ||
| 118 | { | ||
| 119 | struct ioat_dma_chan *ioat_chan = data; | ||
| 120 | |||
| 121 | tasklet_schedule(&ioat_chan->cleanup_task); | ||
| 122 | |||
| 123 | return IRQ_HANDLED; | ||
| 124 | } | ||
| 125 | |||
| 126 | static void ioat_dma_cleanup_tasklet(unsigned long data); | ||
| 127 | |||
| 128 | /** | ||
| 129 | * ioat_dma_enumerate_channels - find and initialize the device's channels | ||
| 130 | * @device: the device to be enumerated | ||
| 131 | */ | ||
| 132 | static int ioat_dma_enumerate_channels(struct ioatdma_device *device) | ||
| 133 | { | ||
| 134 | u8 xfercap_scale; | ||
| 135 | u32 xfercap; | ||
| 136 | int i; | ||
| 137 | struct ioat_dma_chan *ioat_chan; | ||
| 138 | |||
| 139 | /* | ||
| 140 | * IOAT ver.3 workarounds | ||
| 141 | */ | ||
| 142 | if (device->version == IOAT_VER_3_0) { | ||
| 143 | u32 chan_err_mask; | ||
| 144 | u16 dev_id; | ||
| 145 | u32 dmauncerrsts; | ||
| 146 | |||
| 147 | /* | ||
| 148 | * Write CHANERRMSK_INT with 3E07h to mask out the errors | ||
| 149 | * that can cause stability issues for IOAT ver.3 | ||
| 150 | */ | ||
| 151 | chan_err_mask = 0x3E07; | ||
| 152 | pci_write_config_dword(device->pdev, | ||
| 153 | IOAT_PCI_CHANERRMASK_INT_OFFSET, | ||
| 154 | chan_err_mask); | ||
| 155 | |||
| 156 | /* | ||
| 157 | * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | ||
| 158 | * (workaround for spurious config parity error after restart) | ||
| 159 | */ | ||
| 160 | pci_read_config_word(device->pdev, | ||
| 161 | IOAT_PCI_DEVICE_ID_OFFSET, | ||
| 162 | &dev_id); | ||
| 163 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { | ||
| 164 | dmauncerrsts = 0x10; | ||
| 165 | pci_write_config_dword(device->pdev, | ||
| 166 | IOAT_PCI_DMAUNCERRSTS_OFFSET, | ||
| 167 | dmauncerrsts); | ||
| 168 | } | ||
| 169 | } | ||
| 170 | |||
| 171 | device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); | ||
| 172 | xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); | ||
| 173 | xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); | ||
| 174 | |||
| 175 | #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL | ||
| 176 | if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) { | ||
| 177 | device->common.chancnt--; | ||
| 178 | } | ||
| 179 | #endif | ||
| 180 | for (i = 0; i < device->common.chancnt; i++) { | ||
| 181 | ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); | ||
| 182 | if (!ioat_chan) { | ||
| 183 | device->common.chancnt = i; | ||
| 184 | break; | ||
| 185 | } | ||
| 186 | |||
| 187 | ioat_chan->device = device; | ||
| 188 | ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); | ||
| 189 | ioat_chan->xfercap = xfercap; | ||
| 190 | ioat_chan->desccount = 0; | ||
| 191 | INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2); | ||
| 192 | if (ioat_chan->device->version == IOAT_VER_2_0) | ||
| 193 | writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | | ||
| 194 | IOAT_DMA_DCA_ANY_CPU, | ||
| 195 | ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); | ||
| 196 | else if (ioat_chan->device->version == IOAT_VER_3_0) | ||
| 197 | writel(IOAT_DMA_DCA_ANY_CPU, | ||
| 198 | ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); | ||
| 199 | spin_lock_init(&ioat_chan->cleanup_lock); | ||
| 200 | spin_lock_init(&ioat_chan->desc_lock); | ||
| 201 | INIT_LIST_HEAD(&ioat_chan->free_desc); | ||
| 202 | INIT_LIST_HEAD(&ioat_chan->used_desc); | ||
| 203 | /* This should be made common somewhere in dmaengine.c */ | ||
| 204 | ioat_chan->common.device = &device->common; | ||
| 205 | list_add_tail(&ioat_chan->common.device_node, | ||
| 206 | &device->common.channels); | ||
| 207 | device->idx[i] = ioat_chan; | ||
| 208 | tasklet_init(&ioat_chan->cleanup_task, | ||
| 209 | ioat_dma_cleanup_tasklet, | ||
| 210 | (unsigned long) ioat_chan); | ||
| 211 | tasklet_disable(&ioat_chan->cleanup_task); | ||
| 212 | } | ||
| 213 | return device->common.chancnt; | ||
| 214 | } | ||
| 215 | |||
| 216 | /** | ||
| 217 | * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended | ||
| 218 | * descriptors to hw | ||
| 219 | * @chan: DMA channel handle | ||
| 220 | */ | ||
| 221 | static inline void __ioat1_dma_memcpy_issue_pending( | ||
| 222 | struct ioat_dma_chan *ioat_chan) | ||
| 223 | { | ||
| 224 | ioat_chan->pending = 0; | ||
| 225 | writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET); | ||
| 226 | } | ||
| 227 | |||
| 228 | static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) | ||
| 229 | { | ||
| 230 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
| 231 | |||
| 232 | if (ioat_chan->pending > 0) { | ||
| 233 | spin_lock_bh(&ioat_chan->desc_lock); | ||
| 234 | __ioat1_dma_memcpy_issue_pending(ioat_chan); | ||
| 235 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 236 | } | ||
| 237 | } | ||
| 238 | |||
| 239 | static inline void __ioat2_dma_memcpy_issue_pending( | ||
| 240 | struct ioat_dma_chan *ioat_chan) | ||
| 241 | { | ||
| 242 | ioat_chan->pending = 0; | ||
| 243 | writew(ioat_chan->dmacount, | ||
| 244 | ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); | ||
| 245 | } | ||
| 246 | |||
| 247 | static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) | ||
| 248 | { | ||
| 249 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
| 250 | |||
| 251 | if (ioat_chan->pending > 0) { | ||
| 252 | spin_lock_bh(&ioat_chan->desc_lock); | ||
| 253 | __ioat2_dma_memcpy_issue_pending(ioat_chan); | ||
| 254 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 255 | } | ||
| 256 | } | ||
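Both issue_pending paths implement the same high-water-mark batching: submissions only bump a software pending count, and the doorbell (the CHANCMD append for CB1, the DMACOUNT write for CB2) is rung once the count reaches ioat_pending_level or the client flushes explicitly. A condensed sketch, with kick_hardware() standing in for those register writes:

```c
/* Sketch of the pending/high-water-mark batching used by issue_pending. */
#include <stdio.h>

static int pending_level = 4;	/* mirrors the ioat_pending_level module param */
static int pending;

static void kick_hardware(void)
{
	printf("doorbell: flushing %d descriptors\n", pending);
	pending = 0;
}

static void submit(int desc_count)
{
	pending += desc_count;
	if (pending >= pending_level)
		kick_hardware();
}

static void issue_pending(void)
{
	if (pending > 0)
		kick_hardware();
}

int main(void)
{
	submit(2);		/* batched */
	submit(3);		/* crosses the high-water mark -> doorbell */
	submit(1);
	issue_pending();	/* explicit flush */
	return 0;
}
```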
| 257 | |||
| 258 | |||
| 259 | /** | ||
| 260 | * ioat_dma_chan_reset_part2 - reinit the channel after a reset | ||
| 261 | */ | ||
| 262 | static void ioat_dma_chan_reset_part2(struct work_struct *work) | ||
| 263 | { | ||
| 264 | struct ioat_dma_chan *ioat_chan = | ||
| 265 | container_of(work, struct ioat_dma_chan, work.work); | ||
| 266 | struct ioat_desc_sw *desc; | ||
| 267 | |||
| 268 | spin_lock_bh(&ioat_chan->cleanup_lock); | ||
| 269 | spin_lock_bh(&ioat_chan->desc_lock); | ||
| 270 | |||
| 271 | ioat_chan->completion_virt->low = 0; | ||
| 272 | ioat_chan->completion_virt->high = 0; | ||
| 273 | ioat_chan->pending = 0; | ||
| 274 | |||
| 275 | /* | ||
| 276 | * count the descriptors waiting, and be sure to do it | ||
| 277 | * right for both the CB1 line and the CB2 ring | ||
| 278 | */ | ||
| 279 | ioat_chan->dmacount = 0; | ||
| 280 | if (ioat_chan->used_desc.prev) { | ||
| 281 | desc = to_ioat_desc(ioat_chan->used_desc.prev); | ||
| 282 | do { | ||
| 283 | ioat_chan->dmacount++; | ||
| 284 | desc = to_ioat_desc(desc->node.next); | ||
| 285 | } while (&desc->node != ioat_chan->used_desc.next); | ||
| 286 | } | ||
| 287 | |||
| 288 | /* | ||
| 289 | * write the new starting descriptor address | ||
| 290 | * this puts channel engine into ARMED state | ||
| 291 | */ | ||
| 292 | desc = to_ioat_desc(ioat_chan->used_desc.prev); | ||
| 293 | switch (ioat_chan->device->version) { | ||
| 294 | case IOAT_VER_1_2: | ||
| 295 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | ||
| 296 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); | ||
| 297 | writel(((u64) desc->async_tx.phys) >> 32, | ||
| 298 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); | ||
| 299 | |||
| 300 | writeb(IOAT_CHANCMD_START, ioat_chan->reg_base | ||
| 301 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | ||
| 302 | break; | ||
| 303 | case IOAT_VER_2_0: | ||
| 304 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | ||
| 305 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | ||
| 306 | writel(((u64) desc->async_tx.phys) >> 32, | ||
| 307 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | ||
| 308 | |||
| 309 | /* tell the engine to go with what's left to be done */ | ||
| 310 | writew(ioat_chan->dmacount, | ||
| 311 | ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); | ||
| 312 | |||
| 313 | break; | ||
| 314 | } | ||
| 315 | dev_err(&ioat_chan->device->pdev->dev, | ||
| 316 | "chan%d reset - %d descs waiting, %d total desc\n", | ||
| 317 | chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); | ||
| 318 | |||
| 319 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 320 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
| 321 | } | ||
| 322 | |||
| 323 | /** | ||
| 324 | * ioat_dma_reset_channel - restart a channel | ||
| 325 | * @ioat_chan: IOAT DMA channel handle | ||
| 326 | */ | ||
| 327 | static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan) | ||
| 328 | { | ||
| 329 | u32 chansts, chanerr; | ||
| 330 | |||
| 331 | if (!ioat_chan->used_desc.prev) | ||
| 332 | return; | ||
| 333 | |||
| 334 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 335 | chansts = (ioat_chan->completion_virt->low | ||
| 336 | & IOAT_CHANSTS_DMA_TRANSFER_STATUS); | ||
| 337 | if (chanerr) { | ||
| 338 | dev_err(&ioat_chan->device->pdev->dev, | ||
| 339 | "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", | ||
| 340 | chan_num(ioat_chan), chansts, chanerr); | ||
| 341 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 342 | } | ||
| 343 | |||
| 344 | /* | ||
| 345 | * whack it upside the head with a reset | ||
| 346 | * and wait for things to settle out. | ||
| 347 | * force the pending count to a really big negative | ||
| 348 | * to make sure no one forces an issue_pending | ||
| 349 | * while we're waiting. | ||
| 350 | */ | ||
| 351 | |||
| 352 | spin_lock_bh(&ioat_chan->desc_lock); | ||
| 353 | ioat_chan->pending = INT_MIN; | ||
| 354 | writeb(IOAT_CHANCMD_RESET, | ||
| 355 | ioat_chan->reg_base | ||
| 356 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | ||
| 357 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 358 | |||
| 359 | /* schedule the 2nd half instead of sleeping a long time */ | ||
| 360 | schedule_delayed_work(&ioat_chan->work, RESET_DELAY); | ||
| 361 | } | ||
| 362 | |||
| 363 | /** | ||
| 364 | * ioat_dma_chan_watchdog - watch for stuck channels | ||
| 365 | */ | ||
| 366 | static void ioat_dma_chan_watchdog(struct work_struct *work) | ||
| 367 | { | ||
| 368 | struct ioatdma_device *device = | ||
| 369 | container_of(work, struct ioatdma_device, work.work); | ||
| 370 | struct ioat_dma_chan *ioat_chan; | ||
| 371 | int i; | ||
| 372 | |||
| 373 | union { | ||
| 374 | u64 full; | ||
| 375 | struct { | ||
| 376 | u32 low; | ||
| 377 | u32 high; | ||
| 378 | }; | ||
| 379 | } completion_hw; | ||
| 380 | unsigned long compl_desc_addr_hw; | ||
| 381 | |||
| 382 | for (i = 0; i < device->common.chancnt; i++) { | ||
| 383 | ioat_chan = ioat_lookup_chan_by_index(device, i); | ||
| 384 | |||
| 385 | if (ioat_chan->device->version == IOAT_VER_1_2 | ||
| 386 | /* have we started processing anything yet? */ | ||
| 387 | && ioat_chan->last_completion | ||
| 388 | /* have we completed any since last watchdog cycle? */ | ||
| 389 | && (ioat_chan->last_completion == | ||
| 390 | ioat_chan->watchdog_completion) | ||
| 391 | /* has TCP stuck on one cookie since last watchdog? */ | ||
| 392 | && (ioat_chan->watchdog_tcp_cookie == | ||
| 393 | ioat_chan->watchdog_last_tcp_cookie) | ||
| 394 | && (ioat_chan->watchdog_tcp_cookie != | ||
| 395 | ioat_chan->completed_cookie) | ||
| 396 | /* is there something in the chain to be processed? */ | ||
| 397 | /* CB1 chain always has at least the last one processed */ | ||
| 398 | && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next) | ||
| 399 | && ioat_chan->pending == 0) { | ||
| 400 | |||
| 401 | /* | ||
| 402 | * check CHANSTS register for completed | ||
| 403 | * descriptor address. | ||
| 404 | * if it is different than completion writeback, | ||
| 405 | * it is not zero | ||
| 406 | * and it has changed since the last watchdog | ||
| 407 | * we can assume that channel | ||
| 408 | * is still working correctly | ||
| 409 | * and the problem is in completion writeback. | ||
| 410 | * update completion writeback | ||
| 411 | * with actual CHANSTS value | ||
| 412 | * else | ||
| 413 | * try resetting the channel | ||
| 414 | */ | ||
| 415 | |||
| 416 | completion_hw.low = readl(ioat_chan->reg_base + | ||
| 417 | IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version)); | ||
| 418 | completion_hw.high = readl(ioat_chan->reg_base + | ||
| 419 | IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version)); | ||
| 420 | #if (BITS_PER_LONG == 64) | ||
| 421 | compl_desc_addr_hw = | ||
| 422 | completion_hw.full | ||
| 423 | & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | ||
| 424 | #else | ||
| 425 | compl_desc_addr_hw = | ||
| 426 | completion_hw.low & IOAT_LOW_COMPLETION_MASK; | ||
| 427 | #endif | ||
| 428 | |||
| 429 | if ((compl_desc_addr_hw != 0) | ||
| 430 | && (compl_desc_addr_hw != ioat_chan->watchdog_completion) | ||
| 431 | && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) { | ||
| 432 | ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw; | ||
| 433 | ioat_chan->completion_virt->low = completion_hw.low; | ||
| 434 | ioat_chan->completion_virt->high = completion_hw.high; | ||
| 435 | } else { | ||
| 436 | ioat_dma_reset_channel(ioat_chan); | ||
| 437 | ioat_chan->watchdog_completion = 0; | ||
| 438 | ioat_chan->last_compl_desc_addr_hw = 0; | ||
| 439 | } | ||
| 440 | |||
| 441 | /* | ||
| 442 | * for version 2.0 if there are descriptors yet to be processed | ||
| 443 | * and the last completed hasn't changed since the last watchdog | ||
| 444 | * if they haven't hit the pending level | ||
| 445 | * issue the pending to push them through | ||
| 446 | * else | ||
| 447 | * try resetting the channel | ||
| 448 | */ | ||
| 449 | } else if (ioat_chan->device->version == IOAT_VER_2_0 | ||
| 450 | && ioat_chan->used_desc.prev | ||
| 451 | && ioat_chan->last_completion | ||
| 452 | && ioat_chan->last_completion == ioat_chan->watchdog_completion) { | ||
| 453 | |||
| 454 | if (ioat_chan->pending < ioat_pending_level) | ||
| 455 | ioat2_dma_memcpy_issue_pending(&ioat_chan->common); | ||
| 456 | else { | ||
| 457 | ioat_dma_reset_channel(ioat_chan); | ||
| 458 | ioat_chan->watchdog_completion = 0; | ||
| 459 | } | ||
| 460 | } else { | ||
| 461 | ioat_chan->last_compl_desc_addr_hw = 0; | ||
| 462 | ioat_chan->watchdog_completion | ||
| 463 | = ioat_chan->last_completion; | ||
| 464 | } | ||
| 465 | |||
| 466 | ioat_chan->watchdog_last_tcp_cookie = | ||
| 467 | ioat_chan->watchdog_tcp_cookie; | ||
| 468 | } | ||
| 469 | |||
| 470 | schedule_delayed_work(&device->work, WATCHDOG_DELAY); | ||
| 471 | } | ||
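A condensed sketch of the CB1 watchdog heuristic above, under the simplifying assumption that a single 64-bit CHANSTS sample is compared per pass: if nothing has completed since the previous pass but the live CHANSTS has moved, only the completion writeback is stale and gets refreshed; otherwise the channel is reset. The types and helpers here are hypothetical.

```c
/* Simplified model of the stuck-channel check done by the watchdog. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct chan_state {
	uint64_t last_completion;	/* from completion writeback */
	uint64_t watchdog_completion;	/* snapshot from previous pass */
	uint64_t last_chansts;		/* previous raw CHANSTS sample */
};

static bool watchdog_pass(struct chan_state *c, uint64_t chansts_now)
{
	bool stalled = c->last_completion &&
		       c->last_completion == c->watchdog_completion;

	if (stalled && chansts_now && chansts_now != c->last_chansts) {
		/* hardware still advancing: refresh the stale writeback */
		c->last_completion = chansts_now;
	} else if (stalled) {
		/* genuinely stuck: the driver would reset the channel here */
		puts("resetting channel");
		c->watchdog_completion = 0;
		return true;
	}
	c->watchdog_completion = c->last_completion;
	c->last_chansts = chansts_now;
	return false;
}

int main(void)
{
	struct chan_state c = { .last_completion = 0x1000,
				.watchdog_completion = 0x1000,
				.last_chansts = 0x1000 };
	watchdog_pass(&c, 0x1000);	/* no progress anywhere -> reset */
	return 0;
}
```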
| 472 | |||
| 473 | static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) | ||
| 474 | { | ||
| 475 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); | ||
| 476 | struct ioat_desc_sw *first = tx_to_ioat_desc(tx); | ||
| 477 | struct ioat_desc_sw *prev, *new; | ||
| 478 | struct ioat_dma_descriptor *hw; | ||
| 479 | dma_cookie_t cookie; | ||
| 480 | LIST_HEAD(new_chain); | ||
| 481 | u32 copy; | ||
| 482 | size_t len; | ||
| 483 | dma_addr_t src, dst; | ||
| 484 | unsigned long orig_flags; | ||
| 485 | unsigned int desc_count = 0; | ||
| 486 | |||
| 487 | /* src and dest and len are stored in the initial descriptor */ | ||
| 488 | len = first->len; | ||
| 489 | src = first->src; | ||
| 490 | dst = first->dst; | ||
| 491 | orig_flags = first->async_tx.flags; | ||
| 492 | new = first; | ||
| 493 | |||
| 494 | spin_lock_bh(&ioat_chan->desc_lock); | ||
| 495 | prev = to_ioat_desc(ioat_chan->used_desc.prev); | ||
| 496 | prefetch(prev->hw); | ||
| 497 | do { | ||
| 498 | copy = min_t(size_t, len, ioat_chan->xfercap); | ||
| 499 | |||
| 500 | async_tx_ack(&new->async_tx); | ||
| 501 | |||
| 502 | hw = new->hw; | ||
| 503 | hw->size = copy; | ||
| 504 | hw->ctl = 0; | ||
| 505 | hw->src_addr = src; | ||
| 506 | hw->dst_addr = dst; | ||
| 507 | hw->next = 0; | ||
| 508 | |||
| 509 | /* chain together the physical address list for the HW */ | ||
| 510 | wmb(); | ||
| 511 | prev->hw->next = (u64) new->async_tx.phys; | ||
| 512 | |||
| 513 | len -= copy; | ||
| 514 | dst += copy; | ||
| 515 | src += copy; | ||
| 516 | |||
| 517 | list_add_tail(&new->node, &new_chain); | ||
| 518 | desc_count++; | ||
| 519 | prev = new; | ||
| 520 | } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); | ||
| 521 | |||
| 522 | if (!new) { | ||
| 523 | dev_err(&ioat_chan->device->pdev->dev, | ||
| 524 | "tx submit failed\n"); | ||
| 525 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 526 | return -ENOMEM; | ||
| 527 | } | ||
| 528 | |||
| 529 | hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; | ||
| 530 | if (first->async_tx.callback) { | ||
| 531 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; | ||
| 532 | if (first != new) { | ||
| 533 | /* move callback into the last desc */ | ||
| 534 | new->async_tx.callback = first->async_tx.callback; | ||
| 535 | new->async_tx.callback_param | ||
| 536 | = first->async_tx.callback_param; | ||
| 537 | first->async_tx.callback = NULL; | ||
| 538 | first->async_tx.callback_param = NULL; | ||
| 539 | } | ||
| 540 | } | ||
| 541 | |||
| 542 | new->tx_cnt = desc_count; | ||
| 543 | new->async_tx.flags = orig_flags; /* client is in control of this ack */ | ||
| 544 | |||
| 545 | /* store the original values for use in later cleanup */ | ||
| 546 | if (new != first) { | ||
| 547 | new->src = first->src; | ||
| 548 | new->dst = first->dst; | ||
| 549 | new->len = first->len; | ||
| 550 | } | ||
| 551 | |||
| 552 | /* cookie incr and addition to used_list must be atomic */ | ||
| 553 | cookie = ioat_chan->common.cookie; | ||
| 554 | cookie++; | ||
| 555 | if (cookie < 0) | ||
| 556 | cookie = 1; | ||
| 557 | ioat_chan->common.cookie = new->async_tx.cookie = cookie; | ||
| 558 | |||
| 559 | /* write address into NextDescriptor field of last desc in chain */ | ||
| 560 | to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = | ||
| 561 | first->async_tx.phys; | ||
| 562 | list_splice_tail(&new_chain, &ioat_chan->used_desc); | ||
| 563 | |||
| 564 | ioat_chan->dmacount += desc_count; | ||
| 565 | ioat_chan->pending += desc_count; | ||
| 566 | if (ioat_chan->pending >= ioat_pending_level) | ||
| 567 | __ioat1_dma_memcpy_issue_pending(ioat_chan); | ||
| 568 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 569 | |||
| 570 | return cookie; | ||
| 571 | } | ||
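The core of both tx_submit paths is the loop that breaks one logical copy into descriptors of at most xfercap bytes. A standalone sketch of just that splitting arithmetic (no locking or hardware chaining; the struct is illustrative):

```c
/* Sketch of splitting one copy into xfercap-sized hardware descriptors. */
#include <stdio.h>
#include <stdint.h>

struct hw_desc { uint64_t src, dst; uint32_t size; };

static unsigned int split_copy(uint64_t src, uint64_t dst, size_t len,
			       uint32_t xfercap)
{
	unsigned int count = 0;

	while (len) {
		uint32_t copy = len < xfercap ? (uint32_t)len : xfercap;
		struct hw_desc d = { .src = src, .dst = dst, .size = copy };

		printf("desc %u: %u bytes\n", count, (unsigned)d.size);
		src += copy;
		dst += copy;
		len -= copy;
		count++;
	}
	return count;	/* this is the dmacount/pending increment */
}

int main(void)
{
	split_copy(0x1000, 0x9000, 5 << 20, 1 << 20);	/* 5 MB, 1 MB cap -> 5 descs */
	return 0;
}
```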
| 572 | |||
| 573 | static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) | ||
| 574 | { | ||
| 575 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); | ||
| 576 | struct ioat_desc_sw *first = tx_to_ioat_desc(tx); | ||
| 577 | struct ioat_desc_sw *new; | ||
| 578 | struct ioat_dma_descriptor *hw; | ||
| 579 | dma_cookie_t cookie; | ||
| 580 | u32 copy; | ||
| 581 | size_t len; | ||
| 582 | dma_addr_t src, dst; | ||
| 583 | unsigned long orig_flags; | ||
| 584 | unsigned int desc_count = 0; | ||
| 585 | |||
| 586 | /* src and dest and len are stored in the initial descriptor */ | ||
| 587 | len = first->len; | ||
| 588 | src = first->src; | ||
| 589 | dst = first->dst; | ||
| 590 | orig_flags = first->async_tx.flags; | ||
| 591 | new = first; | ||
| 592 | |||
| 593 | /* | ||
| 594 | * ioat_chan->desc_lock is still in force in version 2 path | ||
| 595 | * it gets unlocked at end of this function | ||
| 596 | */ | ||
| 597 | do { | ||
| 598 | copy = min_t(size_t, len, ioat_chan->xfercap); | ||
| 599 | |||
| 600 | async_tx_ack(&new->async_tx); | ||
| 601 | |||
| 602 | hw = new->hw; | ||
| 603 | hw->size = copy; | ||
| 604 | hw->ctl = 0; | ||
| 605 | hw->src_addr = src; | ||
| 606 | hw->dst_addr = dst; | ||
| 607 | |||
| 608 | len -= copy; | ||
| 609 | dst += copy; | ||
| 610 | src += copy; | ||
| 611 | desc_count++; | ||
| 612 | } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); | ||
| 613 | |||
| 614 | if (!new) { | ||
| 615 | dev_err(&ioat_chan->device->pdev->dev, | ||
| 616 | "tx submit failed\n"); | ||
| 617 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 618 | return -ENOMEM; | ||
| 619 | } | ||
| 620 | |||
| 621 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS; | ||
| 622 | if (first->async_tx.callback) { | ||
| 623 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; | ||
| 624 | if (first != new) { | ||
| 625 | /* move callback into the last desc */ | ||
| 626 | new->async_tx.callback = first->async_tx.callback; | ||
| 627 | new->async_tx.callback_param | ||
| 628 | = first->async_tx.callback_param; | ||
| 629 | first->async_tx.callback = NULL; | ||
| 630 | first->async_tx.callback_param = NULL; | ||
| 631 | } | ||
| 632 | } | ||
| 633 | |||
| 634 | new->tx_cnt = desc_count; | ||
| 635 | new->async_tx.flags = orig_flags; /* client is in control of this ack */ | ||
| 636 | |||
| 637 | /* store the original values for use in later cleanup */ | ||
| 638 | if (new != first) { | ||
| 639 | new->src = first->src; | ||
| 640 | new->dst = first->dst; | ||
| 641 | new->len = first->len; | ||
| 642 | } | ||
| 643 | |||
| 644 | /* cookie incr and addition to used_list must be atomic */ | ||
| 645 | cookie = ioat_chan->common.cookie; | ||
| 646 | cookie++; | ||
| 647 | if (cookie < 0) | ||
| 648 | cookie = 1; | ||
| 649 | ioat_chan->common.cookie = new->async_tx.cookie = cookie; | ||
| 650 | |||
| 651 | ioat_chan->dmacount += desc_count; | ||
| 652 | ioat_chan->pending += desc_count; | ||
| 653 | if (ioat_chan->pending >= ioat_pending_level) | ||
| 654 | __ioat2_dma_memcpy_issue_pending(ioat_chan); | ||
| 655 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 656 | |||
| 657 | return cookie; | ||
| 658 | } | ||
| 659 | |||
| 660 | /** | ||
| 661 | * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair | ||
| 662 | * @ioat_chan: the channel supplying the memory pool for the descriptors | ||
| 663 | * @flags: allocation flags | ||
| 664 | */ | ||
| 665 | static struct ioat_desc_sw *ioat_dma_alloc_descriptor( | ||
| 666 | struct ioat_dma_chan *ioat_chan, | ||
| 667 | gfp_t flags) | ||
| 668 | { | ||
| 669 | struct ioat_dma_descriptor *desc; | ||
| 670 | struct ioat_desc_sw *desc_sw; | ||
| 671 | struct ioatdma_device *ioatdma_device; | ||
| 672 | dma_addr_t phys; | ||
| 673 | |||
| 674 | ioatdma_device = to_ioatdma_device(ioat_chan->common.device); | ||
| 675 | desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys); | ||
| 676 | if (unlikely(!desc)) | ||
| 677 | return NULL; | ||
| 678 | |||
| 679 | desc_sw = kzalloc(sizeof(*desc_sw), flags); | ||
| 680 | if (unlikely(!desc_sw)) { | ||
| 681 | pci_pool_free(ioatdma_device->dma_pool, desc, phys); | ||
| 682 | return NULL; | ||
| 683 | } | ||
| 684 | |||
| 685 | memset(desc, 0, sizeof(*desc)); | ||
| 686 | dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common); | ||
| 687 | switch (ioat_chan->device->version) { | ||
| 688 | case IOAT_VER_1_2: | ||
| 689 | desc_sw->async_tx.tx_submit = ioat1_tx_submit; | ||
| 690 | break; | ||
| 691 | case IOAT_VER_2_0: | ||
| 692 | case IOAT_VER_3_0: | ||
| 693 | desc_sw->async_tx.tx_submit = ioat2_tx_submit; | ||
| 694 | break; | ||
| 695 | } | ||
| 696 | |||
| 697 | desc_sw->hw = desc; | ||
| 698 | desc_sw->async_tx.phys = phys; | ||
| 699 | |||
| 700 | return desc_sw; | ||
| 701 | } | ||
| 702 | |||
| 703 | static int ioat_initial_desc_count = 256; | ||
| 704 | module_param(ioat_initial_desc_count, int, 0644); | ||
| 705 | MODULE_PARM_DESC(ioat_initial_desc_count, | ||
| 706 | "initial descriptors per channel (default: 256)"); | ||
| 707 | |||
| 708 | /** | ||
| 709 | * ioat2_dma_massage_chan_desc - link the descriptors into a circle | ||
| 710 | * @ioat_chan: the channel to be massaged | ||
| 711 | */ | ||
| 712 | static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) | ||
| 713 | { | ||
| 714 | struct ioat_desc_sw *desc, *_desc; | ||
| 715 | |||
| 716 | /* setup used_desc */ | ||
| 717 | ioat_chan->used_desc.next = ioat_chan->free_desc.next; | ||
| 718 | ioat_chan->used_desc.prev = NULL; | ||
| 719 | |||
| 720 | /* pull free_desc out of the circle so that every node is a hw | ||
| 721 | * descriptor, but leave it pointing to the list | ||
| 722 | */ | ||
| 723 | ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next; | ||
| 724 | ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev; | ||
| 725 | |||
| 726 | /* circle link the hw descriptors */ | ||
| 727 | desc = to_ioat_desc(ioat_chan->free_desc.next); | ||
| 728 | desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; | ||
| 729 | list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) { | ||
| 730 | desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; | ||
| 731 | } | ||
| 732 | } | ||
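A sketch of what the "massage" step produces: every descriptor's hardware next pointer ends up pointing at the next descriptor's DMA address, so the CB2 engine sees one closed ring. Array indices and fake addresses stand in for the kernel list and pci_pool allocations.

```c
/* Sketch of circle-linking descriptors by their DMA addresses. */
#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 4

struct desc { uint64_t phys; uint64_t hw_next; };

int main(void)
{
	struct desc ring[RING_SIZE];

	for (int i = 0; i < RING_SIZE; i++)
		ring[i].phys = 0x1000 + 0x40 * i;	/* pretend DMA addresses */

	/* circle-link: each hw next points at the following descriptor */
	for (int i = 0; i < RING_SIZE; i++)
		ring[i].hw_next = ring[(i + 1) % RING_SIZE].phys;

	for (int i = 0; i < RING_SIZE; i++)
		printf("desc %d @ %#llx -> %#llx\n", i,
		       (unsigned long long)ring[i].phys,
		       (unsigned long long)ring[i].hw_next);
	return 0;
}
```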
| 733 | |||
| 734 | /** | ||
| 735 | * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors | ||
| 736 | * @chan: the channel to be filled out | ||
| 737 | */ | ||
| 738 | static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) | ||
| 739 | { | ||
| 740 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
| 741 | struct ioat_desc_sw *desc; | ||
| 742 | u16 chanctrl; | ||
| 743 | u32 chanerr; | ||
| 744 | int i; | ||
| 745 | LIST_HEAD(tmp_list); | ||
| 746 | |||
| 747 | /* have we already been set up? */ | ||
| 748 | if (!list_empty(&ioat_chan->free_desc)) | ||
| 749 | return ioat_chan->desccount; | ||
| 750 | |||
| 751 | /* Setup register to interrupt and write completion status on error */ | ||
| 752 | chanctrl = IOAT_CHANCTRL_ERR_INT_EN | | ||
| 753 | IOAT_CHANCTRL_ANY_ERR_ABORT_EN | | ||
| 754 | IOAT_CHANCTRL_ERR_COMPLETION_EN; | ||
| 755 | writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); | ||
| 756 | |||
| 757 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 758 | if (chanerr) { | ||
| 759 | dev_err(&ioat_chan->device->pdev->dev, | ||
| 760 | "CHANERR = %x, clearing\n", chanerr); | ||
| 761 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 762 | } | ||
| 763 | |||
| 764 | /* Allocate descriptors */ | ||
| 765 | for (i = 0; i < ioat_initial_desc_count; i++) { | ||
| 766 | desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL); | ||
| 767 | if (!desc) { | ||
| 768 | dev_err(&ioat_chan->device->pdev->dev, | ||
| 769 | "Only %d initial descriptors\n", i); | ||
| 770 | break; | ||
| 771 | } | ||
| 772 | list_add_tail(&desc->node, &tmp_list); | ||
| 773 | } | ||
| 774 | spin_lock_bh(&ioat_chan->desc_lock); | ||
| 775 | ioat_chan->desccount = i; | ||
| 776 | list_splice(&tmp_list, &ioat_chan->free_desc); | ||
| 777 | if (ioat_chan->device->version != IOAT_VER_1_2) | ||
| 778 | ioat2_dma_massage_chan_desc(ioat_chan); | ||
| 779 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 780 | |||
| 781 | /* allocate a completion writeback area */ | ||
| 782 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ | ||
| 783 | ioat_chan->completion_virt = | ||
| 784 | pci_pool_alloc(ioat_chan->device->completion_pool, | ||
| 785 | GFP_KERNEL, | ||
| 786 | &ioat_chan->completion_addr); | ||
| 787 | memset(ioat_chan->completion_virt, 0, | ||
| 788 | sizeof(*ioat_chan->completion_virt)); | ||
| 789 | writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF, | ||
| 790 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); | ||
| 791 | writel(((u64) ioat_chan->completion_addr) >> 32, | ||
| 792 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); | ||
| 793 | |||
| 794 | tasklet_enable(&ioat_chan->cleanup_task); | ||
| 795 | ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */ | ||
| 796 | return ioat_chan->desccount; | ||
| 797 | } | ||
| 798 | |||
| 799 | /** | ||
| 800 | * ioat_dma_free_chan_resources - release all the descriptors | ||
| 801 | * @chan: the channel to be cleaned | ||
| 802 | */ | ||
| 803 | static void ioat_dma_free_chan_resources(struct dma_chan *chan) | ||
| 804 | { | ||
| 805 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
| 806 | struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device); | ||
| 807 | struct ioat_desc_sw *desc, *_desc; | ||
| 808 | int in_use_descs = 0; | ||
| 809 | |||
| 810 | /* Before freeing channel resources first check | ||
| 811 | * if they have been previously allocated for this channel. | ||
| 812 | */ | ||
| 813 | if (ioat_chan->desccount == 0) | ||
| 814 | return; | ||
| 815 | |||
| 816 | tasklet_disable(&ioat_chan->cleanup_task); | ||
| 817 | ioat_dma_memcpy_cleanup(ioat_chan); | ||
| 818 | |||
| 819 | /* Delay 100ms after reset to allow internal DMA logic to quiesce | ||
| 820 | * before removing DMA descriptor resources. | ||
| 821 | */ | ||
| 822 | writeb(IOAT_CHANCMD_RESET, | ||
| 823 | ioat_chan->reg_base | ||
| 824 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | ||
| 825 | mdelay(100); | ||
| 826 | |||
| 827 | spin_lock_bh(&ioat_chan->desc_lock); | ||
| 828 | switch (ioat_chan->device->version) { | ||
| 829 | case IOAT_VER_1_2: | ||
| 830 | list_for_each_entry_safe(desc, _desc, | ||
| 831 | &ioat_chan->used_desc, node) { | ||
| 832 | in_use_descs++; | ||
| 833 | list_del(&desc->node); | ||
| 834 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | ||
| 835 | desc->async_tx.phys); | ||
| 836 | kfree(desc); | ||
| 837 | } | ||
| 838 | list_for_each_entry_safe(desc, _desc, | ||
| 839 | &ioat_chan->free_desc, node) { | ||
| 840 | list_del(&desc->node); | ||
| 841 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | ||
| 842 | desc->async_tx.phys); | ||
| 843 | kfree(desc); | ||
| 844 | } | ||
| 845 | break; | ||
| 846 | case IOAT_VER_2_0: | ||
| 847 | case IOAT_VER_3_0: | ||
| 848 | list_for_each_entry_safe(desc, _desc, | ||
| 849 | ioat_chan->free_desc.next, node) { | ||
| 850 | list_del(&desc->node); | ||
| 851 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | ||
| 852 | desc->async_tx.phys); | ||
| 853 | kfree(desc); | ||
| 854 | } | ||
| 855 | desc = to_ioat_desc(ioat_chan->free_desc.next); | ||
| 856 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, | ||
| 857 | desc->async_tx.phys); | ||
| 858 | kfree(desc); | ||
| 859 | INIT_LIST_HEAD(&ioat_chan->free_desc); | ||
| 860 | INIT_LIST_HEAD(&ioat_chan->used_desc); | ||
| 861 | break; | ||
| 862 | } | ||
| 863 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 864 | |||
| 865 | pci_pool_free(ioatdma_device->completion_pool, | ||
| 866 | ioat_chan->completion_virt, | ||
| 867 | ioat_chan->completion_addr); | ||
| 868 | |||
| 869 | /* one is ok since we left it on there on purpose */ | ||
| 870 | if (in_use_descs > 1) | ||
| 871 | dev_err(&ioat_chan->device->pdev->dev, | ||
| 872 | "Freeing %d in use descriptors!\n", | ||
| 873 | in_use_descs - 1); | ||
| 874 | |||
| 875 | ioat_chan->last_completion = ioat_chan->completion_addr = 0; | ||
| 876 | ioat_chan->pending = 0; | ||
| 877 | ioat_chan->dmacount = 0; | ||
| 878 | ioat_chan->desccount = 0; | ||
| 879 | ioat_chan->watchdog_completion = 0; | ||
| 880 | ioat_chan->last_compl_desc_addr_hw = 0; | ||
| 881 | ioat_chan->watchdog_tcp_cookie = | ||
| 882 | ioat_chan->watchdog_last_tcp_cookie = 0; | ||
| 883 | } | ||
| 884 | |||
| 885 | /** | ||
| 886 | * ioat_dma_get_next_descriptor - return the next available descriptor | ||
| 887 | * @ioat_chan: IOAT DMA channel handle | ||
| 888 | * | ||
| 889 | * Gets the next descriptor from the chain, and must be called with the | ||
| 890 | * channel's desc_lock held. Allocates more descriptors if the channel | ||
| 891 | * has run out. | ||
| 892 | */ | ||
| 893 | static struct ioat_desc_sw * | ||
| 894 | ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) | ||
| 895 | { | ||
| 896 | struct ioat_desc_sw *new; | ||
| 897 | |||
| 898 | if (!list_empty(&ioat_chan->free_desc)) { | ||
| 899 | new = to_ioat_desc(ioat_chan->free_desc.next); | ||
| 900 | list_del(&new->node); | ||
| 901 | } else { | ||
| 902 | /* try to get another desc */ | ||
| 903 | new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); | ||
| 904 | if (!new) { | ||
| 905 | dev_err(&ioat_chan->device->pdev->dev, | ||
| 906 | "alloc failed\n"); | ||
| 907 | return NULL; | ||
| 908 | } | ||
| 909 | } | ||
| 910 | |||
| 911 | prefetch(new->hw); | ||
| 912 | return new; | ||
| 913 | } | ||
| 914 | |||
| 915 | static struct ioat_desc_sw * | ||
| 916 | ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) | ||
| 917 | { | ||
| 918 | struct ioat_desc_sw *new; | ||
| 919 | |||
| 920 | /* | ||
| 921 | * used.prev points to where to start processing | ||
| 922 | * used.next points to next free descriptor | ||
| 923 | * if used.prev == NULL, there are none waiting to be processed | ||
| 924 | * if used.next == used.prev.prev, there is only one free descriptor, | ||
| 925 | * and we need to use it as a noop descriptor before | ||
| 926 | * linking in a new set of descriptors, since the device | ||
| 927 | * has probably already read the pointer to it | ||
| 928 | */ | ||
| 929 | if (ioat_chan->used_desc.prev && | ||
| 930 | ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) { | ||
| 931 | |||
| 932 | struct ioat_desc_sw *desc; | ||
| 933 | struct ioat_desc_sw *noop_desc; | ||
| 934 | int i; | ||
| 935 | |||
| 936 | /* set up the noop descriptor */ | ||
| 937 | noop_desc = to_ioat_desc(ioat_chan->used_desc.next); | ||
| 938 | /* set size to non-zero value (channel returns error when size is 0) */ | ||
| 939 | noop_desc->hw->size = NULL_DESC_BUFFER_SIZE; | ||
| 940 | noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; | ||
| 941 | noop_desc->hw->src_addr = 0; | ||
| 942 | noop_desc->hw->dst_addr = 0; | ||
| 943 | |||
| 944 | ioat_chan->used_desc.next = ioat_chan->used_desc.next->next; | ||
| 945 | ioat_chan->pending++; | ||
| 946 | ioat_chan->dmacount++; | ||
| 947 | |||
| 948 | /* try to get a few more descriptors */ | ||
| 949 | for (i = 16; i; i--) { | ||
| 950 | desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); | ||
| 951 | if (!desc) { | ||
| 952 | dev_err(&ioat_chan->device->pdev->dev, | ||
| 953 | "alloc failed\n"); | ||
| 954 | break; | ||
| 955 | } | ||
| 956 | list_add_tail(&desc->node, ioat_chan->used_desc.next); | ||
| 957 | |||
| 958 | desc->hw->next | ||
| 959 | = to_ioat_desc(desc->node.next)->async_tx.phys; | ||
| 960 | to_ioat_desc(desc->node.prev)->hw->next | ||
| 961 | = desc->async_tx.phys; | ||
| 962 | ioat_chan->desccount++; | ||
| 963 | } | ||
| 964 | |||
| 965 | ioat_chan->used_desc.next = noop_desc->node.next; | ||
| 966 | } | ||
| 967 | new = to_ioat_desc(ioat_chan->used_desc.next); | ||
| 968 | prefetch(new); | ||
| 969 | ioat_chan->used_desc.next = new->node.next; | ||
| 970 | |||
| 971 | if (ioat_chan->used_desc.prev == NULL) | ||
| 972 | ioat_chan->used_desc.prev = &new->node; | ||
| 973 | |||
| 974 | prefetch(new->hw); | ||
| 975 | return new; | ||
| 976 | } | ||
| 977 | |||
| 978 | static struct ioat_desc_sw *ioat_dma_get_next_descriptor( | ||
| 979 | struct ioat_dma_chan *ioat_chan) | ||
| 980 | { | ||
| 981 | if (!ioat_chan) | ||
| 982 | return NULL; | ||
| 983 | |||
| 984 | switch (ioat_chan->device->version) { | ||
| 985 | case IOAT_VER_1_2: | ||
| 986 | return ioat1_dma_get_next_descriptor(ioat_chan); | ||
| 987 | case IOAT_VER_2_0: | ||
| 988 | case IOAT_VER_3_0: | ||
| 989 | return ioat2_dma_get_next_descriptor(ioat_chan); | ||
| 990 | } | ||
| 991 | return NULL; | ||
| 992 | } | ||
| 993 | |||
| 994 | static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( | ||
| 995 | struct dma_chan *chan, | ||
| 996 | dma_addr_t dma_dest, | ||
| 997 | dma_addr_t dma_src, | ||
| 998 | size_t len, | ||
| 999 | unsigned long flags) | ||
| 1000 | { | ||
| 1001 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
| 1002 | struct ioat_desc_sw *new; | ||
| 1003 | |||
| 1004 | spin_lock_bh(&ioat_chan->desc_lock); | ||
| 1005 | new = ioat_dma_get_next_descriptor(ioat_chan); | ||
| 1006 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 1007 | |||
| 1008 | if (new) { | ||
| 1009 | new->len = len; | ||
| 1010 | new->dst = dma_dest; | ||
| 1011 | new->src = dma_src; | ||
| 1012 | new->async_tx.flags = flags; | ||
| 1013 | return &new->async_tx; | ||
| 1014 | } else { | ||
| 1015 | dev_err(&ioat_chan->device->pdev->dev, | ||
| 1016 | "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", | ||
| 1017 | chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); | ||
| 1018 | return NULL; | ||
| 1019 | } | ||
| 1020 | } | ||
| 1021 | |||
| 1022 | static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( | ||
| 1023 | struct dma_chan *chan, | ||
| 1024 | dma_addr_t dma_dest, | ||
| 1025 | dma_addr_t dma_src, | ||
| 1026 | size_t len, | ||
| 1027 | unsigned long flags) | ||
| 1028 | { | ||
| 1029 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
| 1030 | struct ioat_desc_sw *new; | ||
| 1031 | |||
| 1032 | spin_lock_bh(&ioat_chan->desc_lock); | ||
| 1033 | new = ioat2_dma_get_next_descriptor(ioat_chan); | ||
| 1034 | |||
| 1035 | /* | ||
| 1036 | * leave ioat_chan->desc_lock set in ioat 2 path | ||
| 1037 | * it will get unlocked at end of tx_submit | ||
| 1038 | */ | ||
| 1039 | |||
| 1040 | if (new) { | ||
| 1041 | new->len = len; | ||
| 1042 | new->dst = dma_dest; | ||
| 1043 | new->src = dma_src; | ||
| 1044 | new->async_tx.flags = flags; | ||
| 1045 | return &new->async_tx; | ||
| 1046 | } else { | ||
| 1047 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 1048 | dev_err(&ioat_chan->device->pdev->dev, | ||
| 1049 | "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", | ||
| 1050 | chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); | ||
| 1051 | return NULL; | ||
| 1052 | } | ||
| 1053 | } | ||
| 1054 | |||
| 1055 | static void ioat_dma_cleanup_tasklet(unsigned long data) | ||
| 1056 | { | ||
| 1057 | struct ioat_dma_chan *chan = (void *)data; | ||
| 1058 | ioat_dma_memcpy_cleanup(chan); | ||
| 1059 | writew(IOAT_CHANCTRL_INT_DISABLE, | ||
| 1060 | chan->reg_base + IOAT_CHANCTRL_OFFSET); | ||
| 1061 | } | ||
| 1062 | |||
| 1063 | static void | ||
| 1064 | ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) | ||
| 1065 | { | ||
| 1066 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
| 1067 | if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
| 1068 | pci_unmap_single(ioat_chan->device->pdev, | ||
| 1069 | pci_unmap_addr(desc, dst), | ||
| 1070 | pci_unmap_len(desc, len), | ||
| 1071 | PCI_DMA_FROMDEVICE); | ||
| 1072 | else | ||
| 1073 | pci_unmap_page(ioat_chan->device->pdev, | ||
| 1074 | pci_unmap_addr(desc, dst), | ||
| 1075 | pci_unmap_len(desc, len), | ||
| 1076 | PCI_DMA_FROMDEVICE); | ||
| 1077 | } | ||
| 1078 | |||
| 1079 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 1080 | if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
| 1081 | pci_unmap_single(ioat_chan->device->pdev, | ||
| 1082 | pci_unmap_addr(desc, src), | ||
| 1083 | pci_unmap_len(desc, len), | ||
| 1084 | PCI_DMA_TODEVICE); | ||
| 1085 | else | ||
| 1086 | pci_unmap_page(ioat_chan->device->pdev, | ||
| 1087 | pci_unmap_addr(desc, src), | ||
| 1088 | pci_unmap_len(desc, len), | ||
| 1089 | PCI_DMA_TODEVICE); | ||
| 1090 | } | ||
| 1091 | } | ||
| 1092 | |||
| 1093 | /** | ||
| 1094 | * ioat_dma_memcpy_cleanup - clean up finished descriptors | ||
| 1095 | * @chan: ioat channel to be cleaned up | ||
| 1096 | */ | ||
| 1097 | static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) | ||
| 1098 | { | ||
| 1099 | unsigned long phys_complete; | ||
| 1100 | struct ioat_desc_sw *desc, *_desc; | ||
| 1101 | dma_cookie_t cookie = 0; | ||
| 1102 | unsigned long desc_phys; | ||
| 1103 | struct ioat_desc_sw *latest_desc; | ||
| 1104 | |||
| 1105 | prefetch(ioat_chan->completion_virt); | ||
| 1106 | |||
| 1107 | if (!spin_trylock_bh(&ioat_chan->cleanup_lock)) | ||
| 1108 | return; | ||
| 1109 | |||
| 1110 | /* The completion writeback can happen at any time, | ||
| 1111 | so reads by the driver need to be atomic operations | ||
| 1112 | The descriptor physical addresses are limited to 32-bits | ||
| 1113 | when the CPU can only do a 32-bit mov */ | ||
| 1114 | |||
| 1115 | #if (BITS_PER_LONG == 64) | ||
| 1116 | phys_complete = | ||
| 1117 | ioat_chan->completion_virt->full | ||
| 1118 | & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | ||
| 1119 | #else | ||
| 1120 | phys_complete = | ||
| 1121 | ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; | ||
| 1122 | #endif | ||
| 1123 | |||
| 1124 | if ((ioat_chan->completion_virt->full | ||
| 1125 | & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == | ||
| 1126 | IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { | ||
| 1127 | dev_err(&ioat_chan->device->pdev->dev, | ||
| 1128 | "Channel halted, chanerr = %x\n", | ||
| 1129 | readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET)); | ||
| 1130 | |||
| 1131 | /* TODO do something to salvage the situation */ | ||
| 1132 | } | ||
| 1133 | |||
| 1134 | if (phys_complete == ioat_chan->last_completion) { | ||
| 1135 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
| 1136 | /* | ||
| 1137 | * perhaps we're stuck so hard that the watchdog can't go off? | ||
| 1138 | * try to catch it after 2 seconds | ||
| 1139 | */ | ||
| 1140 | if (ioat_chan->device->version != IOAT_VER_3_0) { | ||
| 1141 | if (time_after(jiffies, | ||
| 1142 | ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) { | ||
| 1143 | ioat_dma_chan_watchdog(&(ioat_chan->device->work.work)); | ||
| 1144 | ioat_chan->last_completion_time = jiffies; | ||
| 1145 | } | ||
| 1146 | } | ||
| 1147 | return; | ||
| 1148 | } | ||
| 1149 | ioat_chan->last_completion_time = jiffies; | ||
| 1150 | |||
| 1151 | cookie = 0; | ||
| 1152 | if (!spin_trylock_bh(&ioat_chan->desc_lock)) { | ||
| 1153 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
| 1154 | return; | ||
| 1155 | } | ||
| 1156 | |||
| 1157 | switch (ioat_chan->device->version) { | ||
| 1158 | case IOAT_VER_1_2: | ||
| 1159 | list_for_each_entry_safe(desc, _desc, | ||
| 1160 | &ioat_chan->used_desc, node) { | ||
| 1161 | |||
| 1162 | /* | ||
| 1163 | * Incoming DMA requests may use multiple descriptors, | ||
| 1164 | * due to exceeding xfercap, perhaps. If so, only the | ||
| 1165 | * last one will have a cookie, and require unmapping. | ||
| 1166 | */ | ||
| 1167 | if (desc->async_tx.cookie) { | ||
| 1168 | cookie = desc->async_tx.cookie; | ||
| 1169 | ioat_dma_unmap(ioat_chan, desc); | ||
| 1170 | if (desc->async_tx.callback) { | ||
| 1171 | desc->async_tx.callback(desc->async_tx.callback_param); | ||
| 1172 | desc->async_tx.callback = NULL; | ||
| 1173 | } | ||
| 1174 | } | ||
| 1175 | |||
| 1176 | if (desc->async_tx.phys != phys_complete) { | ||
| 1177 | /* | ||
| 1178 | * a completed entry, but not the last, so clean | ||
| 1179 | * up if the client is done with the descriptor | ||
| 1180 | */ | ||
| 1181 | if (async_tx_test_ack(&desc->async_tx)) { | ||
| 1182 | list_move_tail(&desc->node, | ||
| 1183 | &ioat_chan->free_desc); | ||
| 1184 | } else | ||
| 1185 | desc->async_tx.cookie = 0; | ||
| 1186 | } else { | ||
| 1187 | /* | ||
| 1188 | * last used desc. Do not remove, so we can | ||
| 1189 | * append from it, but don't look at it next | ||
| 1190 | * time, either | ||
| 1191 | */ | ||
| 1192 | desc->async_tx.cookie = 0; | ||
| 1193 | |||
| 1194 | /* TODO check status bits? */ | ||
| 1195 | break; | ||
| 1196 | } | ||
| 1197 | } | ||
| 1198 | break; | ||
| 1199 | case IOAT_VER_2_0: | ||
| 1200 | case IOAT_VER_3_0: | ||
| 1201 | /* has some other thread already cleaned up? */ | ||
| 1202 | if (ioat_chan->used_desc.prev == NULL) | ||
| 1203 | break; | ||
| 1204 | |||
| 1205 | /* work backwards to find latest finished desc */ | ||
| 1206 | desc = to_ioat_desc(ioat_chan->used_desc.next); | ||
| 1207 | latest_desc = NULL; | ||
| 1208 | do { | ||
| 1209 | desc = to_ioat_desc(desc->node.prev); | ||
| 1210 | desc_phys = (unsigned long)desc->async_tx.phys | ||
| 1211 | & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | ||
| 1212 | if (desc_phys == phys_complete) { | ||
| 1213 | latest_desc = desc; | ||
| 1214 | break; | ||
| 1215 | } | ||
| 1216 | } while (&desc->node != ioat_chan->used_desc.prev); | ||
| 1217 | |||
| 1218 | if (latest_desc != NULL) { | ||
| 1219 | |||
| 1220 | /* work forwards to clear finished descriptors */ | ||
| 1221 | for (desc = to_ioat_desc(ioat_chan->used_desc.prev); | ||
| 1222 | &desc->node != latest_desc->node.next && | ||
| 1223 | &desc->node != ioat_chan->used_desc.next; | ||
| 1224 | desc = to_ioat_desc(desc->node.next)) { | ||
| 1225 | if (desc->async_tx.cookie) { | ||
| 1226 | cookie = desc->async_tx.cookie; | ||
| 1227 | desc->async_tx.cookie = 0; | ||
| 1228 | ioat_dma_unmap(ioat_chan, desc); | ||
| 1229 | if (desc->async_tx.callback) { | ||
| 1230 | desc->async_tx.callback(desc->async_tx.callback_param); | ||
| 1231 | desc->async_tx.callback = NULL; | ||
| 1232 | } | ||
| 1233 | } | ||
| 1234 | } | ||
| 1235 | |||
| 1236 | /* move used.prev up beyond those that are finished */ | ||
| 1237 | if (&desc->node == ioat_chan->used_desc.next) | ||
| 1238 | ioat_chan->used_desc.prev = NULL; | ||
| 1239 | else | ||
| 1240 | ioat_chan->used_desc.prev = &desc->node; | ||
| 1241 | } | ||
| 1242 | break; | ||
| 1243 | } | ||
| 1244 | |||
| 1245 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 1246 | |||
| 1247 | ioat_chan->last_completion = phys_complete; | ||
| 1248 | if (cookie != 0) | ||
| 1249 | ioat_chan->completed_cookie = cookie; | ||
| 1250 | |||
| 1251 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
| 1252 | } | ||
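A minimal sketch of the CB1 half of the cleanup walk above: descriptors are scanned in submission order, the newest cookie seen becomes the completed cookie, and the walk stops at the descriptor whose DMA address matches the completion writeback. Unmapping, callbacks, and list moves are omitted; arrays replace the kernel lists.

```c
/* Sketch of the CB1 cleanup walk over completed descriptors. */
#include <stdio.h>
#include <stdint.h>

struct sw_desc { uint64_t phys; int cookie; };	/* cookie 0 = interior chunk */

static int cleanup(const struct sw_desc *used, int n, uint64_t phys_complete)
{
	int completed_cookie = 0;

	for (int i = 0; i < n; i++) {
		if (used[i].cookie)
			completed_cookie = used[i].cookie;
		if (used[i].phys == phys_complete)
			break;		/* last finished descriptor */
	}
	return completed_cookie;
}

int main(void)
{
	struct sw_desc used[] = {
		{ 0x1000, 0 }, { 0x1040, 7 }, { 0x1080, 0 }, { 0x10c0, 8 },
	};
	printf("completed cookie: %d\n", cleanup(used, 4, 0x1080));
	return 0;
}
```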
| 1253 | |||
| 1254 | /** | ||
| 1255 | * ioat_dma_is_complete - poll the status of an IOAT DMA transaction | ||
| 1256 | * @chan: IOAT DMA channel handle | ||
| 1257 | * @cookie: DMA transaction identifier | ||
| 1258 | * @done: if not %NULL, updated with last completed transaction | ||
| 1259 | * @used: if not %NULL, updated with last used transaction | ||
| 1260 | */ | ||
| 1261 | static enum dma_status ioat_dma_is_complete(struct dma_chan *chan, | ||
| 1262 | dma_cookie_t cookie, | ||
| 1263 | dma_cookie_t *done, | ||
| 1264 | dma_cookie_t *used) | ||
| 1265 | { | ||
| 1266 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
| 1267 | dma_cookie_t last_used; | ||
| 1268 | dma_cookie_t last_complete; | ||
| 1269 | enum dma_status ret; | ||
| 1270 | |||
| 1271 | last_used = chan->cookie; | ||
| 1272 | last_complete = ioat_chan->completed_cookie; | ||
| 1273 | ioat_chan->watchdog_tcp_cookie = cookie; | ||
| 1274 | |||
| 1275 | if (done) | ||
| 1276 | *done = last_complete; | ||
| 1277 | if (used) | ||
| 1278 | *used = last_used; | ||
| 1279 | |||
| 1280 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
| 1281 | if (ret == DMA_SUCCESS) | ||
| 1282 | return ret; | ||
| 1283 | |||
| 1284 | ioat_dma_memcpy_cleanup(ioat_chan); | ||
| 1285 | |||
| 1286 | last_used = chan->cookie; | ||
| 1287 | last_complete = ioat_chan->completed_cookie; | ||
| 1288 | |||
| 1289 | if (done) | ||
| 1290 | *done = last_complete; | ||
| 1291 | if (used) | ||
| 1292 | *used = last_used; | ||
| 1293 | |||
| 1294 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
| 1295 | } | ||
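The final status decision is delegated to dma_async_is_complete(), which treats cookies as a signed, wrapping sequence. A sketch of that window test, reconstructed from the dmaengine helper of this era and trimmed for illustration:

```c
/* Sketch of the wrap-aware cookie window test behind dma_async_is_complete(). */
#include <stdio.h>

enum status { IN_PROGRESS, SUCCESS };

static enum status is_complete(int cookie, int last_complete, int last_used)
{
	if (last_complete <= last_used) {
		if (cookie <= last_complete || cookie > last_used)
			return SUCCESS;
	} else {
		if (cookie <= last_complete && cookie > last_used)
			return SUCCESS;
	}
	return IN_PROGRESS;
}

int main(void)
{
	printf("%d\n", is_complete(5, 7, 9));	/* done: 5 <= 7 */
	printf("%d\n", is_complete(8, 7, 9));	/* pending: 7 < 8 <= 9 */
	return 0;
}
```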
| 1296 | |||
| 1297 | static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) | ||
| 1298 | { | ||
| 1299 | struct ioat_desc_sw *desc; | ||
| 1300 | |||
| 1301 | spin_lock_bh(&ioat_chan->desc_lock); | ||
| 1302 | |||
| 1303 | desc = ioat_dma_get_next_descriptor(ioat_chan); | ||
| 1304 | |||
| 1305 | if (!desc) { | ||
| 1306 | dev_err(&ioat_chan->device->pdev->dev, | ||
| 1307 | "Unable to start null desc - get next desc failed\n"); | ||
| 1308 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 1309 | return; | ||
| 1310 | } | ||
| 1311 | |||
| 1312 | desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL | ||
| 1313 | | IOAT_DMA_DESCRIPTOR_CTL_INT_GN | ||
| 1314 | | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; | ||
| 1315 | /* set size to non-zero value (channel returns error when size is 0) */ | ||
| 1316 | desc->hw->size = NULL_DESC_BUFFER_SIZE; | ||
| 1317 | desc->hw->src_addr = 0; | ||
| 1318 | desc->hw->dst_addr = 0; | ||
| 1319 | async_tx_ack(&desc->async_tx); | ||
| 1320 | switch (ioat_chan->device->version) { | ||
| 1321 | case IOAT_VER_1_2: | ||
| 1322 | desc->hw->next = 0; | ||
| 1323 | list_add_tail(&desc->node, &ioat_chan->used_desc); | ||
| 1324 | |||
| 1325 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | ||
| 1326 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); | ||
| 1327 | writel(((u64) desc->async_tx.phys) >> 32, | ||
| 1328 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); | ||
| 1329 | |||
| 1330 | writeb(IOAT_CHANCMD_START, ioat_chan->reg_base | ||
| 1331 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | ||
| 1332 | break; | ||
| 1333 | case IOAT_VER_2_0: | ||
| 1334 | case IOAT_VER_3_0: | ||
| 1335 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | ||
| 1336 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | ||
| 1337 | writel(((u64) desc->async_tx.phys) >> 32, | ||
| 1338 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | ||
| 1339 | |||
| 1340 | ioat_chan->dmacount++; | ||
| 1341 | __ioat2_dma_memcpy_issue_pending(ioat_chan); | ||
| 1342 | break; | ||
| 1343 | } | ||
| 1344 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
| 1345 | } | ||
| 1346 | |||
| 1347 | /* | ||
| 1348 | * Perform an IOAT transaction to verify the HW works. | ||
| 1349 | */ | ||
| 1350 | #define IOAT_TEST_SIZE 2000 | ||
| 1351 | |||
| 1352 | static void ioat_dma_test_callback(void *dma_async_param) | ||
| 1353 | { | ||
| 1354 | struct completion *cmp = dma_async_param; | ||
| 1355 | |||
| 1356 | complete(cmp); | ||
| 1357 | } | ||
| 1358 | |||
| 1359 | /** | ||
| 1360 | * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works. | ||
| 1361 | * @device: device to be tested | ||
| 1362 | */ | ||
| 1363 | static int ioat_dma_self_test(struct ioatdma_device *device) | ||
| 1364 | { | ||
| 1365 | int i; | ||
| 1366 | u8 *src; | ||
| 1367 | u8 *dest; | ||
| 1368 | struct dma_chan *dma_chan; | ||
| 1369 | struct dma_async_tx_descriptor *tx; | ||
| 1370 | dma_addr_t dma_dest, dma_src; | ||
| 1371 | dma_cookie_t cookie; | ||
| 1372 | int err = 0; | ||
| 1373 | struct completion cmp; | ||
| 1374 | unsigned long tmo; | ||
| 1375 | unsigned long flags; | ||
| 1376 | |||
| 1377 | src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | ||
| 1378 | if (!src) | ||
| 1379 | return -ENOMEM; | ||
| 1380 | dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); | ||
| 1381 | if (!dest) { | ||
| 1382 | kfree(src); | ||
| 1383 | return -ENOMEM; | ||
| 1384 | } | ||
| 1385 | |||
| 1386 | /* Fill in src buffer */ | ||
| 1387 | for (i = 0; i < IOAT_TEST_SIZE; i++) | ||
| 1388 | src[i] = (u8)i; | ||
| 1389 | |||
| 1390 | /* Start copy, using first DMA channel */ | ||
| 1391 | dma_chan = container_of(device->common.channels.next, | ||
| 1392 | struct dma_chan, | ||
| 1393 | device_node); | ||
| 1394 | if (device->common.device_alloc_chan_resources(dma_chan) < 1) { | ||
| 1395 | dev_err(&device->pdev->dev, | ||
| 1396 | "selftest cannot allocate chan resource\n"); | ||
| 1397 | err = -ENODEV; | ||
| 1398 | goto out; | ||
| 1399 | } | ||
| 1400 | |||
| 1401 | dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE, | ||
| 1402 | DMA_TO_DEVICE); | ||
| 1403 | dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE, | ||
| 1404 | DMA_FROM_DEVICE); | ||
| 1405 | flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE; | ||
| 1406 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, | ||
| 1407 | IOAT_TEST_SIZE, flags); | ||
| 1408 | if (!tx) { | ||
| 1409 | dev_err(&device->pdev->dev, | ||
| 1410 | "Self-test prep failed, disabling\n"); | ||
| 1411 | err = -ENODEV; | ||
| 1412 | goto free_resources; | ||
| 1413 | } | ||
| 1414 | |||
| 1415 | async_tx_ack(tx); | ||
| 1416 | init_completion(&cmp); | ||
| 1417 | tx->callback = ioat_dma_test_callback; | ||
| 1418 | tx->callback_param = &cmp; | ||
| 1419 | cookie = tx->tx_submit(tx); | ||
| 1420 | if (cookie < 0) { | ||
| 1421 | dev_err(&device->pdev->dev, | ||
| 1422 | "Self-test setup failed, disabling\n"); | ||
| 1423 | err = -ENODEV; | ||
| 1424 | goto free_resources; | ||
| 1425 | } | ||
| 1426 | device->common.device_issue_pending(dma_chan); | ||
| 1427 | |||
| 1428 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | ||
| 1429 | |||
| 1430 | if (tmo == 0 || | ||
| 1431 | device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) | ||
| 1432 | != DMA_SUCCESS) { | ||
| 1433 | dev_err(&device->pdev->dev, | ||
| 1434 | "Self-test copy timed out, disabling\n"); | ||
| 1435 | err = -ENODEV; | ||
| 1436 | goto free_resources; | ||
| 1437 | } | ||
| 1438 | if (memcmp(src, dest, IOAT_TEST_SIZE)) { | ||
| 1439 | dev_err(&device->pdev->dev, | ||
| 1440 | "Self-test copy failed compare, disabling\n"); | ||
| 1441 | err = -ENODEV; | ||
| 1442 | goto free_resources; | ||
| 1443 | } | ||
| 1444 | |||
| 1445 | free_resources: | ||
| 1446 | device->common.device_free_chan_resources(dma_chan); | ||
| 1447 | out: | ||
| 1448 | kfree(src); | ||
| 1449 | kfree(dest); | ||
| 1450 | return err; | ||
| 1451 | } | ||
| 1452 | |||
| 1453 | static char ioat_interrupt_style[32] = "msix"; | ||
| 1454 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, | ||
| 1455 | sizeof(ioat_interrupt_style), 0644); | ||
| 1456 | MODULE_PARM_DESC(ioat_interrupt_style, | ||
| 1457 | "set ioat interrupt style: msix (default), " | ||
| 1458 | "msix-single-vector, msi, intx)"); | ||
| 1459 | |||
| 1460 | /** | ||
| 1461 | * ioat_dma_setup_interrupts - setup interrupt handler | ||
| 1462 | * @device: ioat device | ||
| 1463 | */ | ||
| 1464 | static int ioat_dma_setup_interrupts(struct ioatdma_device *device) | ||
| 1465 | { | ||
| 1466 | struct ioat_dma_chan *ioat_chan; | ||
| 1467 | int err, i, j, msixcnt; | ||
| 1468 | u8 intrctrl = 0; | ||
| 1469 | |||
| 1470 | if (!strcmp(ioat_interrupt_style, "msix")) | ||
| 1471 | goto msix; | ||
| 1472 | if (!strcmp(ioat_interrupt_style, "msix-single-vector")) | ||
| 1473 | goto msix_single_vector; | ||
| 1474 | if (!strcmp(ioat_interrupt_style, "msi")) | ||
| 1475 | goto msi; | ||
| 1476 | if (!strcmp(ioat_interrupt_style, "intx")) | ||
| 1477 | goto intx; | ||
| 1478 | dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n", | ||
| 1479 | ioat_interrupt_style); | ||
| 1480 | goto err_no_irq; | ||
| 1481 | |||
| 1482 | msix: | ||
| 1483 | /* The number of MSI-X vectors should equal the number of channels */ | ||
| 1484 | msixcnt = device->common.chancnt; | ||
| 1485 | for (i = 0; i < msixcnt; i++) | ||
| 1486 | device->msix_entries[i].entry = i; | ||
| 1487 | |||
| 1488 | err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt); | ||
| 1489 | if (err < 0) | ||
| 1490 | goto msi; | ||
| 1491 | if (err > 0) | ||
| 1492 | goto msix_single_vector; | ||
| 1493 | |||
| 1494 | for (i = 0; i < msixcnt; i++) { | ||
| 1495 | ioat_chan = ioat_lookup_chan_by_index(device, i); | ||
| 1496 | err = request_irq(device->msix_entries[i].vector, | ||
| 1497 | ioat_dma_do_interrupt_msix, | ||
| 1498 | 0, "ioat-msix", ioat_chan); | ||
| 1499 | if (err) { | ||
| 1500 | for (j = 0; j < i; j++) { | ||
| 1501 | ioat_chan = | ||
| 1502 | ioat_lookup_chan_by_index(device, j); | ||
| 1503 | free_irq(device->msix_entries[j].vector, | ||
| 1504 | ioat_chan); | ||
| 1505 | } | ||
| 1506 | goto msix_single_vector; | ||
| 1507 | } | ||
| 1508 | } | ||
| 1509 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; | ||
| 1510 | device->irq_mode = msix_multi_vector; | ||
| 1511 | goto done; | ||
| 1512 | |||
| 1513 | msix_single_vector: | ||
| 1514 | device->msix_entries[0].entry = 0; | ||
| 1515 | err = pci_enable_msix(device->pdev, device->msix_entries, 1); | ||
| 1516 | if (err) | ||
| 1517 | goto msi; | ||
| 1518 | |||
| 1519 | err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt, | ||
| 1520 | 0, "ioat-msix", device); | ||
| 1521 | if (err) { | ||
| 1522 | pci_disable_msix(device->pdev); | ||
| 1523 | goto msi; | ||
| 1524 | } | ||
| 1525 | device->irq_mode = msix_single_vector; | ||
| 1526 | goto done; | ||
| 1527 | |||
| 1528 | msi: | ||
| 1529 | err = pci_enable_msi(device->pdev); | ||
| 1530 | if (err) | ||
| 1531 | goto intx; | ||
| 1532 | |||
| 1533 | err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, | ||
| 1534 | 0, "ioat-msi", device); | ||
| 1535 | if (err) { | ||
| 1536 | pci_disable_msi(device->pdev); | ||
| 1537 | goto intx; | ||
| 1538 | } | ||
| 1539 | /* | ||
| 1540 | * CB 1.2 devices need a bit set in configuration space to enable MSI | ||
| 1541 | */ | ||
| 1542 | if (device->version == IOAT_VER_1_2) { | ||
| 1543 | u32 dmactrl; | ||
| 1544 | pci_read_config_dword(device->pdev, | ||
| 1545 | IOAT_PCI_DMACTRL_OFFSET, &dmactrl); | ||
| 1546 | dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; | ||
| 1547 | pci_write_config_dword(device->pdev, | ||
| 1548 | IOAT_PCI_DMACTRL_OFFSET, dmactrl); | ||
| 1549 | } | ||
| 1550 | device->irq_mode = msi; | ||
| 1551 | goto done; | ||
| 1552 | |||
| 1553 | intx: | ||
| 1554 | err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, | ||
| 1555 | IRQF_SHARED, "ioat-intx", device); | ||
| 1556 | if (err) | ||
| 1557 | goto err_no_irq; | ||
| 1558 | device->irq_mode = intx; | ||
| 1559 | |||
| 1560 | done: | ||
| 1561 | intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; | ||
| 1562 | writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); | ||
| 1563 | return 0; | ||
| 1564 | |||
| 1565 | err_no_irq: | ||
| 1566 | /* Disable all interrupt generation */ | ||
| 1567 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | ||
| 1568 | dev_err(&device->pdev->dev, "no usable interrupts\n"); | ||
| 1569 | device->irq_mode = none; | ||
| 1570 | return -1; | ||
| 1571 | } | ||
| 1572 | |||
| 1573 | /** | ||
| 1574 | * ioat_dma_remove_interrupts - remove whatever interrupts were set | ||
| 1575 | * @device: ioat device | ||
| 1576 | */ | ||
| 1577 | static void ioat_dma_remove_interrupts(struct ioatdma_device *device) | ||
| 1578 | { | ||
| 1579 | struct ioat_dma_chan *ioat_chan; | ||
| 1580 | int i; | ||
| 1581 | |||
| 1582 | /* Disable all interrupt generation */ | ||
| 1583 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | ||
| 1584 | |||
| 1585 | switch (device->irq_mode) { | ||
| 1586 | case msix_multi_vector: | ||
| 1587 | for (i = 0; i < device->common.chancnt; i++) { | ||
| 1588 | ioat_chan = ioat_lookup_chan_by_index(device, i); | ||
| 1589 | free_irq(device->msix_entries[i].vector, ioat_chan); | ||
| 1590 | } | ||
| 1591 | pci_disable_msix(device->pdev); | ||
| 1592 | break; | ||
| 1593 | case msix_single_vector: | ||
| 1594 | free_irq(device->msix_entries[0].vector, device); | ||
| 1595 | pci_disable_msix(device->pdev); | ||
| 1596 | break; | ||
| 1597 | case msi: | ||
| 1598 | free_irq(device->pdev->irq, device); | ||
| 1599 | pci_disable_msi(device->pdev); | ||
| 1600 | break; | ||
| 1601 | case intx: | ||
| 1602 | free_irq(device->pdev->irq, device); | ||
| 1603 | break; | ||
| 1604 | case none: | ||
| 1605 | dev_warn(&device->pdev->dev, | ||
| 1606 | "call to %s without interrupts setup\n", __func__); | ||
| 1607 | } | ||
| 1608 | device->irq_mode = none; | ||
| 1609 | } | ||
| 1610 | |||
| 1611 | struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, | ||
| 1612 | void __iomem *iobase) | ||
| 1613 | { | ||
| 1614 | int err; | ||
| 1615 | struct ioatdma_device *device; | ||
| 1616 | |||
| 1617 | device = kzalloc(sizeof(*device), GFP_KERNEL); | ||
| 1618 | if (!device) { | ||
| 1619 | err = -ENOMEM; | ||
| 1620 | goto err_kzalloc; | ||
| 1621 | } | ||
| 1622 | device->pdev = pdev; | ||
| 1623 | device->reg_base = iobase; | ||
| 1624 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); | ||
| 1625 | |||
| 1626 | /* DMA coherent memory pool for DMA descriptor allocations */ | ||
| 1627 | device->dma_pool = pci_pool_create("dma_desc_pool", pdev, | ||
| 1628 | sizeof(struct ioat_dma_descriptor), | ||
| 1629 | 64, 0); | ||
| 1630 | if (!device->dma_pool) { | ||
| 1631 | err = -ENOMEM; | ||
| 1632 | goto err_dma_pool; | ||
| 1633 | } | ||
| 1634 | |||
| 1635 | device->completion_pool = pci_pool_create("completion_pool", pdev, | ||
| 1636 | sizeof(u64), SMP_CACHE_BYTES, | ||
| 1637 | SMP_CACHE_BYTES); | ||
| 1638 | if (!device->completion_pool) { | ||
| 1639 | err = -ENOMEM; | ||
| 1640 | goto err_completion_pool; | ||
| 1641 | } | ||
| 1642 | |||
| 1643 | INIT_LIST_HEAD(&device->common.channels); | ||
| 1644 | ioat_dma_enumerate_channels(device); | ||
| 1645 | |||
| 1646 | device->common.device_alloc_chan_resources = | ||
| 1647 | ioat_dma_alloc_chan_resources; | ||
| 1648 | device->common.device_free_chan_resources = | ||
| 1649 | ioat_dma_free_chan_resources; | ||
| 1650 | device->common.dev = &pdev->dev; | ||
| 1651 | |||
| 1652 | dma_cap_set(DMA_MEMCPY, device->common.cap_mask); | ||
| 1653 | device->common.device_is_tx_complete = ioat_dma_is_complete; | ||
| 1654 | switch (device->version) { | ||
| 1655 | case IOAT_VER_1_2: | ||
| 1656 | device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy; | ||
| 1657 | device->common.device_issue_pending = | ||
| 1658 | ioat1_dma_memcpy_issue_pending; | ||
| 1659 | break; | ||
| 1660 | case IOAT_VER_2_0: | ||
| 1661 | case IOAT_VER_3_0: | ||
| 1662 | device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; | ||
| 1663 | device->common.device_issue_pending = | ||
| 1664 | ioat2_dma_memcpy_issue_pending; | ||
| 1665 | break; | ||
| 1666 | } | ||
| 1667 | |||
| 1668 | dev_err(&device->pdev->dev, | ||
| 1669 | "Intel(R) I/OAT DMA Engine found," | ||
| 1670 | " %d channels, device version 0x%02x, driver version %s\n", | ||
| 1671 | device->common.chancnt, device->version, IOAT_DMA_VERSION); | ||
| 1672 | |||
| 1673 | if (!device->common.chancnt) { | ||
| 1674 | dev_err(&device->pdev->dev, | ||
| 1675 | "Intel(R) I/OAT DMA Engine problem found: " | ||
| 1676 | "zero channels detected\n"); | ||
| 1677 | goto err_setup_interrupts; | ||
| 1678 | } | ||
| 1679 | |||
| 1680 | err = ioat_dma_setup_interrupts(device); | ||
| 1681 | if (err) | ||
| 1682 | goto err_setup_interrupts; | ||
| 1683 | |||
| 1684 | err = ioat_dma_self_test(device); | ||
| 1685 | if (err) | ||
| 1686 | goto err_self_test; | ||
| 1687 | |||
| 1688 | ioat_set_tcp_copy_break(device); | ||
| 1689 | |||
| 1690 | dma_async_device_register(&device->common); | ||
| 1691 | |||
| 1692 | if (device->version != IOAT_VER_3_0) { | ||
| 1693 | INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); | ||
| 1694 | schedule_delayed_work(&device->work, | ||
| 1695 | WATCHDOG_DELAY); | ||
| 1696 | } | ||
| 1697 | |||
| 1698 | return device; | ||
| 1699 | |||
| 1700 | err_self_test: | ||
| 1701 | ioat_dma_remove_interrupts(device); | ||
| 1702 | err_setup_interrupts: | ||
| 1703 | pci_pool_destroy(device->completion_pool); | ||
| 1704 | err_completion_pool: | ||
| 1705 | pci_pool_destroy(device->dma_pool); | ||
| 1706 | err_dma_pool: | ||
| 1707 | kfree(device); | ||
| 1708 | err_kzalloc: | ||
| 1709 | dev_err(&pdev->dev, | ||
| 1710 | "Intel(R) I/OAT DMA Engine initialization failed\n"); | ||
| 1711 | return NULL; | ||
| 1712 | } | ||
| 1713 | |||
| 1714 | void ioat_dma_remove(struct ioatdma_device *device) | ||
| 1715 | { | ||
| 1716 | struct dma_chan *chan, *_chan; | ||
| 1717 | struct ioat_dma_chan *ioat_chan; | ||
| 1718 | |||
| 1719 | if (device->version != IOAT_VER_3_0) | ||
| 1720 | cancel_delayed_work(&device->work); | ||
| 1721 | |||
| 1722 | ioat_dma_remove_interrupts(device); | ||
| 1723 | |||
| 1724 | dma_async_device_unregister(&device->common); | ||
| 1725 | |||
| 1726 | pci_pool_destroy(device->dma_pool); | ||
| 1727 | pci_pool_destroy(device->completion_pool); | ||
| 1728 | |||
| 1729 | iounmap(device->reg_base); | ||
| 1730 | pci_release_regions(device->pdev); | ||
| 1731 | pci_disable_device(device->pdev); | ||
| 1732 | |||
| 1733 | list_for_each_entry_safe(chan, _chan, | ||
| 1734 | &device->common.channels, device_node) { | ||
| 1735 | ioat_chan = to_ioat_chan(chan); | ||
| 1736 | list_del(&chan->device_node); | ||
| 1737 | kfree(ioat_chan); | ||
| 1738 | } | ||
| 1739 | kfree(device); | ||
| 1740 | } | ||
| 1741 | |||
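For context on the completion path above: a dmaengine client submits a descriptor, then polls the returned cookie through the channel's device_is_tx_complete hook (ioat_dma_is_complete here), usually via the dma_async_is_tx_complete() wrapper. A minimal client-side sketch of that pattern; poll_until_done() is illustrative, not part of this driver, and chan/cookie are assumed to come from an earlier prep/submit as in ioat_dma_self_test().

    #include <linux/dmaengine.h>

    /* Illustrative only: busy-polls a cookie the way dmaengine clients
     * consume ioat_dma_is_complete(). "chan" and "cookie" are assumed
     * to come from an earlier prep/submit.
     */
    static enum dma_status poll_until_done(struct dma_chan *chan,
                                           dma_cookie_t cookie)
    {
            dma_cookie_t done, used;
            enum dma_status status;

            do {
                    /* wrapper around chan->device->device_is_tx_complete() */
                    status = dma_async_is_tx_complete(chan, cookie,
                                                      &done, &used);
                    if (status == DMA_ERROR)
                            break;
                    cpu_relax();
            } while (status != DMA_SUCCESS);

            return status;
    }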
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h deleted file mode 100644 index a52ff4bd460..00000000000 --- a/drivers/dma/ioatdma.h +++ /dev/null | |||
| @@ -1,165 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License as published by the Free | ||
| 6 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 7 | * any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in the | ||
| 19 | * file called COPYING. | ||
| 20 | */ | ||
| 21 | #ifndef IOATDMA_H | ||
| 22 | #define IOATDMA_H | ||
| 23 | |||
| 24 | #include <linux/dmaengine.h> | ||
| 25 | #include "ioatdma_hw.h" | ||
| 26 | #include <linux/init.h> | ||
| 27 | #include <linux/dmapool.h> | ||
| 28 | #include <linux/cache.h> | ||
| 29 | #include <linux/pci_ids.h> | ||
| 30 | #include <net/tcp.h> | ||
| 31 | |||
| 32 | #define IOAT_DMA_VERSION "3.64" | ||
| 33 | |||
| 34 | enum ioat_interrupt { | ||
| 35 | none = 0, | ||
| 36 | msix_multi_vector = 1, | ||
| 37 | msix_single_vector = 2, | ||
| 38 | msi = 3, | ||
| 39 | intx = 4, | ||
| 40 | }; | ||
| 41 | |||
| 42 | #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 | ||
| 43 | #define IOAT_DMA_DCA_ANY_CPU ~0 | ||
| 44 | #define IOAT_WATCHDOG_PERIOD (2 * HZ) | ||
| 45 | |||
| 46 | |||
| 47 | /** | ||
| 48 | * struct ioatdma_device - internal representation of an IOAT device | ||
| 49 | * @pdev: PCI-Express device | ||
| 50 | * @reg_base: MMIO register space base address | ||
| 51 | * @dma_pool: for allocating DMA descriptors | ||
| 52 | * @common: embedded struct dma_device | ||
| 53 | * @version: version of ioatdma device | ||
| 54 | * @irq_mode: which style irq to use | ||
| 55 | * @msix_entries: irq handlers | ||
| 56 | * @idx: per channel data | ||
| 57 | */ | ||
| 58 | |||
| 59 | struct ioatdma_device { | ||
| 60 | struct pci_dev *pdev; | ||
| 61 | void __iomem *reg_base; | ||
| 62 | struct pci_pool *dma_pool; | ||
| 63 | struct pci_pool *completion_pool; | ||
| 64 | struct dma_device common; | ||
| 65 | u8 version; | ||
| 66 | enum ioat_interrupt irq_mode; | ||
| 67 | struct delayed_work work; | ||
| 68 | struct msix_entry msix_entries[4]; | ||
| 69 | struct ioat_dma_chan *idx[4]; | ||
| 70 | }; | ||
| 71 | |||
| 72 | /** | ||
| 73 | * struct ioat_dma_chan - internal representation of a DMA channel | ||
| 74 | */ | ||
| 75 | struct ioat_dma_chan { | ||
| 76 | |||
| 77 | void __iomem *reg_base; | ||
| 78 | |||
| 79 | dma_cookie_t completed_cookie; | ||
| 80 | unsigned long last_completion; | ||
| 81 | unsigned long last_completion_time; | ||
| 82 | |||
| 83 | size_t xfercap; /* XFERCAP register value expanded out */ | ||
| 84 | |||
| 85 | spinlock_t cleanup_lock; | ||
| 86 | spinlock_t desc_lock; | ||
| 87 | struct list_head free_desc; | ||
| 88 | struct list_head used_desc; | ||
| 89 | unsigned long watchdog_completion; | ||
| 90 | int watchdog_tcp_cookie; | ||
| 91 | u32 watchdog_last_tcp_cookie; | ||
| 92 | struct delayed_work work; | ||
| 93 | |||
| 94 | int pending; | ||
| 95 | int dmacount; | ||
| 96 | int desccount; | ||
| 97 | |||
| 98 | struct ioatdma_device *device; | ||
| 99 | struct dma_chan common; | ||
| 100 | |||
| 101 | dma_addr_t completion_addr; | ||
| 102 | union { | ||
| 103 | u64 full; /* HW completion writeback */ | ||
| 104 | struct { | ||
| 105 | u32 low; | ||
| 106 | u32 high; | ||
| 107 | }; | ||
| 108 | } *completion_virt; | ||
| 109 | unsigned long last_compl_desc_addr_hw; | ||
| 110 | struct tasklet_struct cleanup_task; | ||
| 111 | }; | ||
| 112 | |||
| 113 | /* wrapper around hardware descriptor format + additional software fields */ | ||
| 114 | |||
| 115 | /** | ||
| 116 | * struct ioat_desc_sw - wrapper around hardware descriptor | ||
| 117 | * @hw: hardware DMA descriptor | ||
| 118 | * @node: this descriptor will either be on the free list, | ||
| 119 | * or attached to a transaction list (async_tx.tx_list) | ||
| 120 | * @tx_cnt: number of descriptors required to complete the transaction | ||
| 121 | * @async_tx: the generic software descriptor for all engines | ||
| 122 | */ | ||
| 123 | struct ioat_desc_sw { | ||
| 124 | struct ioat_dma_descriptor *hw; | ||
| 125 | struct list_head node; | ||
| 126 | int tx_cnt; | ||
| 127 | size_t len; | ||
| 128 | dma_addr_t src; | ||
| 129 | dma_addr_t dst; | ||
| 130 | struct dma_async_tx_descriptor async_tx; | ||
| 131 | }; | ||
| 132 | |||
| 133 | static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev) | ||
| 134 | { | ||
| 135 | #ifdef CONFIG_NET_DMA | ||
| 136 | switch (dev->version) { | ||
| 137 | case IOAT_VER_1_2: | ||
| 138 | sysctl_tcp_dma_copybreak = 4096; | ||
| 139 | break; | ||
| 140 | case IOAT_VER_2_0: | ||
| 141 | sysctl_tcp_dma_copybreak = 2048; | ||
| 142 | break; | ||
| 143 | case IOAT_VER_3_0: | ||
| 144 | sysctl_tcp_dma_copybreak = 262144; | ||
| 145 | break; | ||
| 146 | } | ||
| 147 | #endif | ||
| 148 | } | ||
| 149 | |||
| 150 | #if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE) | ||
| 151 | struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, | ||
| 152 | void __iomem *iobase); | ||
| 153 | void ioat_dma_remove(struct ioatdma_device *device); | ||
| 154 | struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
| 155 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
| 156 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
| 157 | #else | ||
| 158 | #define ioat_dma_probe(pdev, iobase) NULL | ||
| 159 | #define ioat_dma_remove(device) do { } while (0) | ||
| 160 | #define ioat_dca_init(pdev, iobase) NULL | ||
| 161 | #define ioat2_dca_init(pdev, iobase) NULL | ||
| 162 | #define ioat3_dca_init(pdev, iobase) NULL | ||
| 163 | #endif | ||
| 164 | |||
| 165 | #endif /* IOATDMA_H */ | ||
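One note on the header removed above: ioat_set_tcp_copy_break() only matters when CONFIG_NET_DMA is set, where the TCP receive path compares each copy length against sysctl_tcp_dma_copybreak and keeps short copies on the CPU. A simplified sketch of that consumer is below; worth_offloading() is hypothetical, and the real check lives in tcp_recvmsg() alongside tests such as MSG_PEEK.

    #include <net/tcp.h>

    /* Simplified illustration, assuming CONFIG_NET_DMA: only copies
     * larger than the per-version threshold chosen by
     * ioat_set_tcp_copy_break() are worth handing to the DMA engine.
     */
    static bool worth_offloading(size_t len)
    {
            return len > sysctl_tcp_dma_copybreak;
    }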
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c index 9f6fe46a9b8..c0a272c7368 100644 --- a/drivers/dma/iovlock.c +++ b/drivers/dma/iovlock.c | |||
| @@ -183,6 +183,11 @@ dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, | |||
| 183 | iov_byte_offset, | 183 | iov_byte_offset, |
| 184 | kdata, | 184 | kdata, |
| 185 | copy); | 185 | copy); |
| 186 | /* poll for a descriptor slot */ | ||
| 187 | if (unlikely(dma_cookie < 0)) { | ||
| 188 | dma_async_issue_pending(chan); | ||
| 189 | continue; | ||
| 190 | } | ||
| 186 | 191 | ||
| 187 | len -= copy; | 192 | len -= copy; |
| 188 | iov[iovec_idx].iov_len -= copy; | 193 | iov[iovec_idx].iov_len -= copy; |
| @@ -248,6 +253,11 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, | |||
| 248 | page, | 253 | page, |
| 249 | offset, | 254 | offset, |
| 250 | copy); | 255 | copy); |
| 256 | /* poll for a descriptor slot */ | ||
| 257 | if (unlikely(dma_cookie < 0)) { | ||
| 258 | dma_async_issue_pending(chan); | ||
| 259 | continue; | ||
| 260 | } | ||
| 251 | 261 | ||
| 252 | len -= copy; | 262 | len -= copy; |
| 253 | iov[iovec_idx].iov_len -= copy; | 263 | iov[iovec_idx].iov_len -= copy; |
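The two iovlock.c hunks above add the same recovery step: when the memcpy submission comes back with a negative cookie (the channel had no free descriptor), the caller flushes pending work with dma_async_issue_pending() and retries rather than dropping the copy. A stand-alone sketch of that retry pattern; submit_with_retry() is illustrative, not code from this file.

    #include <linux/dmaengine.h>

    /* Mirrors the hunks above: dma_async_memcpy_buf_to_buf() returns a
     * negative cookie when it cannot get a descriptor, so kick the
     * channel to drain completed work and try again.
     */
    static dma_cookie_t submit_with_retry(struct dma_chan *chan,
                                          void *dst, void *src, size_t len)
    {
            dma_cookie_t cookie;

            do {
                    cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
                    if (cookie < 0)
                            /* poll for a descriptor slot */
                            dma_async_issue_pending(chan);
            } while (cookie < 0);

            return cookie;
    }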
