Diffstat (limited to 'drivers/fpga/zynq-fpga.c')
-rw-r--r--  drivers/fpga/zynq-fpga.c | 277
1 file changed, 203 insertions(+), 74 deletions(-)
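The patch below rejects bitstreams that do not carry the Xilinx sync word (0xAA995566) in byte-swapped order, i.e. the byte sequence 66 55 99 aa on a 32-bit boundary. As a stand-alone illustration of that check, a minimal C sketch (hypothetical helper name bin_has_sync, mirroring the zynq_fpga_has_sync() helper added by this patch) could look like:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative sketch only: performs the same test as the
     * zynq_fpga_has_sync() helper introduced below. A byte-swapped
     * Xilinx .bin stores the sync word 0xAA995566 as the byte
     * sequence 66 55 99 aa on a 32-bit boundary.
     */
    static bool bin_has_sync(const uint8_t *buf, size_t count)
    {
            for (; count >= 4; buf += 4, count -= 4)
                    if (buf[0] == 0x66 && buf[1] == 0x55 &&
                        buf[2] == 0x99 && buf[3] == 0xaa)
                            return true;
            return false;
    }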
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index c2fb4120bd62..34cb98139442 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/pm.h> | 30 | #include <linux/pm.h> |
31 | #include <linux/regmap.h> | 31 | #include <linux/regmap.h> |
32 | #include <linux/string.h> | 32 | #include <linux/string.h> |
33 | #include <linux/scatterlist.h> | ||
33 | 34 | ||
34 | /* Offsets into SLCR regmap */ | 35 | /* Offsets into SLCR regmap */ |
35 | 36 | ||
@@ -80,6 +81,7 @@ | |||
80 | 81 | ||
81 | /* FPGA init status */ | 82 | /* FPGA init status */ |
82 | #define STATUS_DMA_Q_F BIT(31) | 83 | #define STATUS_DMA_Q_F BIT(31) |
84 | #define STATUS_DMA_Q_E BIT(30) | ||
83 | #define STATUS_PCFG_INIT_MASK BIT(4) | 85 | #define STATUS_PCFG_INIT_MASK BIT(4) |
84 | 86 | ||
85 | /* Interrupt Status/Mask Register Bit definitions */ | 87 | /* Interrupt Status/Mask Register Bit definitions */ |
@@ -89,7 +91,7 @@ | |||
89 | #define IXR_D_P_DONE_MASK BIT(12) | 91 | #define IXR_D_P_DONE_MASK BIT(12) |
90 | /* FPGA programmed */ | 92 | /* FPGA programmed */ |
91 | #define IXR_PCFG_DONE_MASK BIT(2) | 93 | #define IXR_PCFG_DONE_MASK BIT(2) |
92 | #define IXR_ERROR_FLAGS_MASK 0x00F0F860 | 94 | #define IXR_ERROR_FLAGS_MASK 0x00F0C860 |
93 | #define IXR_ALL_MASK 0xF8F7F87F | 95 | #define IXR_ALL_MASK 0xF8F7F87F |
94 | 96 | ||
95 | /* Miscellaneous constant values */ | 97 | /* Miscellaneous constant values */ |
@@ -98,12 +100,16 @@ | |||
98 | #define DMA_INVALID_ADDRESS GENMASK(31, 0) | 100 | #define DMA_INVALID_ADDRESS GENMASK(31, 0) |
99 | /* Used to unlock the dev */ | 101 | /* Used to unlock the dev */ |
100 | #define UNLOCK_MASK 0x757bdf0d | 102 | #define UNLOCK_MASK 0x757bdf0d |
101 | /* Timeout for DMA to complete */ | ||
102 | #define DMA_DONE_TIMEOUT msecs_to_jiffies(1000) | ||
103 | /* Timeout for polling reset bits */ | 103 | /* Timeout for polling reset bits */ |
104 | #define INIT_POLL_TIMEOUT 2500000 | 104 | #define INIT_POLL_TIMEOUT 2500000 |
105 | /* Delay for polling reset bits */ | 105 | /* Delay for polling reset bits */ |
106 | #define INIT_POLL_DELAY 20 | 106 | #define INIT_POLL_DELAY 20 |
107 | /* Signal this is the last DMA transfer, wait for the AXI and PCAP before | ||
108 | * interrupting | ||
109 | */ | ||
110 | #define DMA_SRC_LAST_TRANSFER 1 | ||
111 | /* Timeout for DMA completion */ | ||
112 | #define DMA_TIMEOUT_MS 5000 | ||
107 | 113 | ||
108 | /* Masks for controlling stuff in SLCR */ | 114 | /* Masks for controlling stuff in SLCR */ |
109 | /* Disable all Level shifters */ | 115 | /* Disable all Level shifters */ |
@@ -118,13 +124,17 @@ | |||
118 | #define FPGA_RST_NONE_MASK 0x0 | 124 | #define FPGA_RST_NONE_MASK 0x0 |
119 | 125 | ||
120 | struct zynq_fpga_priv { | 126 | struct zynq_fpga_priv { |
121 | struct device *dev; | ||
122 | int irq; | 127 | int irq; |
123 | struct clk *clk; | 128 | struct clk *clk; |
124 | 129 | ||
125 | void __iomem *io_base; | 130 | void __iomem *io_base; |
126 | struct regmap *slcr; | 131 | struct regmap *slcr; |
127 | 132 | ||
133 | spinlock_t dma_lock; | ||
134 | unsigned int dma_elm; | ||
135 | unsigned int dma_nelms; | ||
136 | struct scatterlist *cur_sg; | ||
137 | |||
128 | struct completion dma_done; | 138 | struct completion dma_done; |
129 | }; | 139 | }; |
130 | 140 | ||
@@ -144,38 +154,106 @@ static inline u32 zynq_fpga_read(const struct zynq_fpga_priv *priv, | |||
144 | readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \ | 154 | readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \ |
145 | timeout_us) | 155 | timeout_us) |
146 | 156 | ||
147 | static void zynq_fpga_mask_irqs(struct zynq_fpga_priv *priv) | 157 | /* Cause the specified irq mask bits to generate IRQs */ |
158 | static inline void zynq_fpga_set_irq(struct zynq_fpga_priv *priv, u32 enable) | ||
148 | { | 159 | { |
149 | u32 intr_mask; | 160 | zynq_fpga_write(priv, INT_MASK_OFFSET, ~enable); |
150 | |||
151 | intr_mask = zynq_fpga_read(priv, INT_MASK_OFFSET); | ||
152 | zynq_fpga_write(priv, INT_MASK_OFFSET, | ||
153 | intr_mask | IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK); | ||
154 | } | 161 | } |
155 | 162 | ||
156 | static void zynq_fpga_unmask_irqs(struct zynq_fpga_priv *priv) | 163 | /* Must be called with dma_lock held */ |
164 | static void zynq_step_dma(struct zynq_fpga_priv *priv) | ||
157 | { | 165 | { |
158 | u32 intr_mask; | 166 | u32 addr; |
167 | u32 len; | ||
168 | bool first; | ||
169 | |||
170 | first = priv->dma_elm == 0; | ||
171 | while (priv->cur_sg) { | ||
172 | /* Feed the DMA queue until it is full. */ | ||
173 | if (zynq_fpga_read(priv, STATUS_OFFSET) & STATUS_DMA_Q_F) | ||
174 | break; | ||
175 | |||
176 | addr = sg_dma_address(priv->cur_sg); | ||
177 | len = sg_dma_len(priv->cur_sg); | ||
178 | if (priv->dma_elm + 1 == priv->dma_nelms) { | ||
179 | /* The last transfer waits for the PCAP to finish too, | ||
180 | * notice this also changes the irq_mask to ignore | ||
181 | * IXR_DMA_DONE_MASK which ensures we do not trigger | ||
182 | * the completion too early. | ||
183 | */ | ||
184 | addr |= DMA_SRC_LAST_TRANSFER; | ||
185 | priv->cur_sg = NULL; | ||
186 | } else { | ||
187 | priv->cur_sg = sg_next(priv->cur_sg); | ||
188 | priv->dma_elm++; | ||
189 | } | ||
190 | |||
191 | zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, addr); | ||
192 | zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, DMA_INVALID_ADDRESS); | ||
193 | zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, len / 4); | ||
194 | zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0); | ||
195 | } | ||
159 | 196 | ||
160 | intr_mask = zynq_fpga_read(priv, INT_MASK_OFFSET); | 197 | /* Once the first transfer is queued we can turn on the ISR, future |
161 | zynq_fpga_write(priv, INT_MASK_OFFSET, | 198 | * calls to zynq_step_dma will happen from the ISR context. The |
162 | intr_mask | 199 | * dma_lock spinlock guarentees this handover is done coherently, the |
163 | & ~(IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK)); | 200 | * ISR enable is put at the end to avoid another CPU spinning in the |
201 | * ISR on this lock. | ||
202 | */ | ||
203 | if (first && priv->cur_sg) { | ||
204 | zynq_fpga_set_irq(priv, | ||
205 | IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK); | ||
206 | } else if (!priv->cur_sg) { | ||
207 | /* The last transfer changes to DMA & PCAP mode since we do | ||
208 | * not want to continue until everything has been flushed into | ||
209 | * the PCAP. | ||
210 | */ | ||
211 | zynq_fpga_set_irq(priv, | ||
212 | IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK); | ||
213 | } | ||
164 | } | 214 | } |
165 | 215 | ||
166 | static irqreturn_t zynq_fpga_isr(int irq, void *data) | 216 | static irqreturn_t zynq_fpga_isr(int irq, void *data) |
167 | { | 217 | { |
168 | struct zynq_fpga_priv *priv = data; | 218 | struct zynq_fpga_priv *priv = data; |
219 | u32 intr_status; | ||
169 | 220 | ||
170 | /* disable DMA and error IRQs */ | 221 | /* If anything other than DMA completion is reported stop and hand |
171 | zynq_fpga_mask_irqs(priv); | 222 | * control back to zynq_fpga_ops_write, something went wrong, |
223 | * otherwise progress the DMA. | ||
224 | */ | ||
225 | spin_lock(&priv->dma_lock); | ||
226 | intr_status = zynq_fpga_read(priv, INT_STS_OFFSET); | ||
227 | if (!(intr_status & IXR_ERROR_FLAGS_MASK) && | ||
228 | (intr_status & IXR_DMA_DONE_MASK) && priv->cur_sg) { | ||
229 | zynq_fpga_write(priv, INT_STS_OFFSET, IXR_DMA_DONE_MASK); | ||
230 | zynq_step_dma(priv); | ||
231 | spin_unlock(&priv->dma_lock); | ||
232 | return IRQ_HANDLED; | ||
233 | } | ||
234 | spin_unlock(&priv->dma_lock); | ||
172 | 235 | ||
236 | zynq_fpga_set_irq(priv, 0); | ||
173 | complete(&priv->dma_done); | 237 | complete(&priv->dma_done); |
174 | 238 | ||
175 | return IRQ_HANDLED; | 239 | return IRQ_HANDLED; |
176 | } | 240 | } |
177 | 241 | ||
178 | static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags, | 242 | /* Sanity check the proposed bitstream. It must start with the sync word in |
243 | * the correct byte order, and be dword aligned. The input is a Xilinx .bin | ||
244 | * file with every 32 bit quantity swapped. | ||
245 | */ | ||
246 | static bool zynq_fpga_has_sync(const u8 *buf, size_t count) | ||
247 | { | ||
248 | for (; count >= 4; buf += 4, count -= 4) | ||
249 | if (buf[0] == 0x66 && buf[1] == 0x55 && buf[2] == 0x99 && | ||
250 | buf[3] == 0xaa) | ||
251 | return true; | ||
252 | return false; | ||
253 | } | ||
254 | |||
255 | static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, | ||
256 | struct fpga_image_info *info, | ||
179 | const char *buf, size_t count) | 257 | const char *buf, size_t count) |
180 | { | 258 | { |
181 | struct zynq_fpga_priv *priv; | 259 | struct zynq_fpga_priv *priv; |
@@ -189,7 +267,14 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags, | |||
189 | return err; | 267 | return err; |
190 | 268 | ||
191 | /* don't globally reset PL if we're doing partial reconfig */ | 269 | /* don't globally reset PL if we're doing partial reconfig */ |
192 | if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) { | 270 | if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) { |
271 | if (!zynq_fpga_has_sync(buf, count)) { | ||
272 | dev_err(&mgr->dev, | ||
273 | "Invalid bitstream, could not find a sync word. Bitstream must be a byte swapped .bin file\n"); | ||
274 | err = -EINVAL; | ||
275 | goto out_err; | ||
276 | } | ||
277 | |||
193 | /* assert AXI interface resets */ | 278 | /* assert AXI interface resets */ |
194 | regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET, | 279 | regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET, |
195 | FPGA_RST_ALL_MASK); | 280 | FPGA_RST_ALL_MASK); |
@@ -217,7 +302,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags, | |||
217 | INIT_POLL_DELAY, | 302 | INIT_POLL_DELAY, |
218 | INIT_POLL_TIMEOUT); | 303 | INIT_POLL_TIMEOUT); |
219 | if (err) { | 304 | if (err) { |
220 | dev_err(priv->dev, "Timeout waiting for PCFG_INIT"); | 305 | dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n"); |
221 | goto out_err; | 306 | goto out_err; |
222 | } | 307 | } |
223 | 308 | ||
@@ -231,7 +316,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags, | |||
231 | INIT_POLL_DELAY, | 316 | INIT_POLL_DELAY, |
232 | INIT_POLL_TIMEOUT); | 317 | INIT_POLL_TIMEOUT); |
233 | if (err) { | 318 | if (err) { |
234 | dev_err(priv->dev, "Timeout waiting for !PCFG_INIT"); | 319 | dev_err(&mgr->dev, "Timeout waiting for !PCFG_INIT\n"); |
235 | goto out_err; | 320 | goto out_err; |
236 | } | 321 | } |
237 | 322 | ||
@@ -245,7 +330,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags, | |||
245 | INIT_POLL_DELAY, | 330 | INIT_POLL_DELAY, |
246 | INIT_POLL_TIMEOUT); | 331 | INIT_POLL_TIMEOUT); |
247 | if (err) { | 332 | if (err) { |
248 | dev_err(priv->dev, "Timeout waiting for PCFG_INIT"); | 333 | dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n"); |
249 | goto out_err; | 334 | goto out_err; |
250 | } | 335 | } |
251 | } | 336 | } |
@@ -259,10 +344,11 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags, | |||
259 | zynq_fpga_write(priv, CTRL_OFFSET, | 344 | zynq_fpga_write(priv, CTRL_OFFSET, |
260 | (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK | ctrl)); | 345 | (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK | ctrl)); |
261 | 346 | ||
262 | /* check that we have room in the command queue */ | 347 | /* We expect that the command queue is empty right now. */ |
263 | status = zynq_fpga_read(priv, STATUS_OFFSET); | 348 | status = zynq_fpga_read(priv, STATUS_OFFSET); |
264 | if (status & STATUS_DMA_Q_F) { | 349 | if ((status & STATUS_DMA_Q_F) || |
265 | dev_err(priv->dev, "DMA command queue full"); | 350 | (status & STATUS_DMA_Q_E) != STATUS_DMA_Q_E) { |
351 | dev_err(&mgr->dev, "DMA command queue not right\n"); | ||
266 | err = -EBUSY; | 352 | err = -EBUSY; |
267 | goto out_err; | 353 | goto out_err; |
268 | } | 354 | } |
@@ -281,25 +367,36 @@ out_err: | |||
281 | return err; | 367 | return err; |
282 | } | 368 | } |
283 | 369 | ||
284 | static int zynq_fpga_ops_write(struct fpga_manager *mgr, | 370 | static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) |
285 | const char *buf, size_t count) | ||
286 | { | 371 | { |
287 | struct zynq_fpga_priv *priv; | 372 | struct zynq_fpga_priv *priv; |
373 | const char *why; | ||
288 | int err; | 374 | int err; |
289 | char *kbuf; | ||
290 | size_t in_count; | ||
291 | dma_addr_t dma_addr; | ||
292 | u32 transfer_length; | ||
293 | u32 intr_status; | 375 | u32 intr_status; |
376 | unsigned long timeout; | ||
377 | unsigned long flags; | ||
378 | struct scatterlist *sg; | ||
379 | int i; | ||
294 | 380 | ||
295 | in_count = count; | ||
296 | priv = mgr->priv; | 381 | priv = mgr->priv; |
297 | 382 | ||
298 | kbuf = dma_alloc_coherent(priv->dev, count, &dma_addr, GFP_KERNEL); | 383 | /* The hardware can only DMA multiples of 4 bytes, and it requires the |
299 | if (!kbuf) | 384 | * starting addresses to be aligned to 64 bits (UG585 pg 212). |
300 | return -ENOMEM; | 385 | */ |
386 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
387 | if ((sg->offset % 8) || (sg->length % 4)) { | ||
388 | dev_err(&mgr->dev, | ||
389 | "Invalid bitstream, chunks must be aligned\n"); | ||
390 | return -EINVAL; | ||
391 | } | ||
392 | } | ||
301 | 393 | ||
302 | memcpy(kbuf, buf, count); | 394 | priv->dma_nelms = |
395 | dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE); | ||
396 | if (priv->dma_nelms == 0) { | ||
397 | dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n"); | ||
398 | return -ENOMEM; | ||
399 | } | ||
303 | 400 | ||
304 | /* enable clock */ | 401 | /* enable clock */ |
305 | err = clk_enable(priv->clk); | 402 | err = clk_enable(priv->clk); |
@@ -307,43 +404,72 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, | |||
307 | goto out_free; | 404 | goto out_free; |
308 | 405 | ||
309 | zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK); | 406 | zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK); |
310 | |||
311 | reinit_completion(&priv->dma_done); | 407 | reinit_completion(&priv->dma_done); |
312 | 408 | ||
313 | /* enable DMA and error IRQs */ | 409 | /* zynq_step_dma will turn on interrupts */ |
314 | zynq_fpga_unmask_irqs(priv); | 410 | spin_lock_irqsave(&priv->dma_lock, flags); |
411 | priv->dma_elm = 0; | ||
412 | priv->cur_sg = sgt->sgl; | ||
413 | zynq_step_dma(priv); | ||
414 | spin_unlock_irqrestore(&priv->dma_lock, flags); | ||
315 | 415 | ||
316 | /* the +1 in the src addr is used to hold off on DMA_DONE IRQ | 416 | timeout = wait_for_completion_timeout(&priv->dma_done, |
317 | * until both AXI and PCAP are done ... | 417 | msecs_to_jiffies(DMA_TIMEOUT_MS)); |
318 | */ | ||
319 | zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, (u32)(dma_addr) + 1); | ||
320 | zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, (u32)DMA_INVALID_ADDRESS); | ||
321 | 418 | ||
322 | /* convert #bytes to #words */ | 419 | spin_lock_irqsave(&priv->dma_lock, flags); |
323 | transfer_length = (count + 3) / 4; | 420 | zynq_fpga_set_irq(priv, 0); |
421 | priv->cur_sg = NULL; | ||
422 | spin_unlock_irqrestore(&priv->dma_lock, flags); | ||
324 | 423 | ||
325 | zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, transfer_length); | 424 | intr_status = zynq_fpga_read(priv, INT_STS_OFFSET); |
326 | zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0); | 425 | zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK); |
327 | 426 | ||
328 | wait_for_completion(&priv->dma_done); | 427 | /* There doesn't seem to be a way to force cancel any DMA, so if |
428 | * something went wrong we are relying on the hardware to have halted | ||
429 | * the DMA before we get here, if there was we could use | ||
430 | * wait_for_completion_interruptible too. | ||
431 | */ | ||
329 | 432 | ||
330 | intr_status = zynq_fpga_read(priv, INT_STS_OFFSET); | 433 | if (intr_status & IXR_ERROR_FLAGS_MASK) { |
331 | zynq_fpga_write(priv, INT_STS_OFFSET, intr_status); | 434 | why = "DMA reported error"; |
435 | err = -EIO; | ||
436 | goto out_report; | ||
437 | } | ||
332 | 438 | ||
333 | if (!((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) { | 439 | if (priv->cur_sg || |
334 | dev_err(priv->dev, "Error configuring FPGA"); | 440 | !((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) { |
335 | err = -EFAULT; | 441 | if (timeout == 0) |
442 | why = "DMA timed out"; | ||
443 | else | ||
444 | why = "DMA did not complete"; | ||
445 | err = -EIO; | ||
446 | goto out_report; | ||
336 | } | 447 | } |
337 | 448 | ||
449 | err = 0; | ||
450 | goto out_clk; | ||
451 | |||
452 | out_report: | ||
453 | dev_err(&mgr->dev, | ||
454 | "%s: INT_STS:0x%x CTRL:0x%x LOCK:0x%x INT_MASK:0x%x STATUS:0x%x MCTRL:0x%x\n", | ||
455 | why, | ||
456 | intr_status, | ||
457 | zynq_fpga_read(priv, CTRL_OFFSET), | ||
458 | zynq_fpga_read(priv, LOCK_OFFSET), | ||
459 | zynq_fpga_read(priv, INT_MASK_OFFSET), | ||
460 | zynq_fpga_read(priv, STATUS_OFFSET), | ||
461 | zynq_fpga_read(priv, MCTRL_OFFSET)); | ||
462 | |||
463 | out_clk: | ||
338 | clk_disable(priv->clk); | 464 | clk_disable(priv->clk); |
339 | 465 | ||
340 | out_free: | 466 | out_free: |
341 | dma_free_coherent(priv->dev, in_count, kbuf, dma_addr); | 467 | dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE); |
342 | |||
343 | return err; | 468 | return err; |
344 | } | 469 | } |
345 | 470 | ||
346 | static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, u32 flags) | 471 | static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, |
472 | struct fpga_image_info *info) | ||
347 | { | 473 | { |
348 | struct zynq_fpga_priv *priv = mgr->priv; | 474 | struct zynq_fpga_priv *priv = mgr->priv; |
349 | int err; | 475 | int err; |
@@ -364,7 +490,7 @@ static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, u32 flags) | |||
364 | return err; | 490 | return err; |
365 | 491 | ||
366 | /* for the partial reconfig case we didn't touch the level shifters */ | 492 | /* for the partial reconfig case we didn't touch the level shifters */ |
367 | if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) { | 493 | if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) { |
368 | /* enable level shifters from PL to PS */ | 494 | /* enable level shifters from PL to PS */ |
369 | regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET, | 495 | regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET, |
370 | LVL_SHFTR_ENABLE_PL_TO_PS); | 496 | LVL_SHFTR_ENABLE_PL_TO_PS); |
@@ -399,9 +525,10 @@ static enum fpga_mgr_states zynq_fpga_ops_state(struct fpga_manager *mgr) | |||
399 | } | 525 | } |
400 | 526 | ||
401 | static const struct fpga_manager_ops zynq_fpga_ops = { | 527 | static const struct fpga_manager_ops zynq_fpga_ops = { |
528 | .initial_header_size = 128, | ||
402 | .state = zynq_fpga_ops_state, | 529 | .state = zynq_fpga_ops_state, |
403 | .write_init = zynq_fpga_ops_write_init, | 530 | .write_init = zynq_fpga_ops_write_init, |
404 | .write = zynq_fpga_ops_write, | 531 | .write_sg = zynq_fpga_ops_write, |
405 | .write_complete = zynq_fpga_ops_write_complete, | 532 | .write_complete = zynq_fpga_ops_write_complete, |
406 | }; | 533 | }; |
407 | 534 | ||
@@ -415,8 +542,7 @@ static int zynq_fpga_probe(struct platform_device *pdev) | |||
415 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | 542 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); |
416 | if (!priv) | 543 | if (!priv) |
417 | return -ENOMEM; | 544 | return -ENOMEM; |
418 | 545 | spin_lock_init(&priv->dma_lock); | |
419 | priv->dev = dev; | ||
420 | 546 | ||
421 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 547 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
422 | priv->io_base = devm_ioremap_resource(dev, res); | 548 | priv->io_base = devm_ioremap_resource(dev, res); |
@@ -426,7 +552,7 @@ static int zynq_fpga_probe(struct platform_device *pdev) | |||
426 | priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node, | 552 | priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node, |
427 | "syscon"); | 553 | "syscon"); |
428 | if (IS_ERR(priv->slcr)) { | 554 | if (IS_ERR(priv->slcr)) { |
429 | dev_err(dev, "unable to get zynq-slcr regmap"); | 555 | dev_err(dev, "unable to get zynq-slcr regmap\n"); |
430 | return PTR_ERR(priv->slcr); | 556 | return PTR_ERR(priv->slcr); |
431 | } | 557 | } |
432 | 558 | ||
@@ -434,38 +560,41 @@ static int zynq_fpga_probe(struct platform_device *pdev) | |||
434 | 560 | ||
435 | priv->irq = platform_get_irq(pdev, 0); | 561 | priv->irq = platform_get_irq(pdev, 0); |
436 | if (priv->irq < 0) { | 562 | if (priv->irq < 0) { |
437 | dev_err(dev, "No IRQ available"); | 563 | dev_err(dev, "No IRQ available\n"); |
438 | return priv->irq; | 564 | return priv->irq; |
439 | } | 565 | } |
440 | 566 | ||
441 | err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, | ||
442 | dev_name(dev), priv); | ||
443 | if (err) { | ||
444 | dev_err(dev, "unable to request IRQ"); | ||
445 | return err; | ||
446 | } | ||
447 | |||
448 | priv->clk = devm_clk_get(dev, "ref_clk"); | 567 | priv->clk = devm_clk_get(dev, "ref_clk"); |
449 | if (IS_ERR(priv->clk)) { | 568 | if (IS_ERR(priv->clk)) { |
450 | dev_err(dev, "input clock not found"); | 569 | dev_err(dev, "input clock not found\n"); |
451 | return PTR_ERR(priv->clk); | 570 | return PTR_ERR(priv->clk); |
452 | } | 571 | } |
453 | 572 | ||
454 | err = clk_prepare_enable(priv->clk); | 573 | err = clk_prepare_enable(priv->clk); |
455 | if (err) { | 574 | if (err) { |
456 | dev_err(dev, "unable to enable clock"); | 575 | dev_err(dev, "unable to enable clock\n"); |
457 | return err; | 576 | return err; |
458 | } | 577 | } |
459 | 578 | ||
460 | /* unlock the device */ | 579 | /* unlock the device */ |
461 | zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK); | 580 | zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK); |
462 | 581 | ||
582 | zynq_fpga_set_irq(priv, 0); | ||
583 | zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK); | ||
584 | err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev), | ||
585 | priv); | ||
586 | if (err) { | ||
587 | dev_err(dev, "unable to request IRQ\n"); | ||
588 | clk_disable_unprepare(priv->clk); | ||
589 | return err; | ||
590 | } | ||
591 | |||
463 | clk_disable(priv->clk); | 592 | clk_disable(priv->clk); |
464 | 593 | ||
465 | err = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager", | 594 | err = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager", |
466 | &zynq_fpga_ops, priv); | 595 | &zynq_fpga_ops, priv); |
467 | if (err) { | 596 | if (err) { |
468 | dev_err(dev, "unable to register FPGA manager"); | 597 | dev_err(dev, "unable to register FPGA manager\n"); |
469 | clk_unprepare(priv->clk); | 598 | clk_unprepare(priv->clk); |
470 | return err; | 599 | return err; |
471 | } | 600 | } |