Diffstat (limited to 'drivers/spi/spi-dw.c')
-rw-r--r-- | drivers/spi/spi-dw.c | 936 |
1 files changed, 936 insertions, 0 deletions
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
new file mode 100644
index 000000000000..ece5f69deaaf
--- /dev/null
+++ b/drivers/spi/spi-dw.c
@@ -0,0 +1,936 @@
1 | /* | ||
2 | * DesignWare SPI core controller driver (refer to pxa2xx_spi.c) | ||
3 | * | ||
4 | * Copyright (c) 2009, Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | */ | ||
19 | |||
20 | #include <linux/dma-mapping.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/highmem.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/spi/spi.h> | ||
26 | |||
27 | #include "spi-dw.h" | ||
28 | |||
29 | #ifdef CONFIG_DEBUG_FS | ||
30 | #include <linux/debugfs.h> | ||
31 | #endif | ||
32 | |||
33 | #define START_STATE ((void *)0) | ||
34 | #define RUNNING_STATE ((void *)1) | ||
35 | #define DONE_STATE ((void *)2) | ||
36 | #define ERROR_STATE ((void *)-1) | ||
37 | |||
38 | #define QUEUE_RUNNING 0 | ||
39 | #define QUEUE_STOPPED 1 | ||
40 | |||
41 | #define MRST_SPI_DEASSERT 0 | ||
42 | #define MRST_SPI_ASSERT 1 | ||
43 | |||
44 | /* Slave spi_dev related */ | ||
45 | struct chip_data { | ||
46 | u16 cr0; | ||
47 | u8 cs; /* chip select pin */ | ||
48 | u8 n_bytes; /* current is a 1/2/4 byte op */ | ||
49 | u8 tmode; /* TR/TO/RO/EEPROM */ | ||
50 | u8 type; /* SPI/SSP/MicroWire */ | ||
51 | |||
52 | u8 poll_mode; /* 1 means use poll mode */ | ||
53 | |||
54 | u32 dma_width; | ||
55 | u32 rx_threshold; | ||
56 | u32 tx_threshold; | ||
57 | u8 enable_dma; | ||
58 | u8 bits_per_word; | ||
59 | u16 clk_div; /* baud rate divider */ | ||
60 | u32 speed_hz; /* baud rate */ | ||
61 | void (*cs_control)(u32 command); | ||
62 | }; | ||
63 | |||
64 | #ifdef CONFIG_DEBUG_FS | ||
65 | static int spi_show_regs_open(struct inode *inode, struct file *file) | ||
66 | { | ||
67 | file->private_data = inode->i_private; | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | #define SPI_REGS_BUFSIZE 1024 | ||
72 | static ssize_t spi_show_regs(struct file *file, char __user *user_buf, | ||
73 | size_t count, loff_t *ppos) | ||
74 | { | ||
75 | struct dw_spi *dws; | ||
76 | char *buf; | ||
77 | u32 len = 0; | ||
78 | ssize_t ret; | ||
79 | |||
80 | dws = file->private_data; | ||
81 | |||
82 | buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL); | ||
83 | if (!buf) | ||
84 | return 0; | ||
85 | |||
86 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
87 | "MRST SPI0 registers:\n"); | ||
88 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
89 | "=================================\n"); | ||
90 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
91 | "CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0)); | ||
92 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
93 | "CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1)); | ||
94 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
95 | "SSIENR: \t0x%08x\n", dw_readl(dws, ssienr)); | ||
96 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
97 | "SER: \t\t0x%08x\n", dw_readl(dws, ser)); | ||
98 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
99 | "BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr)); | ||
100 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
101 | "TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr)); | ||
102 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
103 | "RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr)); | ||
104 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
105 | "TXFLR: \t\t0x%08x\n", dw_readl(dws, txflr)); | ||
106 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
107 | "RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr)); | ||
108 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
109 | "SR: \t\t0x%08x\n", dw_readl(dws, sr)); | ||
110 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
111 | "IMR: \t\t0x%08x\n", dw_readl(dws, imr)); | ||
112 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
113 | "ISR: \t\t0x%08x\n", dw_readl(dws, isr)); | ||
114 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
115 | "DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr)); | ||
116 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
117 | "DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr)); | ||
118 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
119 | "DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr)); | ||
120 | len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, | ||
121 | "=================================\n"); | ||
122 | |||
123 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); | ||
124 | kfree(buf); | ||
125 | return ret; | ||
126 | } | ||
127 | |||
128 | static const struct file_operations mrst_spi_regs_ops = { | ||
129 | .owner = THIS_MODULE, | ||
130 | .open = spi_show_regs_open, | ||
131 | .read = spi_show_regs, | ||
132 | .llseek = default_llseek, | ||
133 | }; | ||
134 | |||
135 | static int mrst_spi_debugfs_init(struct dw_spi *dws) | ||
136 | { | ||
137 | dws->debugfs = debugfs_create_dir("mrst_spi", NULL); | ||
138 | if (!dws->debugfs) | ||
139 | return -ENOMEM; | ||
140 | |||
141 | debugfs_create_file("registers", S_IFREG | S_IRUGO, | ||
142 | dws->debugfs, (void *)dws, &mrst_spi_regs_ops); | ||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | static void mrst_spi_debugfs_remove(struct dw_spi *dws) | ||
147 | { | ||
148 | if (dws->debugfs) | ||
149 | debugfs_remove_recursive(dws->debugfs); | ||
150 | } | ||
151 | |||
152 | #else | ||
153 | static inline int mrst_spi_debugfs_init(struct dw_spi *dws) | ||
154 | { | ||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) | ||
159 | { | ||
160 | } | ||
161 | #endif /* CONFIG_DEBUG_FS */ | ||
162 | |||
163 | /* Return the max entries we can fill into tx fifo */ | ||
164 | static inline u32 tx_max(struct dw_spi *dws) | ||
165 | { | ||
166 | u32 tx_left, tx_room, rxtx_gap; | ||
167 | |||
168 | tx_left = (dws->tx_end - dws->tx) / dws->n_bytes; | ||
169 | tx_room = dws->fifo_len - dw_readw(dws, txflr); | ||
170 | |||
171 | /* | ||
172 | * Another concern is the tx/rx mismatch: we thought of using | ||
173 | * (dws->fifo_len - rxflr - txflr) as the tx limit, but that | ||
174 | * misses data which has left the tx fifo but still sits in the | ||
175 | * shift register rather than the rx fifo. So the in-flight count | ||
176 | * (frames written but not yet read) is tracked in software and | ||
177 | * tx is capped so that it never exceeds the fifo depth. | ||
178 | */ | ||
179 | rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx)) | ||
180 | / dws->n_bytes; | ||
181 | |||
182 | return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap)); | ||
183 | } | ||
184 | |||
185 | /* Return the max entries we should read out of rx fifo */ | ||
186 | static inline u32 rx_max(struct dw_spi *dws) | ||
187 | { | ||
188 | u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes; | ||
189 | |||
190 | return min(rx_left, (u32)dw_readw(dws, rxflr)); | ||
191 | } | ||
192 | |||
193 | static void dw_writer(struct dw_spi *dws) | ||
194 | { | ||
195 | u32 max = tx_max(dws); | ||
196 | u16 txw = 0; | ||
197 | |||
198 | while (max--) { | ||
199 | /* Set the tx word only if the transfer's original "tx" is not null; tx_end - len is zero only when tx_buf was NULL (tx_end = tx_buf + len) */ | ||
200 | if (dws->tx_end - dws->len) { | ||
201 | if (dws->n_bytes == 1) | ||
202 | txw = *(u8 *)(dws->tx); | ||
203 | else | ||
204 | txw = *(u16 *)(dws->tx); | ||
205 | } | ||
206 | dw_writew(dws, dr, txw); | ||
207 | dws->tx += dws->n_bytes; | ||
208 | } | ||
209 | } | ||
210 | |||
211 | static void dw_reader(struct dw_spi *dws) | ||
212 | { | ||
213 | u32 max = rx_max(dws); | ||
214 | u16 rxw; | ||
215 | |||
216 | while (max--) { | ||
217 | rxw = dw_readw(dws, dr); | ||
219 | /* Store rx data only if the transfer's original "rx" is not null; rx_end - len is zero only when rx_buf was NULL */ | ||
219 | if (dws->rx_end - dws->len) { | ||
220 | if (dws->n_bytes == 1) | ||
221 | *(u8 *)(dws->rx) = rxw; | ||
222 | else | ||
223 | *(u16 *)(dws->rx) = rxw; | ||
224 | } | ||
225 | dws->rx += dws->n_bytes; | ||
226 | } | ||
227 | } | ||
228 | |||
229 | static void *next_transfer(struct dw_spi *dws) | ||
230 | { | ||
231 | struct spi_message *msg = dws->cur_msg; | ||
232 | struct spi_transfer *trans = dws->cur_transfer; | ||
233 | |||
234 | /* Move to next transfer */ | ||
235 | if (trans->transfer_list.next != &msg->transfers) { | ||
236 | dws->cur_transfer = | ||
237 | list_entry(trans->transfer_list.next, | ||
238 | struct spi_transfer, | ||
239 | transfer_list); | ||
240 | return RUNNING_STATE; | ||
241 | } else | ||
242 | return DONE_STATE; | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Note: the protocol driver is expected to have prepared | ||
247 | * dma-capable memory beforehand; this function only picks up | ||
248 | * the pre-mapped dma addresses for the current transfer. | ||
249 | */ | ||
250 | static int map_dma_buffers(struct dw_spi *dws) | ||
251 | { | ||
252 | if (!dws->cur_msg->is_dma_mapped | ||
253 | || !dws->dma_inited | ||
254 | || !dws->cur_chip->enable_dma | ||
255 | || !dws->dma_ops) | ||
256 | return 0; | ||
257 | |||
258 | if (dws->cur_transfer->tx_dma) | ||
259 | dws->tx_dma = dws->cur_transfer->tx_dma; | ||
260 | |||
261 | if (dws->cur_transfer->rx_dma) | ||
262 | dws->rx_dma = dws->cur_transfer->rx_dma; | ||
263 | |||
264 | return 1; | ||
265 | } | ||
266 | |||
267 | /* Caller already set message->status; dma and pio irqs are blocked */ | ||
268 | static void giveback(struct dw_spi *dws) | ||
269 | { | ||
270 | struct spi_transfer *last_transfer; | ||
271 | unsigned long flags; | ||
272 | struct spi_message *msg; | ||
273 | |||
274 | spin_lock_irqsave(&dws->lock, flags); | ||
275 | msg = dws->cur_msg; | ||
276 | dws->cur_msg = NULL; | ||
277 | dws->cur_transfer = NULL; | ||
278 | dws->prev_chip = dws->cur_chip; | ||
279 | dws->cur_chip = NULL; | ||
280 | dws->dma_mapped = 0; | ||
281 | queue_work(dws->workqueue, &dws->pump_messages); | ||
282 | spin_unlock_irqrestore(&dws->lock, flags); | ||
283 | |||
284 | last_transfer = list_entry(msg->transfers.prev, | ||
285 | struct spi_transfer, | ||
286 | transfer_list); | ||
287 | |||
288 | if (!last_transfer->cs_change && dws->cs_control) | ||
289 | dws->cs_control(MRST_SPI_DEASSERT); | ||
290 | |||
291 | msg->state = NULL; | ||
292 | if (msg->complete) | ||
293 | msg->complete(msg->context); | ||
294 | } | ||
295 | |||
296 | static void int_error_stop(struct dw_spi *dws, const char *msg) | ||
297 | { | ||
298 | /* Stop the hw */ | ||
299 | spi_enable_chip(dws, 0); | ||
300 | |||
301 | dev_err(&dws->master->dev, "%s\n", msg); | ||
302 | dws->cur_msg->state = ERROR_STATE; | ||
303 | tasklet_schedule(&dws->pump_transfers); | ||
304 | } | ||
305 | |||
306 | void dw_spi_xfer_done(struct dw_spi *dws) | ||
307 | { | ||
308 | /* Update the total bytes transferred (reported via msg->actual_length) */ | ||
309 | dws->cur_msg->actual_length += dws->len; | ||
310 | |||
311 | /* Move to next transfer */ | ||
312 | dws->cur_msg->state = next_transfer(dws); | ||
313 | |||
314 | /* Handle end of message */ | ||
315 | if (dws->cur_msg->state == DONE_STATE) { | ||
316 | dws->cur_msg->status = 0; | ||
317 | giveback(dws); | ||
318 | } else | ||
319 | tasklet_schedule(&dws->pump_transfers); | ||
320 | } | ||
321 | EXPORT_SYMBOL_GPL(dw_spi_xfer_done); | ||
322 | |||
323 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) | ||
324 | { | ||
325 | u16 irq_status = dw_readw(dws, isr); | ||
326 | |||
327 | /* Error handling */ | ||
328 | if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) { | ||
329 | dw_readw(dws, txoicr); | ||
330 | dw_readw(dws, rxoicr); | ||
331 | dw_readw(dws, rxuicr); | ||
332 | int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun"); | ||
333 | return IRQ_HANDLED; | ||
334 | } | ||
335 | |||
336 | dw_reader(dws); | ||
337 | if (dws->rx_end == dws->rx) { | ||
338 | spi_mask_intr(dws, SPI_INT_TXEI); | ||
339 | dw_spi_xfer_done(dws); | ||
340 | return IRQ_HANDLED; | ||
341 | } | ||
342 | if (irq_status & SPI_INT_TXEI) { | ||
343 | spi_mask_intr(dws, SPI_INT_TXEI); | ||
344 | dw_writer(dws); | ||
345 | /* Always re-enable the TX irq; it is masked again once RX has finished */ | ||
346 | spi_umask_intr(dws, SPI_INT_TXEI); | ||
347 | } | ||
348 | |||
349 | return IRQ_HANDLED; | ||
350 | } | ||
351 | |||
352 | static irqreturn_t dw_spi_irq(int irq, void *dev_id) | ||
353 | { | ||
354 | struct dw_spi *dws = dev_id; | ||
355 | u16 irq_status = dw_readw(dws, isr) & 0x3f; | ||
356 | |||
357 | if (!irq_status) | ||
358 | return IRQ_NONE; | ||
359 | |||
360 | if (!dws->cur_msg) { | ||
361 | spi_mask_intr(dws, SPI_INT_TXEI); | ||
362 | return IRQ_HANDLED; | ||
363 | } | ||
364 | |||
365 | return dws->transfer_handler(dws); | ||
366 | } | ||
367 | |||
368 | /* Must be called inside pump_transfers() */ | ||
369 | static void poll_transfer(struct dw_spi *dws) | ||
370 | { | ||
371 | do { | ||
372 | dw_writer(dws); | ||
373 | dw_reader(dws); | ||
374 | cpu_relax(); | ||
375 | } while (dws->rx_end > dws->rx); | ||
376 | |||
377 | dw_spi_xfer_done(dws); | ||
378 | } | ||
379 | |||
380 | static void pump_transfers(unsigned long data) | ||
381 | { | ||
382 | struct dw_spi *dws = (struct dw_spi *)data; | ||
383 | struct spi_message *message = NULL; | ||
384 | struct spi_transfer *transfer = NULL; | ||
385 | struct spi_transfer *previous = NULL; | ||
386 | struct spi_device *spi = NULL; | ||
387 | struct chip_data *chip = NULL; | ||
388 | u8 bits = 0; | ||
389 | u8 imask = 0; | ||
390 | u8 cs_change = 0; | ||
391 | u16 txint_level = 0; | ||
392 | u16 clk_div = 0; | ||
393 | u32 speed = 0; | ||
394 | u32 cr0 = 0; | ||
395 | |||
396 | /* Get current state information */ | ||
397 | message = dws->cur_msg; | ||
398 | transfer = dws->cur_transfer; | ||
399 | chip = dws->cur_chip; | ||
400 | spi = message->spi; | ||
401 | |||
402 | if (unlikely(!chip->clk_div)) | ||
403 | chip->clk_div = dws->max_freq / chip->speed_hz; | ||
404 | |||
405 | if (message->state == ERROR_STATE) { | ||
406 | message->status = -EIO; | ||
407 | goto early_exit; | ||
408 | } | ||
409 | |||
410 | /* Handle end of message */ | ||
411 | if (message->state == DONE_STATE) { | ||
412 | message->status = 0; | ||
413 | goto early_exit; | ||
414 | } | ||
415 | |||
416 | /* Delay if requested at the end of the previous transfer */ | ||
417 | if (message->state == RUNNING_STATE) { | ||
418 | previous = list_entry(transfer->transfer_list.prev, | ||
419 | struct spi_transfer, | ||
420 | transfer_list); | ||
421 | if (previous->delay_usecs) | ||
422 | udelay(previous->delay_usecs); | ||
423 | } | ||
424 | |||
425 | dws->n_bytes = chip->n_bytes; | ||
426 | dws->dma_width = chip->dma_width; | ||
427 | dws->cs_control = chip->cs_control; | ||
428 | |||
429 | dws->rx_dma = transfer->rx_dma; | ||
430 | dws->tx_dma = transfer->tx_dma; | ||
431 | dws->tx = (void *)transfer->tx_buf; | ||
432 | dws->tx_end = dws->tx + transfer->len; | ||
433 | dws->rx = transfer->rx_buf; | ||
434 | dws->rx_end = dws->rx + transfer->len; | ||
435 | dws->cs_change = transfer->cs_change; | ||
436 | dws->len = dws->cur_transfer->len; | ||
437 | if (chip != dws->prev_chip) | ||
438 | cs_change = 1; | ||
439 | |||
440 | cr0 = chip->cr0; | ||
441 | |||
442 | /* Handle per transfer options for bpw and speed */ | ||
443 | if (transfer->speed_hz) { | ||
444 | speed = chip->speed_hz; | ||
445 | |||
446 | if (transfer->speed_hz != speed) { | ||
447 | speed = transfer->speed_hz; | ||
448 | if (speed > dws->max_freq) { | ||
449 | printk(KERN_ERR "MRST SPI0: unsupported " | ||
450 | "freq: %dHz\n", speed); | ||
451 | message->status = -EIO; | ||
452 | goto early_exit; | ||
453 | } | ||
454 | |||
455 | /* clk_div must be even; odd values are rounded up */ | ||
456 | clk_div = dws->max_freq / speed; | ||
457 | clk_div = (clk_div + 1) & 0xfffe; | ||
458 | |||
459 | chip->speed_hz = speed; | ||
460 | chip->clk_div = clk_div; | ||
461 | } | ||
462 | } | ||
463 | if (transfer->bits_per_word) { | ||
464 | bits = transfer->bits_per_word; | ||
465 | |||
466 | switch (bits) { | ||
467 | case 8: | ||
468 | case 16: | ||
469 | dws->n_bytes = dws->dma_width = bits >> 3; | ||
470 | break; | ||
471 | default: | ||
472 | printk(KERN_ERR "MRST SPI0: unsupported bits: " | ||
473 | "%db\n", bits); | ||
474 | message->status = -EIO; | ||
475 | goto early_exit; | ||
476 | } | ||
477 | |||
478 | cr0 = (bits - 1) | ||
479 | | (chip->type << SPI_FRF_OFFSET) | ||
480 | | (spi->mode << SPI_MODE_OFFSET) | ||
481 | | (chip->tmode << SPI_TMOD_OFFSET); | ||
482 | } | ||
483 | message->state = RUNNING_STATE; | ||
484 | |||
485 | /* | ||
486 | * Adjust transfer mode if necessary. Requires platform dependent | ||
487 | * chipselect mechanism. | ||
488 | */ | ||
489 | if (dws->cs_control) { | ||
490 | if (dws->rx && dws->tx) | ||
491 | chip->tmode = SPI_TMOD_TR; | ||
492 | else if (dws->rx) | ||
493 | chip->tmode = SPI_TMOD_RO; | ||
494 | else | ||
495 | chip->tmode = SPI_TMOD_TO; | ||
496 | |||
497 | cr0 &= ~SPI_TMOD_MASK; | ||
498 | cr0 |= (chip->tmode << SPI_TMOD_OFFSET); | ||
499 | } | ||
500 | |||
501 | /* Check if current transfer is a DMA transaction */ | ||
502 | dws->dma_mapped = map_dma_buffers(dws); | ||
503 | |||
504 | /* | ||
505 | * Interrupt mode: | ||
506 | * we only need to set the TXEI IRQ, as TX/RX always happen synchronously | ||
507 | */ | ||
508 | if (!dws->dma_mapped && !chip->poll_mode) { | ||
509 | int templen = dws->len / dws->n_bytes; | ||
510 | txint_level = dws->fifo_len / 2; | ||
511 | txint_level = (templen > txint_level) ? txint_level : templen; | ||
512 | |||
513 | imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI; | ||
514 | dws->transfer_handler = interrupt_transfer; | ||
515 | } | ||
516 | |||
517 | /* | ||
518 | * Reprogram the registers only if | ||
519 | * 1. the chip select changes | ||
520 | * 2. clk_div is changed | ||
521 | * 3. the control value or the interrupt mask changes | ||
522 | */ | ||
523 | if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) { | ||
524 | spi_enable_chip(dws, 0); | ||
525 | |||
526 | if (dw_readw(dws, ctrl0) != cr0) | ||
527 | dw_writew(dws, ctrl0, cr0); | ||
528 | |||
529 | spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); | ||
530 | spi_chip_sel(dws, spi->chip_select); | ||
531 | |||
532 | /* Set the interrupt mask, for poll mode just disable all int */ | ||
533 | spi_mask_intr(dws, 0xff); | ||
534 | if (imask) | ||
535 | spi_umask_intr(dws, imask); | ||
536 | if (txint_level) | ||
537 | dw_writew(dws, txfltr, txint_level); | ||
538 | |||
539 | spi_enable_chip(dws, 1); | ||
540 | if (cs_change) | ||
541 | dws->prev_chip = chip; | ||
542 | } | ||
543 | |||
544 | if (dws->dma_mapped) | ||
545 | dws->dma_ops->dma_transfer(dws, cs_change); | ||
546 | |||
547 | if (chip->poll_mode) | ||
548 | poll_transfer(dws); | ||
549 | |||
550 | return; | ||
551 | |||
552 | early_exit: | ||
553 | giveback(dws); | ||
554 | return; | ||
555 | } | ||
556 | |||
557 | static void pump_messages(struct work_struct *work) | ||
558 | { | ||
559 | struct dw_spi *dws = | ||
560 | container_of(work, struct dw_spi, pump_messages); | ||
561 | unsigned long flags; | ||
562 | |||
563 | /* Lock queue and check for queue work */ | ||
564 | spin_lock_irqsave(&dws->lock, flags); | ||
565 | if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) { | ||
566 | dws->busy = 0; | ||
567 | spin_unlock_irqrestore(&dws->lock, flags); | ||
568 | return; | ||
569 | } | ||
570 | |||
571 | /* Make sure we are not already running a message */ | ||
572 | if (dws->cur_msg) { | ||
573 | spin_unlock_irqrestore(&dws->lock, flags); | ||
574 | return; | ||
575 | } | ||
576 | |||
577 | /* Extract head of queue */ | ||
578 | dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue); | ||
579 | list_del_init(&dws->cur_msg->queue); | ||
580 | |||
581 | /* Initial message state */ | ||
582 | dws->cur_msg->state = START_STATE; | ||
583 | dws->cur_transfer = list_entry(dws->cur_msg->transfers.next, | ||
584 | struct spi_transfer, | ||
585 | transfer_list); | ||
586 | dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi); | ||
587 | |||
588 | /* Mark as busy and launch transfers */ | ||
589 | tasklet_schedule(&dws->pump_transfers); | ||
590 | |||
591 | dws->busy = 1; | ||
592 | spin_unlock_irqrestore(&dws->lock, flags); | ||
593 | } | ||
594 | |||
595 | /* spi_device drivers use this to queue their spi_message */ | ||
596 | static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg) | ||
597 | { | ||
598 | struct dw_spi *dws = spi_master_get_devdata(spi->master); | ||
599 | unsigned long flags; | ||
600 | |||
601 | spin_lock_irqsave(&dws->lock, flags); | ||
602 | |||
603 | if (dws->run == QUEUE_STOPPED) { | ||
604 | spin_unlock_irqrestore(&dws->lock, flags); | ||
605 | return -ESHUTDOWN; | ||
606 | } | ||
607 | |||
608 | msg->actual_length = 0; | ||
609 | msg->status = -EINPROGRESS; | ||
610 | msg->state = START_STATE; | ||
611 | |||
612 | list_add_tail(&msg->queue, &dws->queue); | ||
613 | |||
614 | if (dws->run == QUEUE_RUNNING && !dws->busy) { | ||
615 | |||
616 | if (dws->cur_transfer || dws->cur_msg) | ||
617 | queue_work(dws->workqueue, | ||
618 | &dws->pump_messages); | ||
619 | else { | ||
620 | /* If no other data transaction is in flight, just go */ | ||
621 | spin_unlock_irqrestore(&dws->lock, flags); | ||
622 | pump_messages(&dws->pump_messages); | ||
623 | return 0; | ||
624 | } | ||
625 | } | ||
626 | |||
627 | spin_unlock_irqrestore(&dws->lock, flags); | ||
628 | return 0; | ||
629 | } | ||
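For orientation, dw_spi_transfer() above is only ever reached through the SPI core: a protocol driver builds an spi_message and submits it with spi_sync()/spi_async(), and the core then invokes master->transfer. A minimal, illustrative caller-side sketch (not part of this file) could look like:

/* Illustrative only: how a protocol driver ends up in dw_spi_transfer().
 * Requires <linux/spi/spi.h>. */
static int example_full_duplex(struct spi_device *spi,
			       const u8 *tx, u8 *rx, size_t len)
{
	struct spi_transfer t = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len    = len,
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	/* spi_sync() queues the message and the core calls master->transfer */
	return spi_sync(spi, &m);
}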
630 | |||
631 | /* This may be called twice for each spi dev */ | ||
632 | static int dw_spi_setup(struct spi_device *spi) | ||
633 | { | ||
634 | struct dw_spi_chip *chip_info = NULL; | ||
635 | struct chip_data *chip; | ||
636 | |||
637 | if (spi->bits_per_word != 8 && spi->bits_per_word != 16) | ||
638 | return -EINVAL; | ||
639 | |||
640 | /* Only alloc on first setup */ | ||
641 | chip = spi_get_ctldata(spi); | ||
642 | if (!chip) { | ||
643 | chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); | ||
644 | if (!chip) | ||
645 | return -ENOMEM; | ||
646 | } | ||
647 | |||
648 | /* | ||
649 | * Protocol drivers may change the chip settings, so... | ||
650 | * if chip_info exists, use it | ||
651 | */ | ||
652 | chip_info = spi->controller_data; | ||
653 | |||
654 | /* chip_info doesn't always exist */ | ||
655 | if (chip_info) { | ||
656 | if (chip_info->cs_control) | ||
657 | chip->cs_control = chip_info->cs_control; | ||
658 | |||
659 | chip->poll_mode = chip_info->poll_mode; | ||
660 | chip->type = chip_info->type; | ||
661 | |||
662 | chip->rx_threshold = 0; | ||
663 | chip->tx_threshold = 0; | ||
664 | |||
665 | chip->enable_dma = chip_info->enable_dma; | ||
666 | } | ||
667 | |||
668 | if (spi->bits_per_word <= 8) { | ||
669 | chip->n_bytes = 1; | ||
670 | chip->dma_width = 1; | ||
671 | } else if (spi->bits_per_word <= 16) { | ||
672 | chip->n_bytes = 2; | ||
673 | chip->dma_width = 2; | ||
674 | } else { | ||
675 | /* The MRST SPI controller never handles the >16-bit case */ | ||
676 | dev_err(&spi->dev, "invalid wordsize\n"); | ||
677 | return -EINVAL; | ||
678 | } | ||
679 | chip->bits_per_word = spi->bits_per_word; | ||
680 | |||
681 | if (!spi->max_speed_hz) { | ||
682 | dev_err(&spi->dev, "No max speed HZ parameter\n"); | ||
683 | return -EINVAL; | ||
684 | } | ||
685 | chip->speed_hz = spi->max_speed_hz; | ||
686 | |||
687 | chip->tmode = 0; /* Tx & Rx */ | ||
688 | /* Default SPI mode is SCPOL = 0, SCPH = 0 */ | ||
689 | chip->cr0 = (chip->bits_per_word - 1) | ||
690 | | (chip->type << SPI_FRF_OFFSET) | ||
691 | | (spi->mode << SPI_MODE_OFFSET) | ||
692 | | (chip->tmode << SPI_TMOD_OFFSET); | ||
693 | |||
694 | spi_set_ctldata(spi, chip); | ||
695 | return 0; | ||
696 | } | ||
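dw_spi_setup() consumes an optional struct dw_spi_chip passed in spi->controller_data. As a purely hypothetical illustration, using only the fields dereferenced above (cs_control, poll_mode, type, enable_dma), board code might describe a slave like this:

/* Hypothetical board-side chip description consumed by dw_spi_setup().
 * Requires "spi-dw.h" for struct dw_spi_chip. */
static void example_cs_control(u32 command)
{
	/* e.g. toggle a GPIO; command is MRST_SPI_ASSERT or MRST_SPI_DEASSERT */
}

static struct dw_spi_chip example_chip = {
	.poll_mode  = 1,		/* busy-poll instead of TXEI interrupts */
	.type       = SSI_MOTO_SPI,	/* frame format fed into SPI_FRF_OFFSET */
	.enable_dma = 0,
	.cs_control = example_cs_control,
};
/* ...referenced from the board's spi_board_info.controller_data */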
697 | |||
698 | static void dw_spi_cleanup(struct spi_device *spi) | ||
699 | { | ||
700 | struct chip_data *chip = spi_get_ctldata(spi); | ||
701 | kfree(chip); | ||
702 | } | ||
703 | |||
704 | static int __devinit init_queue(struct dw_spi *dws) | ||
705 | { | ||
706 | INIT_LIST_HEAD(&dws->queue); | ||
707 | spin_lock_init(&dws->lock); | ||
708 | |||
709 | dws->run = QUEUE_STOPPED; | ||
710 | dws->busy = 0; | ||
711 | |||
712 | tasklet_init(&dws->pump_transfers, | ||
713 | pump_transfers, (unsigned long)dws); | ||
714 | |||
715 | INIT_WORK(&dws->pump_messages, pump_messages); | ||
716 | dws->workqueue = create_singlethread_workqueue( | ||
717 | dev_name(dws->master->dev.parent)); | ||
718 | if (dws->workqueue == NULL) | ||
719 | return -EBUSY; | ||
720 | |||
721 | return 0; | ||
722 | } | ||
723 | |||
724 | static int start_queue(struct dw_spi *dws) | ||
725 | { | ||
726 | unsigned long flags; | ||
727 | |||
728 | spin_lock_irqsave(&dws->lock, flags); | ||
729 | |||
730 | if (dws->run == QUEUE_RUNNING || dws->busy) { | ||
731 | spin_unlock_irqrestore(&dws->lock, flags); | ||
732 | return -EBUSY; | ||
733 | } | ||
734 | |||
735 | dws->run = QUEUE_RUNNING; | ||
736 | dws->cur_msg = NULL; | ||
737 | dws->cur_transfer = NULL; | ||
738 | dws->cur_chip = NULL; | ||
739 | dws->prev_chip = NULL; | ||
740 | spin_unlock_irqrestore(&dws->lock, flags); | ||
741 | |||
742 | queue_work(dws->workqueue, &dws->pump_messages); | ||
743 | |||
744 | return 0; | ||
745 | } | ||
746 | |||
747 | static int stop_queue(struct dw_spi *dws) | ||
748 | { | ||
749 | unsigned long flags; | ||
750 | unsigned limit = 50; | ||
751 | int status = 0; | ||
752 | |||
753 | spin_lock_irqsave(&dws->lock, flags); | ||
754 | dws->run = QUEUE_STOPPED; | ||
755 | while ((!list_empty(&dws->queue) || dws->busy) && limit--) { | ||
756 | spin_unlock_irqrestore(&dws->lock, flags); | ||
757 | msleep(10); | ||
758 | spin_lock_irqsave(&dws->lock, flags); | ||
759 | } | ||
760 | |||
761 | if (!list_empty(&dws->queue) || dws->busy) | ||
762 | status = -EBUSY; | ||
763 | spin_unlock_irqrestore(&dws->lock, flags); | ||
764 | |||
765 | return status; | ||
766 | } | ||
767 | |||
768 | static int destroy_queue(struct dw_spi *dws) | ||
769 | { | ||
770 | int status; | ||
771 | |||
772 | status = stop_queue(dws); | ||
773 | if (status != 0) | ||
774 | return status; | ||
775 | destroy_workqueue(dws->workqueue); | ||
776 | return 0; | ||
777 | } | ||
778 | |||
779 | /* Restart the controller, disable all interrupts, clean rx fifo */ | ||
780 | static void spi_hw_init(struct dw_spi *dws) | ||
781 | { | ||
782 | spi_enable_chip(dws, 0); | ||
783 | spi_mask_intr(dws, 0xff); | ||
784 | spi_enable_chip(dws, 1); | ||
785 | |||
786 | /* | ||
787 | * Try to detect the FIFO depth if not set by the interface driver; | ||
788 | * per the HW spec the depth can be anywhere from 2 to 256. | ||
789 | */ | ||
790 | if (!dws->fifo_len) { | ||
791 | u32 fifo; | ||
792 | for (fifo = 2; fifo <= 257; fifo++) { | ||
793 | dw_writew(dws, txfltr, fifo); | ||
794 | if (fifo != dw_readw(dws, txfltr)) | ||
795 | break; | ||
796 | } | ||
797 | |||
798 | dws->fifo_len = (fifo == 257) ? 0 : fifo; | ||
799 | dw_writew(dws, txfltr, 0); | ||
800 | } | ||
801 | } | ||
802 | |||
803 | int __devinit dw_spi_add_host(struct dw_spi *dws) | ||
804 | { | ||
805 | struct spi_master *master; | ||
806 | int ret; | ||
807 | |||
808 | BUG_ON(dws == NULL); | ||
809 | |||
810 | master = spi_alloc_master(dws->parent_dev, 0); | ||
811 | if (!master) { | ||
812 | ret = -ENOMEM; | ||
813 | goto exit; | ||
814 | } | ||
815 | |||
816 | dws->master = master; | ||
817 | dws->type = SSI_MOTO_SPI; | ||
818 | dws->prev_chip = NULL; | ||
819 | dws->dma_inited = 0; | ||
820 | dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); | ||
821 | |||
822 | ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, | ||
823 | "dw_spi", dws); | ||
824 | if (ret < 0) { | ||
825 | dev_err(&master->dev, "cannot get IRQ\n"); | ||
826 | goto err_free_master; | ||
827 | } | ||
828 | |||
829 | master->mode_bits = SPI_CPOL | SPI_CPHA; | ||
830 | master->bus_num = dws->bus_num; | ||
831 | master->num_chipselect = dws->num_cs; | ||
832 | master->cleanup = dw_spi_cleanup; | ||
833 | master->setup = dw_spi_setup; | ||
834 | master->transfer = dw_spi_transfer; | ||
835 | |||
836 | /* Basic HW init */ | ||
837 | spi_hw_init(dws); | ||
838 | |||
839 | if (dws->dma_ops && dws->dma_ops->dma_init) { | ||
840 | ret = dws->dma_ops->dma_init(dws); | ||
841 | if (ret) { | ||
842 | dev_warn(&master->dev, "DMA init failed\n"); | ||
843 | dws->dma_inited = 0; | ||
844 | } | ||
845 | } | ||
846 | |||
847 | /* Initialize and start the queue */ | ||
848 | ret = init_queue(dws); | ||
849 | if (ret) { | ||
850 | dev_err(&master->dev, "problem initializing queue\n"); | ||
851 | goto err_disable_hw; | ||
852 | } | ||
853 | ret = start_queue(dws); | ||
854 | if (ret) { | ||
855 | dev_err(&master->dev, "problem starting queue\n"); | ||
856 | goto err_disable_hw; | ||
857 | } | ||
858 | |||
859 | spi_master_set_devdata(master, dws); | ||
860 | ret = spi_register_master(master); | ||
861 | if (ret) { | ||
862 | dev_err(&master->dev, "problem registering spi master\n"); | ||
863 | goto err_queue_alloc; | ||
864 | } | ||
865 | |||
866 | mrst_spi_debugfs_init(dws); | ||
867 | return 0; | ||
868 | |||
869 | err_queue_alloc: | ||
870 | destroy_queue(dws); | ||
871 | if (dws->dma_ops && dws->dma_ops->dma_exit) | ||
872 | dws->dma_ops->dma_exit(dws); | ||
873 | err_disable_hw: | ||
874 | spi_enable_chip(dws, 0); | ||
875 | free_irq(dws->irq, dws); | ||
876 | err_free_master: | ||
877 | spi_master_put(master); | ||
878 | exit: | ||
879 | return ret; | ||
880 | } | ||
881 | EXPORT_SYMBOL_GPL(dw_spi_add_host); | ||
882 | |||
883 | void __devexit dw_spi_remove_host(struct dw_spi *dws) | ||
884 | { | ||
885 | int status = 0; | ||
886 | |||
887 | if (!dws) | ||
888 | return; | ||
889 | mrst_spi_debugfs_remove(dws); | ||
890 | |||
891 | /* Remove the queue */ | ||
892 | status = destroy_queue(dws); | ||
893 | if (status != 0) | ||
894 | dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not " | ||
895 | "complete, message memory not freed\n"); | ||
896 | |||
897 | if (dws->dma_ops && dws->dma_ops->dma_exit) | ||
898 | dws->dma_ops->dma_exit(dws); | ||
899 | spi_enable_chip(dws, 0); | ||
900 | /* Disable clk */ | ||
901 | spi_set_clk(dws, 0); | ||
902 | free_irq(dws->irq, dws); | ||
903 | |||
904 | /* Disconnect from the SPI framework */ | ||
905 | spi_unregister_master(dws->master); | ||
906 | } | ||
907 | EXPORT_SYMBOL_GPL(dw_spi_remove_host); | ||
908 | |||
909 | int dw_spi_suspend_host(struct dw_spi *dws) | ||
910 | { | ||
911 | int ret = 0; | ||
912 | |||
913 | ret = stop_queue(dws); | ||
914 | if (ret) | ||
915 | return ret; | ||
916 | spi_enable_chip(dws, 0); | ||
917 | spi_set_clk(dws, 0); | ||
918 | return ret; | ||
919 | } | ||
920 | EXPORT_SYMBOL_GPL(dw_spi_suspend_host); | ||
921 | |||
922 | int dw_spi_resume_host(struct dw_spi *dws) | ||
923 | { | ||
924 | int ret; | ||
925 | |||
926 | spi_hw_init(dws); | ||
927 | ret = start_queue(dws); | ||
928 | if (ret) | ||
929 | dev_err(&dws->master->dev, "failed to start queue (%d)\n", ret); | ||
930 | return ret; | ||
931 | } | ||
932 | EXPORT_SYMBOL_GPL(dw_spi_resume_host); | ||
933 | |||
934 | MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>"); | ||
935 | MODULE_DESCRIPTION("Driver for DesignWare SPI controller core"); | ||
936 | MODULE_LICENSE("GPL v2"); | ||
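Finally, dw_spi_add_host()/dw_spi_remove_host() are meant to be driven by a bus glue driver (PCI or memory-mapped) that fills in struct dw_spi first. The sketch below is illustrative only and assumes a platform device; it sticks to fields this file itself references (parent_dev, paddr, irq, bus_num, num_cs, max_freq), while the register-base field name is an assumption based on spi-dw.h:

/* Illustrative probe sketch; error unwinding is omitted for brevity.
 * Requires <linux/platform_device.h>, <linux/slab.h> and "spi-dw.h". */
static int __devinit example_dw_spi_probe(struct platform_device *pdev)
{
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct dw_spi *dws;

	if (!mem)
		return -EINVAL;

	dws = kzalloc(sizeof(*dws), GFP_KERNEL);
	if (!dws)
		return -ENOMEM;

	dws->regs = ioremap(mem->start, resource_size(mem)); /* assumed field, see spi-dw.h */
	dws->paddr = mem->start;		/* dw_spi_add_host() derives dma_addr from this */
	dws->parent_dev = &pdev->dev;		/* handed to spi_alloc_master() */
	dws->irq = platform_get_irq(pdev, 0);	/* requested in dw_spi_add_host() */
	dws->bus_num = pdev->id;
	dws->num_cs = 4;			/* board specific */
	dws->max_freq = 25000000;		/* SSI input clock rate, board specific */

	platform_set_drvdata(pdev, dws);
	return dw_spi_add_host(dws);
}

A real glue driver would also unmap and free on failure, and wire its remove/suspend/resume callbacks to dw_spi_remove_host(), dw_spi_suspend_host() and dw_spi_resume_host().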