Diffstat (limited to 'drivers/spi/pxa2xx_spi.c')
 -rw-r--r--  drivers/spi/pxa2xx_spi.c | 733
 1 files changed, 440 insertions, 293 deletions
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 494d9b856488..6ed3f1da9296 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -49,6 +49,14 @@ MODULE_LICENSE("GPL");
 #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
 #define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
 
+/* for testing SSCR1 changes that require SSP restart, basically
+ * everything except the service and interrupt enables */
+#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_EBCEI | SSCR1_SCFR \
+                                | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
+                                | SSCR1_RWOT | SSCR1_TRAIL | SSCR1_PINTE \
+                                | SSCR1_STRF | SSCR1_EFWR |SSCR1_RFT \
+                                | SSCR1_TFT | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
 #define DEFINE_SSP_REG(reg, off) \
 static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
 static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
@@ -123,8 +131,8 @@ struct driver_data {
         u8 n_bytes;
         u32 dma_width;
         int cs_change;
-        void (*write)(struct driver_data *drv_data);
-        void (*read)(struct driver_data *drv_data);
+        int (*write)(struct driver_data *drv_data);
+        int (*read)(struct driver_data *drv_data);
         irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
         void (*cs_control)(u32 command);
 };
@@ -132,7 +140,6 @@ struct driver_data {
 struct chip_data {
         u32 cr0;
         u32 cr1;
-        u32 to;
         u32 psp;
         u32 timeout;
         u8 n_bytes;
@@ -143,8 +150,8 @@ struct chip_data {
         u8 enable_dma;
         u8 bits_per_word;
         u32 speed_hz;
-        void (*write)(struct driver_data *drv_data);
-        void (*read)(struct driver_data *drv_data);
+        int (*write)(struct driver_data *drv_data);
+        int (*read)(struct driver_data *drv_data);
         void (*cs_control)(u32 command);
 };
 
@@ -166,114 +173,118 @@ static int flush(struct driver_data *drv_data)
         return limit;
 }
 
-static void restore_state(struct driver_data *drv_data)
-{
-        void *reg = drv_data->ioaddr;
-
-        /* Clear status and disable clock */
-        write_SSSR(drv_data->clear_sr, reg);
-        write_SSCR0(drv_data->cur_chip->cr0 & ~SSCR0_SSE, reg);
-
-        /* Load the registers */
-        write_SSCR1(drv_data->cur_chip->cr1, reg);
-        write_SSCR0(drv_data->cur_chip->cr0, reg);
-        if (drv_data->ssp_type != PXA25x_SSP) {
-                write_SSTO(0, reg);
-                write_SSPSP(drv_data->cur_chip->psp, reg);
-        }
-}
-
 static void null_cs_control(u32 command)
 {
 }
 
-static void null_writer(struct driver_data *drv_data)
+static int null_writer(struct driver_data *drv_data)
 {
         void *reg = drv_data->ioaddr;
         u8 n_bytes = drv_data->n_bytes;
 
-        while ((read_SSSR(reg) & SSSR_TNF)
-                        && (drv_data->tx < drv_data->tx_end)) {
-                write_SSDR(0, reg);
-                drv_data->tx += n_bytes;
-        }
+        if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
+                || (drv_data->tx == drv_data->tx_end))
+                return 0;
+
+        write_SSDR(0, reg);
+        drv_data->tx += n_bytes;
+
+        return 1;
 }
 
-static void null_reader(struct driver_data *drv_data)
+static int null_reader(struct driver_data *drv_data)
 {
         void *reg = drv_data->ioaddr;
         u8 n_bytes = drv_data->n_bytes;
 
         while ((read_SSSR(reg) & SSSR_RNE)
                 && (drv_data->rx < drv_data->rx_end)) {
                 read_SSDR(reg);
                 drv_data->rx += n_bytes;
         }
+
+        return drv_data->rx == drv_data->rx_end;
 }
 
-static void u8_writer(struct driver_data *drv_data)
+static int u8_writer(struct driver_data *drv_data)
 {
         void *reg = drv_data->ioaddr;
 
-        while ((read_SSSR(reg) & SSSR_TNF)
-                        && (drv_data->tx < drv_data->tx_end)) {
-                write_SSDR(*(u8 *)(drv_data->tx), reg);
-                ++drv_data->tx;
-        }
+        if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
+                || (drv_data->tx == drv_data->tx_end))
+                return 0;
+
+        write_SSDR(*(u8 *)(drv_data->tx), reg);
+        ++drv_data->tx;
+
+        return 1;
 }
 
-static void u8_reader(struct driver_data *drv_data)
+static int u8_reader(struct driver_data *drv_data)
 {
         void *reg = drv_data->ioaddr;
 
         while ((read_SSSR(reg) & SSSR_RNE)
                 && (drv_data->rx < drv_data->rx_end)) {
                 *(u8 *)(drv_data->rx) = read_SSDR(reg);
                 ++drv_data->rx;
         }
+
+        return drv_data->rx == drv_data->rx_end;
 }
 
-static void u16_writer(struct driver_data *drv_data)
+static int u16_writer(struct driver_data *drv_data)
 {
         void *reg = drv_data->ioaddr;
 
-        while ((read_SSSR(reg) & SSSR_TNF)
-                        && (drv_data->tx < drv_data->tx_end)) {
-                write_SSDR(*(u16 *)(drv_data->tx), reg);
-                drv_data->tx += 2;
-        }
+        if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
+                || (drv_data->tx == drv_data->tx_end))
+                return 0;
+
+        write_SSDR(*(u16 *)(drv_data->tx), reg);
+        drv_data->tx += 2;
+
+        return 1;
 }
 
-static void u16_reader(struct driver_data *drv_data)
+static int u16_reader(struct driver_data *drv_data)
 {
         void *reg = drv_data->ioaddr;
 
         while ((read_SSSR(reg) & SSSR_RNE)
                 && (drv_data->rx < drv_data->rx_end)) {
                 *(u16 *)(drv_data->rx) = read_SSDR(reg);
                 drv_data->rx += 2;
         }
+
+        return drv_data->rx == drv_data->rx_end;
 }
-static void u32_writer(struct driver_data *drv_data)
+
+static int u32_writer(struct driver_data *drv_data)
 {
         void *reg = drv_data->ioaddr;
 
-        while ((read_SSSR(reg) & SSSR_TNF)
-                        && (drv_data->tx < drv_data->tx_end)) {
-                write_SSDR(*(u32 *)(drv_data->tx), reg);
-                drv_data->tx += 4;
-        }
+        if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
+                || (drv_data->tx == drv_data->tx_end))
+                return 0;
+
+        write_SSDR(*(u32 *)(drv_data->tx), reg);
+        drv_data->tx += 4;
+
+        return 1;
 }
 
-static void u32_reader(struct driver_data *drv_data)
+static int u32_reader(struct driver_data *drv_data)
 {
         void *reg = drv_data->ioaddr;
 
         while ((read_SSSR(reg) & SSSR_RNE)
                 && (drv_data->rx < drv_data->rx_end)) {
                 *(u32 *)(drv_data->rx) = read_SSDR(reg);
                 drv_data->rx += 4;
         }
+
+        return drv_data->rx == drv_data->rx_end;
 }
 
 static void *next_transfer(struct driver_data *drv_data)
@@ -409,166 +420,134 @@ static int wait_dma_channel_stop(int channel)
         return limit;
 }
 
-static void dma_handler(int channel, void *data)
+void dma_error_stop(struct driver_data *drv_data, const char *msg)
 {
-        struct driver_data *drv_data = data;
-        struct spi_message *msg = drv_data->cur_msg;
         void *reg = drv_data->ioaddr;
-        u32 irq_status = DCSR(channel) & DMA_INT_MASK;
-        u32 trailing_sssr = 0;
 
-        if (irq_status & DCSR_BUSERR) {
+        /* Stop and reset */
+        DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+        DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+        write_SSSR(drv_data->clear_sr, reg);
+        write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+        if (drv_data->ssp_type != PXA25x_SSP)
+                write_SSTO(0, reg);
+        flush(drv_data);
+        write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
 
-                /* Disable interrupts, clear status and reset DMA */
-                write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
-                write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
-                if (drv_data->ssp_type != PXA25x_SSP)
-                        write_SSTO(0, reg);
-                write_SSSR(drv_data->clear_sr, reg);
-                DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
-                DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+        unmap_dma_buffers(drv_data);
 
-                if (flush(drv_data) == 0)
-                        dev_err(&drv_data->pdev->dev,
-                                "dma_handler: flush fail\n");
+        dev_err(&drv_data->pdev->dev, "%s\n", msg);
 
-                unmap_dma_buffers(drv_data);
+        drv_data->cur_msg->state = ERROR_STATE;
+        tasklet_schedule(&drv_data->pump_transfers);
+}
+
+static void dma_transfer_complete(struct driver_data *drv_data)
+{
+        void *reg = drv_data->ioaddr;
+        struct spi_message *msg = drv_data->cur_msg;
+
+        /* Clear and disable interrupts on SSP and DMA channels*/
+        write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+        write_SSSR(drv_data->clear_sr, reg);
+        DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+        DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+
+        if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
+                dev_err(&drv_data->pdev->dev,
+                        "dma_handler: dma rx channel stop failed\n");
+
+        if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
+                dev_err(&drv_data->pdev->dev,
+                        "dma_transfer: ssp rx stall failed\n");
+
+        unmap_dma_buffers(drv_data);
+
+        /* update the buffer pointer for the amount completed in dma */
+        drv_data->rx += drv_data->len -
+                        (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
+
+        /* read trailing data from fifo, it does not matter how many
+         * bytes are in the fifo just read until buffer is full
+         * or fifo is empty, which ever occurs first */
+        drv_data->read(drv_data);
+
+        /* return count of what was actually read */
+        msg->actual_length += drv_data->len -
+                                (drv_data->rx_end - drv_data->rx);
+
+        /* Release chip select if requested, transfer delays are
+         * handled in pump_transfers */
+        if (drv_data->cs_change)
+                drv_data->cs_control(PXA2XX_CS_DEASSERT);
+
+        /* Move to next transfer */
+        msg->state = next_transfer(drv_data);
+
+        /* Schedule transfer tasklet */
+        tasklet_schedule(&drv_data->pump_transfers);
+}
+
+static void dma_handler(int channel, void *data)
+{
+        struct driver_data *drv_data = data;
+        u32 irq_status = DCSR(channel) & DMA_INT_MASK;
+
+        if (irq_status & DCSR_BUSERR) {
 
                 if (channel == drv_data->tx_channel)
-                        dev_err(&drv_data->pdev->dev,
-                                "dma_handler: bad bus address on "
-                                "tx channel %d, source %x target = %x\n",
-                                channel, DSADR(channel), DTADR(channel));
+                        dma_error_stop(drv_data,
+                                        "dma_handler: "
+                                        "bad bus address on tx channel");
                 else
-                        dev_err(&drv_data->pdev->dev,
-                                "dma_handler: bad bus address on "
-                                "rx channel %d, source %x target = %x\n",
-                                channel, DSADR(channel), DTADR(channel));
-
-                msg->state = ERROR_STATE;
-                tasklet_schedule(&drv_data->pump_transfers);
+                        dma_error_stop(drv_data,
+                                        "dma_handler: "
+                                        "bad bus address on rx channel");
+                return;
         }
 
         /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */
-        if ((drv_data->ssp_type == PXA25x_SSP)
-                && (channel == drv_data->tx_channel)
-                && (irq_status & DCSR_ENDINTR)) {
+        if ((channel == drv_data->tx_channel)
+                && (irq_status & DCSR_ENDINTR)
+                && (drv_data->ssp_type == PXA25x_SSP)) {
 
                 /* Wait for rx to stall */
                 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
                         dev_err(&drv_data->pdev->dev,
                                 "dma_handler: ssp rx stall failed\n");
 
-                /* Clear and disable interrupts on SSP and DMA channels*/
-                write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
-                write_SSSR(drv_data->clear_sr, reg);
-                DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
-                DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
-                if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
-                        dev_err(&drv_data->pdev->dev,
-                                "dma_handler: dma rx channel stop failed\n");
-
-                unmap_dma_buffers(drv_data);
-
-                /* Read trailing bytes */
-                /* Calculate number of trailing bytes, read them */
-                trailing_sssr = read_SSSR(reg);
-                if ((trailing_sssr & 0xf008) != 0xf000) {
-                        drv_data->rx = drv_data->rx_end -
-                                        (((trailing_sssr >> 12) & 0x0f) + 1);
-                        drv_data->read(drv_data);
-                }
-                msg->actual_length += drv_data->len;
-
-                /* Release chip select if requested, transfer delays are
-                 * handled in pump_transfers */
-                if (drv_data->cs_change)
-                        drv_data->cs_control(PXA2XX_CS_DEASSERT);
-
-                /* Move to next transfer */
-                msg->state = next_transfer(drv_data);
-
-                /* Schedule transfer tasklet */
-                tasklet_schedule(&drv_data->pump_transfers);
+                /* finish this transfer, start the next */
+                dma_transfer_complete(drv_data);
         }
 }
 
 static irqreturn_t dma_transfer(struct driver_data *drv_data)
 {
         u32 irq_status;
-        u32 trailing_sssr = 0;
-        struct spi_message *msg = drv_data->cur_msg;
         void *reg = drv_data->ioaddr;
 
         irq_status = read_SSSR(reg) & drv_data->mask_sr;
         if (irq_status & SSSR_ROR) {
-                /* Clear and disable interrupts on SSP and DMA channels*/
-                write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
-                write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
-                if (drv_data->ssp_type != PXA25x_SSP)
-                        write_SSTO(0, reg);
-                write_SSSR(drv_data->clear_sr, reg);
-                DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
-                DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
-                unmap_dma_buffers(drv_data);
-
-                if (flush(drv_data) == 0)
-                        dev_err(&drv_data->pdev->dev,
-                                "dma_transfer: flush fail\n");
-
-                dev_warn(&drv_data->pdev->dev, "dma_transfer: fifo overun\n");
-
-                drv_data->cur_msg->state = ERROR_STATE;
-                tasklet_schedule(&drv_data->pump_transfers);
-
+                dma_error_stop(drv_data, "dma_transfer: fifo overrun");
                 return IRQ_HANDLED;
         }
 
         /* Check for false positive timeout */
-        if ((irq_status & SSSR_TINT) && DCSR(drv_data->tx_channel) & DCSR_RUN) {
+        if ((irq_status & SSSR_TINT)
+                && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
                 write_SSSR(SSSR_TINT, reg);
                 return IRQ_HANDLED;
         }
 
         if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
 
-                /* Clear and disable interrupts on SSP and DMA channels*/
-                write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+                /* Clear and disable timeout interrupt, do the rest in
+                 * dma_transfer_complete */
                 if (drv_data->ssp_type != PXA25x_SSP)
                         write_SSTO(0, reg);
-                write_SSSR(drv_data->clear_sr, reg);
-                DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
-                DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
 
-                if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
-                        dev_err(&drv_data->pdev->dev,
-                                "dma_transfer: dma rx channel stop failed\n");
-
-                if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
-                        dev_err(&drv_data->pdev->dev,
-                                "dma_transfer: ssp rx stall failed\n");
-
-                unmap_dma_buffers(drv_data);
-
-                /* Calculate number of trailing bytes, read them */
-                trailing_sssr = read_SSSR(reg);
-                if ((trailing_sssr & 0xf008) != 0xf000) {
-                        drv_data->rx = drv_data->rx_end -
-                                        (((trailing_sssr >> 12) & 0x0f) + 1);
-                        drv_data->read(drv_data);
-                }
-                msg->actual_length += drv_data->len;
-
-                /* Release chip select if requested, transfer delays are
-                 * handled in pump_transfers */
-                if (drv_data->cs_change)
-                        drv_data->cs_control(PXA2XX_CS_DEASSERT);
-
-                /* Move to next transfer */
-                msg->state = next_transfer(drv_data);
-
-                /* Schedule transfer tasklet */
-                tasklet_schedule(&drv_data->pump_transfers);
+                /* finish this transfer, start the next */
+                dma_transfer_complete(drv_data);
 
                 return IRQ_HANDLED;
         }
@@ -577,89 +556,103 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data)
         return IRQ_NONE;
 }
 
-static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
+static void int_error_stop(struct driver_data *drv_data, const char* msg)
 {
-        struct spi_message *msg = drv_data->cur_msg;
         void *reg = drv_data->ioaddr;
-        unsigned long limit = loops_per_jiffy << 1;
-        u32 irq_status;
-        u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
-                        drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
-
-        while ((irq_status = read_SSSR(reg) & irq_mask)) {
-
-                if (irq_status & SSSR_ROR) {
 
-                        /* Clear and disable interrupts */
-                        write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
-                        write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
-                        if (drv_data->ssp_type != PXA25x_SSP)
-                                write_SSTO(0, reg);
-                        write_SSSR(drv_data->clear_sr, reg);
+        /* Stop and reset SSP */
+        write_SSSR(drv_data->clear_sr, reg);
+        write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
+        if (drv_data->ssp_type != PXA25x_SSP)
+                write_SSTO(0, reg);
+        flush(drv_data);
+        write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
 
-                        if (flush(drv_data) == 0)
-                                dev_err(&drv_data->pdev->dev,
-                                        "interrupt_transfer: flush fail\n");
+        dev_err(&drv_data->pdev->dev, "%s\n", msg);
 
-                        /* Stop the SSP */
+        drv_data->cur_msg->state = ERROR_STATE;
+        tasklet_schedule(&drv_data->pump_transfers);
+}
 
-                        dev_warn(&drv_data->pdev->dev,
-                                "interrupt_transfer: fifo overun\n");
+static void int_transfer_complete(struct driver_data *drv_data)
+{
+        void *reg = drv_data->ioaddr;
 
-                        msg->state = ERROR_STATE;
-                        tasklet_schedule(&drv_data->pump_transfers);
+        /* Stop SSP */
+        write_SSSR(drv_data->clear_sr, reg);
+        write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
+        if (drv_data->ssp_type != PXA25x_SSP)
+                write_SSTO(0, reg);
 
-                        return IRQ_HANDLED;
-                }
+        /* Update total byte transfered return count actual bytes read */
+        drv_data->cur_msg->actual_length += drv_data->len -
+                                (drv_data->rx_end - drv_data->rx);
 
-                /* Look for false positive timeout */
-                if ((irq_status & SSSR_TINT)
-                        && (drv_data->rx < drv_data->rx_end))
-                        write_SSSR(SSSR_TINT, reg);
+        /* Release chip select if requested, transfer delays are
+         * handled in pump_transfers */
+        if (drv_data->cs_change)
+                drv_data->cs_control(PXA2XX_CS_DEASSERT);
 
-                /* Pump data */
-                drv_data->read(drv_data);
-                drv_data->write(drv_data);
+        /* Move to next transfer */
+        drv_data->cur_msg->state = next_transfer(drv_data);
 
-                if (drv_data->tx == drv_data->tx_end) {
-                        /* Disable tx interrupt */
-                        write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg);
-                        irq_mask = drv_data->mask_sr & ~SSSR_TFS;
+        /* Schedule transfer tasklet */
+        tasklet_schedule(&drv_data->pump_transfers);
+}
 
-                        /* PXA25x_SSP has no timeout, read trailing bytes */
-                        if (drv_data->ssp_type == PXA25x_SSP) {
-                                while ((read_SSSR(reg) & SSSR_BSY) && limit--)
-                                        drv_data->read(drv_data);
+static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
+{
+        void *reg = drv_data->ioaddr;
 
-                                if (limit == 0)
-                                        dev_err(&drv_data->pdev->dev,
-                                                "interrupt_transfer: "
-                                                "trailing byte read failed\n");
-                        }
-                }
+        u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
+                        drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
 
-                if ((irq_status & SSSR_TINT)
-                        || (drv_data->rx == drv_data->rx_end)) {
+        u32 irq_status = read_SSSR(reg) & irq_mask;
 
-                        /* Clear timeout */
-                        write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
-                        if (drv_data->ssp_type != PXA25x_SSP)
-                                write_SSTO(0, reg);
-                        write_SSSR(drv_data->clear_sr, reg);
+        if (irq_status & SSSR_ROR) {
+                int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
+                return IRQ_HANDLED;
+        }
 
-                        /* Update total byte transfered */
-                        msg->actual_length += drv_data->len;
+        if (irq_status & SSSR_TINT) {
+                write_SSSR(SSSR_TINT, reg);
+                if (drv_data->read(drv_data)) {
+                        int_transfer_complete(drv_data);
+                        return IRQ_HANDLED;
+                }
+        }
 
-                        /* Release chip select if requested, transfer delays are
-                         * handled in pump_transfers */
-                        if (drv_data->cs_change)
-                                drv_data->cs_control(PXA2XX_CS_DEASSERT);
+        /* Drain rx fifo, Fill tx fifo and prevent overruns */
+        do {
+                if (drv_data->read(drv_data)) {
+                        int_transfer_complete(drv_data);
+                        return IRQ_HANDLED;
+                }
+        } while (drv_data->write(drv_data));
 
-                        /* Move to next transfer */
-                        msg->state = next_transfer(drv_data);
+        if (drv_data->read(drv_data)) {
+                int_transfer_complete(drv_data);
+                return IRQ_HANDLED;
+        }
 
-                        /* Schedule transfer tasklet */
-                        tasklet_schedule(&drv_data->pump_transfers);
+        if (drv_data->tx == drv_data->tx_end) {
+                write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg);
+                /* PXA25x_SSP has no timeout, read trailing bytes */
+                if (drv_data->ssp_type == PXA25x_SSP) {
+                        if (!wait_ssp_rx_stall(reg))
+                        {
+                                int_error_stop(drv_data, "interrupt_transfer: "
+                                                "rx stall failed");
+                                return IRQ_HANDLED;
+                        }
+                        if (!drv_data->read(drv_data))
+                        {
+                                int_error_stop(drv_data,
+                                        "interrupt_transfer: "
+                                        "trailing byte read failed");
+                                return IRQ_HANDLED;
+                        }
+                        int_transfer_complete(drv_data);
                 }
         }
 
@@ -681,7 +674,7 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
                 write_SSSR(drv_data->clear_sr, reg);
 
                 dev_err(&drv_data->pdev->dev, "bad message state "
-                        "in interrupt handler");
+                        "in interrupt handler\n");
 
                 /* Never fail */
                 return IRQ_HANDLED;
@@ -690,6 +683,102 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
         return drv_data->transfer_handler(drv_data);
 }
 
+int set_dma_burst_and_threshold(struct chip_data *chip, struct spi_device *spi,
+                                u8 bits_per_word, u32 *burst_code,
+                                u32 *threshold)
+{
+        struct pxa2xx_spi_chip *chip_info =
+                        (struct pxa2xx_spi_chip *)spi->controller_data;
+        int bytes_per_word;
+        int burst_bytes;
+        int thresh_words;
+        int req_burst_size;
+        int retval = 0;
+
+        /* Set the threshold (in registers) to equal the same amount of data
+         * as represented by burst size (in bytes).  The computation below
+         * is (burst_size rounded up to nearest 8 byte, word or long word)
+         * divided by (bytes/register); the tx threshold is the inverse of
+         * the rx, so that there will always be enough data in the rx fifo
+         * to satisfy a burst, and there will always be enough space in the
+         * tx fifo to accept a burst (a tx burst will overwrite the fifo if
+         * there is not enough space), there must always remain enough empty
+         * space in the rx fifo for any data loaded to the tx fifo.
+         * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
+         * will be 8, or half the fifo;
+         * The threshold can only be set to 2, 4 or 8, but not 16, because
+         * to burst 16 to the tx fifo, the fifo would have to be empty;
+         * however, the minimum fifo trigger level is 1, and the tx will
+         * request service when the fifo is at this level, with only 15 spaces.
+         */
+
+        /* find bytes/word */
+        if (bits_per_word <= 8)
+                bytes_per_word = 1;
+        else if (bits_per_word <= 16)
+                bytes_per_word = 2;
+        else
+                bytes_per_word = 4;
+
+        /* use struct pxa2xx_spi_chip->dma_burst_size if available */
+        if (chip_info)
+                req_burst_size = chip_info->dma_burst_size;
+        else {
+                switch (chip->dma_burst_size) {
+                default:
+                        /* if the default burst size is not set,
+                         * do it now */
+                        chip->dma_burst_size = DCMD_BURST8;
+                case DCMD_BURST8:
+                        req_burst_size = 8;
+                        break;
+                case DCMD_BURST16:
+                        req_burst_size = 16;
+                        break;
+                case DCMD_BURST32:
+                        req_burst_size = 32;
+                        break;
+                }
+        }
+        if (req_burst_size <= 8) {
+                *burst_code = DCMD_BURST8;
+                burst_bytes = 8;
+        } else if (req_burst_size <= 16) {
+                if (bytes_per_word == 1) {
+                        /* don't burst more than 1/2 the fifo */
+                        *burst_code = DCMD_BURST8;
+                        burst_bytes = 8;
+                        retval = 1;
+                } else {
+                        *burst_code = DCMD_BURST16;
+                        burst_bytes = 16;
+                }
+        } else {
+                if (bytes_per_word == 1) {
+                        /* don't burst more than 1/2 the fifo */
+                        *burst_code = DCMD_BURST8;
+                        burst_bytes = 8;
+                        retval = 1;
+                } else if (bytes_per_word == 2) {
+                        /* don't burst more than 1/2 the fifo */
+                        *burst_code = DCMD_BURST16;
+                        burst_bytes = 16;
+                        retval = 1;
+                } else {
+                        *burst_code = DCMD_BURST32;
+                        burst_bytes = 32;
+                }
+        }
+
+        thresh_words = burst_bytes / bytes_per_word;
+
+        /* thresh_words will be between 2 and 8 */
+        *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
+                        | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
+
+        return retval;
+}
+
 static void pump_transfers(unsigned long data)
 {
         struct driver_data *drv_data = (struct driver_data *)data;
@@ -702,6 +791,9 @@ static void pump_transfers(unsigned long data)
         u8 bits = 0;
         u32 speed = 0;
         u32 cr0;
+        u32 cr1;
+        u32 dma_thresh = drv_data->cur_chip->dma_threshold;
+        u32 dma_burst = drv_data->cur_chip->dma_burst_size;
 
         /* Get current state information */
         message = drv_data->cur_msg;
@@ -731,6 +823,16 @@ static void pump_transfers(unsigned long data)
                 udelay(previous->delay_usecs);
         }
 
+        /* Check transfer length */
+        if (transfer->len > 8191)
+        {
+                dev_warn(&drv_data->pdev->dev, "pump_transfers: transfer "
+                        "length greater than 8191\n");
+                message->status = -EINVAL;
+                giveback(drv_data);
+                return;
+        }
+
         /* Setup the transfer state based on the type of transfer */
         if (flush(drv_data) == 0) {
                 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
@@ -747,17 +849,15 @@ static void pump_transfers(unsigned long data)
         drv_data->rx_end = drv_data->rx + transfer->len;
         drv_data->rx_dma = transfer->rx_dma;
         drv_data->tx_dma = transfer->tx_dma;
-        drv_data->len = transfer->len;
+        drv_data->len = transfer->len & DCMD_LENGTH;
         drv_data->write = drv_data->tx ? chip->write : null_writer;
         drv_data->read = drv_data->rx ? chip->read : null_reader;
         drv_data->cs_change = transfer->cs_change;
 
         /* Change speed and bit per word on a per transfer */
+        cr0 = chip->cr0;
         if (transfer->speed_hz || transfer->bits_per_word) {
 
-                /* Disable clock */
-                write_SSCR0(chip->cr0 & ~SSCR0_SSE, reg);
-                cr0 = chip->cr0;
                 bits = chip->bits_per_word;
                 speed = chip->speed_hz;
 
@@ -796,15 +896,24 @@ static void pump_transfers(unsigned long data)
                 drv_data->write = drv_data->write != null_writer ?
                                 u32_writer : null_writer;
                 }
+                /* if bits/word is changed in dma mode, then must check the
+                 * thresholds and burst also */
+                if (chip->enable_dma) {
+                        if (set_dma_burst_and_threshold(chip, message->spi,
+                                                        bits, &dma_burst,
+                                                        &dma_thresh))
+                                if (printk_ratelimit())
+                                        dev_warn(&message->spi->dev,
+                                                "pump_transfer: "
+                                                "DMA burst size reduced to "
+                                                "match bits_per_word\n");
+                }
 
                 cr0 = clk_div
                         | SSCR0_Motorola
                         | SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
                         | SSCR0_SSE
                         | (bits > 16 ? SSCR0_EDSS : 0);
-
-                /* Start it back up */
-                write_SSCR0(cr0, reg);
         }
 
         message->state = RUNNING_STATE;
@@ -823,13 +932,13 @@ static void pump_transfers(unsigned long data)
                 /* No target address increment */
                 DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
                         | drv_data->dma_width
-                        | chip->dma_burst_size
+                        | dma_burst
                         | drv_data->len;
         else
                 DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
                         | DCMD_FLOWSRC
                         | drv_data->dma_width
-                        | chip->dma_burst_size
+                        | dma_burst
                         | drv_data->len;
 
         /* Setup tx DMA Channel */
@@ -840,13 +949,13 @@ static void pump_transfers(unsigned long data)
                 /* No source address increment */
                 DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
                         | drv_data->dma_width
-                        | chip->dma_burst_size
+                        | dma_burst
                         | drv_data->len;
         else
                 DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
                         | DCMD_FLOWTRG
                         | drv_data->dma_width
-                        | chip->dma_burst_size
+                        | dma_burst
                         | drv_data->len;
 
         /* Enable dma end irqs on SSP to detect end of transfer */
@@ -856,16 +965,11 @@ static void pump_transfers(unsigned long data)
                 /* Fix me, need to handle cs polarity */
                 drv_data->cs_control(PXA2XX_CS_ASSERT);
 
-                /* Go baby, go */
+                /* Clear status and start DMA engine */
+                cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
                 write_SSSR(drv_data->clear_sr, reg);
                 DCSR(drv_data->rx_channel) |= DCSR_RUN;
                 DCSR(drv_data->tx_channel) |= DCSR_RUN;
-                if (drv_data->ssp_type != PXA25x_SSP)
-                        write_SSTO(chip->timeout, reg);
-                write_SSCR1(chip->cr1
-                                | chip->dma_threshold
-                                | drv_data->dma_cr1,
-                                reg);
         } else {
                 /* Ensure we have the correct interrupt handler */
                 drv_data->transfer_handler = interrupt_transfer;
@@ -873,14 +977,25 @@ static void pump_transfers(unsigned long data)
                 /* Fix me, need to handle cs polarity */
                 drv_data->cs_control(PXA2XX_CS_ASSERT);
 
-                /* Go baby, go */
+                /* Clear status */
+                cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
                 write_SSSR(drv_data->clear_sr, reg);
+        }
+
+        /* see if we need to reload the config registers */
+        if ((read_SSCR0(reg) != cr0)
+                || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
+                        (cr1 & SSCR1_CHANGE_MASK)) {
+
+                write_SSCR0(cr0 & ~SSCR0_SSE, reg);
                 if (drv_data->ssp_type != PXA25x_SSP)
                         write_SSTO(chip->timeout, reg);
-                write_SSCR1(chip->cr1
-                                | chip->threshold
-                                | drv_data->int_cr1,
-                                reg);
+                write_SSCR1(cr1, reg);
+                write_SSCR0(cr0, reg);
+        } else {
+                if (drv_data->ssp_type != PXA25x_SSP)
+                        write_SSTO(chip->timeout, reg);
+                write_SSCR1(cr1, reg);
         }
 }
 
@@ -915,9 +1030,9 @@ static void pump_messages(struct work_struct *work)
                                         struct spi_transfer,
                                         transfer_list);
 
-        /* Setup the SSP using the per chip configuration */
+        /* prepare to setup the SSP, in pump_transfers, using the per
+         * chip configuration */
         drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
-        restore_state(drv_data);
 
         /* Mark as busy and launch transfers */
         tasklet_schedule(&drv_data->pump_transfers);
@@ -963,63 +1078,77 @@ static int setup(struct spi_device *spi)
                 spi->bits_per_word = 8;
 
         if (drv_data->ssp_type != PXA25x_SSP
-                && (spi->bits_per_word < 4 || spi->bits_per_word > 32))
+                && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
+                dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
+                        "b/w not 4-32 for type non-PXA25x_SSP\n",
+                        drv_data->ssp_type, spi->bits_per_word);
                 return -EINVAL;
-        else if (spi->bits_per_word < 4 || spi->bits_per_word > 16)
+        }
+        else if (drv_data->ssp_type == PXA25x_SSP
+                        && (spi->bits_per_word < 4
+                        || spi->bits_per_word > 16)) {
+                dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
+                        "b/w not 4-16 for type PXA25x_SSP\n",
+                        drv_data->ssp_type, spi->bits_per_word);
                 return -EINVAL;
+        }
 
-        /* Only alloc (or use chip_info) on first setup */
+        /* Only alloc on first setup */
         chip = spi_get_ctldata(spi);
-        if (chip == NULL) {
+        if (!chip) {
                 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
-                if (!chip)
+                if (!chip) {
+                        dev_err(&spi->dev,
+                                "failed setup: can't allocate chip data\n");
                         return -ENOMEM;
+                }
 
                 chip->cs_control = null_cs_control;
                 chip->enable_dma = 0;
-                chip->timeout = SSP_TIMEOUT(1000);
+                chip->timeout = 1000;
                 chip->threshold = SSCR1_RxTresh(1) | SSCR1_TxTresh(1);
                 chip->dma_burst_size = drv_data->master_info->enable_dma ?
                                         DCMD_BURST8 : 0;
-
-                chip_info = spi->controller_data;
         }
 
+        /* protocol drivers may change the chip settings, so...
+         * if chip_info exists, use it */
+        chip_info = spi->controller_data;
+
         /* chip_info isn't always needed */
+        chip->cr1 = 0;
         if (chip_info) {
                 if (chip_info->cs_control)
                         chip->cs_control = chip_info->cs_control;
 
-                chip->timeout = SSP_TIMEOUT(chip_info->timeout_microsecs);
+                chip->timeout = chip_info->timeout;
 
-                chip->threshold = SSCR1_RxTresh(chip_info->rx_threshold)
-                                | SSCR1_TxTresh(chip_info->tx_threshold);
+                chip->threshold = (SSCR1_RxTresh(chip_info->rx_threshold) &
+                                                                SSCR1_RFT) |
+                                (SSCR1_TxTresh(chip_info->tx_threshold) &
+                                                                SSCR1_TFT);
 
                 chip->enable_dma = chip_info->dma_burst_size != 0
                                         && drv_data->master_info->enable_dma;
                 chip->dma_threshold = 0;
 
-                if (chip->enable_dma) {
-                        if (chip_info->dma_burst_size <= 8) {
-                                chip->dma_threshold = SSCR1_RxTresh(8)
-                                        | SSCR1_TxTresh(8);
-                                chip->dma_burst_size = DCMD_BURST8;
-                        } else if (chip_info->dma_burst_size <= 16) {
-                                chip->dma_threshold = SSCR1_RxTresh(16)
-                                        | SSCR1_TxTresh(16);
-                                chip->dma_burst_size = DCMD_BURST16;
-                        } else {
-                                chip->dma_threshold = SSCR1_RxTresh(32)
-                                        | SSCR1_TxTresh(32);
-                                chip->dma_burst_size = DCMD_BURST32;
-                        }
-                }
-
-
                 if (chip_info->enable_loopback)
                         chip->cr1 = SSCR1_LBM;
         }
 
+        /* set dma burst and threshold outside of chip_info path so that if
+         * chip_info goes away after setting chip->enable_dma, the
+         * burst and threshold can still respond to changes in bits_per_word */
+        if (chip->enable_dma) {
+                /* set up legal burst and threshold for dma */
+                if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word,
+                                                &chip->dma_burst_size,
+                                                &chip->dma_threshold)) {
+                        dev_warn(&spi->dev, "in setup: DMA burst size reduced "
+                                "to match bits_per_word\n");
+                }
+        }
+
         if (drv_data->ioaddr == SSP1_VIRT)
                 clk_div = SSP1_SerClkDiv(spi->max_speed_hz);
         else if (drv_data->ioaddr == SSP2_VIRT)
@@ -1027,7 +1156,11 @@ static int setup(struct spi_device *spi)
         else if (drv_data->ioaddr == SSP3_VIRT)
                 clk_div = SSP3_SerClkDiv(spi->max_speed_hz);
         else
+        {
+                dev_err(&spi->dev, "failed setup: unknown IO address=0x%p\n",
+                        drv_data->ioaddr);
                 return -ENODEV;
+        }
         chip->speed_hz = spi->max_speed_hz;
 
         chip->cr0 = clk_div
@@ -1071,7 +1204,6 @@ static int setup(struct spi_device *spi)
                 chip->write = u32_writer;
         } else {
                 dev_err(&spi->dev, "invalid wordsize\n");
-                kfree(chip);
                 return -ENODEV;
         }
         chip->bits_per_word = spi->bits_per_word;
@@ -1162,6 +1294,12 @@ static int destroy_queue(struct driver_data *drv_data)
         int status;
 
         status = stop_queue(drv_data);
+        /* we are unloading the module or failing to load (only two calls
+         * to this routine), and neither call can handle a return value.
+         * However, destroy_workqueue calls flush_workqueue, and that will
+         * block until all work is done.  If the reason that stop_queue
+         * timed out is that the work will never finish, then it does no
+         * good to call destroy_workqueue, so return anyway. */
         if (status != 0)
                 return status;
 
@@ -1360,7 +1498,16 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
         /* Remove the queue */
         status = destroy_queue(drv_data);
         if (status != 0)
-                return status;
+                /* the kernel does not check the return status of this
+                 * this routine (mod->exit, within the kernel).  Therefore
+                 * nothing is gained by returning from here, the module is
+                 * going away regardless, and we should not leave any more
+                 * resources allocated than necessary.  We cannot free the
+                 * message memory in drv_data->queue, but we can release the
+                 * resources below.  I think the kernel should honor -EBUSY
+                 * returns but... */
+                dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not "
+                        "complete, message memory not freed\n");
 
         /* Disable the SSP at the peripheral and SOC level */
         write_SSCR0(0, drv_data->ioaddr);