-rw-r--r--  drivers/input/touchscreen/ads7846.c  |  12
-rw-r--r--  drivers/mtd/devices/m25p80.c         |  50
-rw-r--r--  drivers/mtd/devices/mtd_dataflash.c  |  28
-rw-r--r--  drivers/spi/spi.c                    |  18
-rw-r--r--  drivers/spi/spi_bitbang.c            |  86
-rw-r--r--  include/linux/spi/spi.h              |  92
-rw-r--r--  include/linux/spi/spi_bitbang.h      |   7
7 files changed, 180 insertions(+), 113 deletions(-)
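
The conversion pattern repeated in every driver below is the same: instead of pointing an spi_message at an array of transfers and setting n_transfer, the message is initialized and each transfer is appended to its list. A minimal before/after sketch, using hypothetical send_old()/send_new() helpers rather than code from this patch:

#include <linux/spi/spi.h>

/* Before: the message pointed at an array of transfers. */
static int send_old(struct spi_device *spi, struct spi_transfer *x)
{
	struct spi_message m = {
		.transfers = x,
		.n_transfer = 2,
	};

	return spi_sync(spi, &m);
}

/* After: the message carries a list; each transfer is appended to it. */
static int send_new(struct spi_device *spi, struct spi_transfer *x)
{
	struct spi_message m;

	spi_message_init(&m);		/* zeroes m, INIT_LIST_HEAD(&m.transfers) */
	spi_message_add_tail(&x[0], &m);
	spi_message_add_tail(&x[1], &m);
	return spi_sync(spi, &m);
}

The list form also lets a driver reuse one message and splice transfers in and out between submissions, which is what the mtd_dataflash changes below rely on.
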
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index c741776ef3bf..dd8c6a9ffc76 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -155,10 +155,13 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
 	struct ser_req *req = kzalloc(sizeof *req, SLAB_KERNEL);
 	int status;
 	int sample;
+	int i;
 
 	if (!req)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&req->msg.transfers);
+
 	/* activate reference, so it has time to settle; */
 	req->xfer[0].tx_buf = &ref_on;
 	req->xfer[0].len = 1;
@@ -192,8 +195,8 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
 	/* group all the transfers together, so we can't interfere with
 	 * reading touchscreen state; disable penirq while sampling
 	 */
-	req->msg.transfers = req->xfer;
-	req->msg.n_transfer = 6;
+	for (i = 0; i < 6; i++)
+		spi_message_add_tail(&req->xfer[i], &req->msg);
 
 	disable_irq(spi->irq);
 	status = spi_sync(spi, &req->msg);
@@ -398,6 +401,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
 	struct ads7846 *ts;
 	struct ads7846_platform_data *pdata = spi->dev.platform_data;
 	struct spi_transfer *x;
+	int i;
 
 	if (!spi->irq) {
 		dev_dbg(&spi->dev, "no IRQ?\n");
@@ -500,8 +504,8 @@ static int __devinit ads7846_probe(struct spi_device *spi)
 
 	CS_CHANGE(x[-1]);
 
-	ts->msg.transfers = ts->xfer;
-	ts->msg.n_transfer = x - ts->xfer;
+	for (i = 0; i < x - ts->xfer; i++)
+		spi_message_add_tail(&ts->xfer[i], &ts->msg);
 	ts->msg.complete = ads7846_rx;
 	ts->msg.context = ts;
 
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 71a072103a7f..45108ed85588 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -245,6 +245,21 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
 	if (from + len > flash->mtd.size)
 		return -EINVAL;
 
+	spi_message_init(&m);
+	memset(t, 0, (sizeof t));
+
+	t[0].tx_buf = flash->command;
+	t[0].len = sizeof(flash->command);
+	spi_message_add_tail(&t[0], &m);
+
+	t[1].rx_buf = buf;
+	t[1].len = len;
+	spi_message_add_tail(&t[1], &m);
+
+	/* Byte count starts at zero. */
+	if (retlen)
+		*retlen = 0;
+
 	down(&flash->lock);
 
 	/* Wait till previous write/erase is done. */
@@ -254,8 +269,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
 		return 1;
 	}
 
-	memset(t, 0, (sizeof t));
-
 	/* NOTE: OPCODE_FAST_READ (if available) is faster... */
 
 	/* Set up the write data buffer. */
@@ -264,19 +277,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
 	flash->command[2] = from >> 8;
 	flash->command[3] = from;
 
-	/* Byte count starts at zero. */
-	if (retlen)
-		*retlen = 0;
-
-	t[0].tx_buf = flash->command;
-	t[0].len = sizeof(flash->command);
-
-	t[1].rx_buf = buf;
-	t[1].len = len;
-
-	m.transfers = t;
-	m.n_transfer = 2;
-
 	spi_sync(flash->spi, &m);
 
 	*retlen = m.actual_length - sizeof(flash->command);
@@ -313,6 +313,16 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
 	if (to + len > flash->mtd.size)
 		return -EINVAL;
 
+	spi_message_init(&m);
+	memset(t, 0, (sizeof t));
+
+	t[0].tx_buf = flash->command;
+	t[0].len = sizeof(flash->command);
+	spi_message_add_tail(&t[0], &m);
+
+	t[1].tx_buf = buf;
+	spi_message_add_tail(&t[1], &m);
+
 	down(&flash->lock);
 
 	/* Wait until finished previous write command. */
@@ -321,26 +331,17 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
 
 	write_enable(flash);
 
-	memset(t, 0, (sizeof t));
-
 	/* Set up the opcode in the write buffer. */
 	flash->command[0] = OPCODE_PP;
 	flash->command[1] = to >> 16;
 	flash->command[2] = to >> 8;
 	flash->command[3] = to;
 
-	t[0].tx_buf = flash->command;
-	t[0].len = sizeof(flash->command);
-
-	m.transfers = t;
-	m.n_transfer = 2;
-
 	/* what page do we start with? */
 	page_offset = to % FLASH_PAGESIZE;
 
 	/* do all the bytes fit onto one page? */
 	if (page_offset + len <= FLASH_PAGESIZE) {
-		t[1].tx_buf = buf;
 		t[1].len = len;
 
 		spi_sync(flash->spi, &m);
@@ -352,7 +353,6 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
 		/* the size of data remaining on the first page */
 		page_size = FLASH_PAGESIZE - page_offset;
 
-		t[1].tx_buf = buf;
 		t[1].len = page_size;
 		spi_sync(flash->spi, &m);
 
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index a39b3b6b266c..99d3a0320fc9 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -147,7 +147,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
 {
 	struct dataflash *priv = (struct dataflash *)mtd->priv;
 	struct spi_device *spi = priv->spi;
-	struct spi_transfer x[1] = { { .tx_dma = 0, }, };
+	struct spi_transfer x = { .tx_dma = 0, };
 	struct spi_message msg;
 	unsigned blocksize = priv->page_size << 3;
 	u8 *command;
@@ -162,10 +162,11 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
 			|| (instr->addr % priv->page_size) != 0)
 		return -EINVAL;
 
-	x[0].tx_buf = command = priv->command;
-	x[0].len = 4;
-	msg.transfers = x;
-	msg.n_transfer = 1;
+	spi_message_init(&msg);
+
+	x.tx_buf = command = priv->command;
+	x.len = 4;
+	spi_message_add_tail(&x, &msg);
 
 	down(&priv->lock);
 	while (instr->len > 0) {
@@ -256,12 +257,15 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
 	DEBUG(MTD_DEBUG_LEVEL3, "READ: (%x) %x %x %x\n",
 		command[0], command[1], command[2], command[3]);
 
+	spi_message_init(&msg);
+
 	x[0].tx_buf = command;
 	x[0].len = 8;
+	spi_message_add_tail(&x[0], &msg);
+
 	x[1].rx_buf = buf;
 	x[1].len = len;
-	msg.transfers = x;
-	msg.n_transfer = 2;
+	spi_message_add_tail(&x[1], &msg);
 
 	down(&priv->lock);
 
@@ -320,9 +324,11 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
 	if ((to + len) > mtd->size)
 		return -EINVAL;
 
+	spi_message_init(&msg);
+
 	x[0].tx_buf = command = priv->command;
 	x[0].len = 4;
-	msg.transfers = x;
+	spi_message_add_tail(&x[0], &msg);
 
 	pageaddr = ((unsigned)to / priv->page_size);
 	offset = ((unsigned)to % priv->page_size);
@@ -364,7 +370,6 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
 		DEBUG(MTD_DEBUG_LEVEL3, "TRANSFER: (%x) %x %x %x\n",
 			command[0], command[1], command[2], command[3]);
 
-		msg.n_transfer = 1;
 		status = spi_sync(spi, &msg);
 		if (status < 0)
 			DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n",
@@ -385,14 +390,16 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
 
 		x[1].tx_buf = writebuf;
 		x[1].len = writelen;
-		msg.n_transfer = 2;
+		spi_message_add_tail(x + 1, &msg);
 		status = spi_sync(spi, &msg);
+		spi_transfer_del(x + 1);
 		if (status < 0)
 			DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n",
 				spi->dev.bus_id, addr, writelen, status);
 
 		(void) dataflash_waitready(priv->spi);
 
+
 #ifdef CONFIG_DATAFLASH_WRITE_VERIFY
 
 		/* (3) Compare to Buffer1 */
@@ -405,7 +412,6 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
 		DEBUG(MTD_DEBUG_LEVEL3, "COMPARE: (%x) %x %x %x\n",
 			command[0], command[1], command[2], command[3]);
 
-		msg.n_transfer = 1;
 		status = spi_sync(spi, &msg);
 		if (status < 0)
 			DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n",
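
The dataflash write path above keeps one message alive across several spi_sync() calls: the command transfer stays on the list the whole time, while the data transfer is linked in only for the step that needs it and unlinked again with spi_transfer_del(). A condensed sketch of that pattern (my_cmd_then_data() is an illustrative name, not code from the driver):

static int my_cmd_then_data(struct spi_device *spi, struct spi_transfer *x)
{
	struct spi_message msg;
	int status;

	spi_message_init(&msg);
	spi_message_add_tail(&x[0], &msg);	/* command, always present */

	status = spi_sync(spi, &msg);		/* command-only step */
	if (status < 0)
		return status;

	spi_message_add_tail(&x[1], &msg);	/* add the data transfer ... */
	status = spi_sync(spi, &msg);
	spi_transfer_del(&x[1]);		/* ... and unlink it for reuse */

	return status;
}
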
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3ecedccdb96c..cdb242de901d 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -557,6 +557,17 @@ int spi_write_then_read(struct spi_device *spi,
 	if ((n_tx + n_rx) > SPI_BUFSIZ)
 		return -EINVAL;
 
+	spi_message_init(&message);
+	memset(x, 0, sizeof x);
+	if (n_tx) {
+		x[0].len = n_tx;
+		spi_message_add_tail(&x[0], &message);
+	}
+	if (n_rx) {
+		x[1].len = n_rx;
+		spi_message_add_tail(&x[1], &message);
+	}
+
 	/* ... unless someone else is using the pre-allocated buffer */
 	if (down_trylock(&lock)) {
 		local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
@@ -565,18 +576,11 @@ int spi_write_then_read(struct spi_device *spi,
 	} else
 		local_buf = buf;
 
-	memset(x, 0, sizeof x);
-
 	memcpy(local_buf, txbuf, n_tx);
 	x[0].tx_buf = local_buf;
-	x[0].len = n_tx;
-
 	x[1].rx_buf = local_buf + n_tx;
-	x[1].len = n_rx;
 
 	/* do the i/o */
-	message.transfers = x;
-	message.n_transfer = ARRAY_SIZE(x);
 	status = spi_sync(spi, &message);
 	if (status == 0) {
 		memcpy(rxbuf, x[1].rx_buf, n_rx);
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 44aff198eb96..f037e5593269 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -146,6 +146,9 @@ int spi_bitbang_setup(struct spi_device *spi)
 	struct spi_bitbang_cs *cs = spi->controller_state;
 	struct spi_bitbang *bitbang;
 
+	if (!spi->max_speed_hz)
+		return -EINVAL;
+
 	if (!cs) {
 		cs = kzalloc(sizeof *cs, SLAB_KERNEL);
 		if (!cs)
@@ -172,13 +175,8 @@ int spi_bitbang_setup(struct spi_device *spi)
 	if (!cs->txrx_word)
 		return -EINVAL;
 
-	if (!spi->max_speed_hz)
-		spi->max_speed_hz = 500 * 1000;
-
-	/* nsecs = max(50, (clock period)/2), be optimistic */
+	/* nsecs = (clock period)/2 */
 	cs->nsecs = (1000000000/2) / (spi->max_speed_hz);
-	if (cs->nsecs < 50)
-		cs->nsecs = 50;
 	if (cs->nsecs > MAX_UDELAY_MS * 1000)
 		return -EINVAL;
 
@@ -194,7 +192,7 @@ int spi_bitbang_setup(struct spi_device *spi)
 	/* deselect chip (low or high) */
 	spin_lock(&bitbang->lock);
 	if (!bitbang->busy) {
-		bitbang->chipselect(spi, 0);
+		bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
 		ndelay(cs->nsecs);
 	}
 	spin_unlock(&bitbang->lock);
@@ -244,9 +242,9 @@ static void bitbang_work(void *_bitbang)
 		struct spi_message *m;
 		struct spi_device *spi;
 		unsigned nsecs;
-		struct spi_transfer *t;
+		struct spi_transfer *t = NULL;
 		unsigned tmp;
-		unsigned chipselect;
+		unsigned cs_change;
 		int status;
 
 		m = container_of(bitbang->queue.next, struct spi_message,
@@ -254,37 +252,49 @@ static void bitbang_work(void *_bitbang)
 		list_del_init(&m->queue);
 		spin_unlock_irqrestore(&bitbang->lock, flags);
 
-// FIXME this is made-up
-nsecs = 100;
+		/* FIXME this is made-up ... the correct value is known to
+		 * word-at-a-time bitbang code, and presumably chipselect()
+		 * should enforce these requirements too?
+		 */
+		nsecs = 100;
 
 		spi = m->spi;
-		t = m->transfers;
 		tmp = 0;
-		chipselect = 0;
+		cs_change = 1;
 		status = 0;
 
-		for (;;t++) {
+		list_for_each_entry (t, &m->transfers, transfer_list) {
 			if (bitbang->shutdown) {
 				status = -ESHUTDOWN;
 				break;
 			}
 
-			/* set up default clock polarity, and activate chip */
-			if (!chipselect) {
-				bitbang->chipselect(spi, 1);
+			/* set up default clock polarity, and activate chip;
+			 * this implicitly updates clock and spi modes as
+			 * previously recorded for this device via setup().
+			 * (and also deselects any other chip that might be
+			 * selected ...)
+			 */
+			if (cs_change) {
+				bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
 				ndelay(nsecs);
 			}
+			cs_change = t->cs_change;
 			if (!t->tx_buf && !t->rx_buf && t->len) {
 				status = -EINVAL;
 				break;
 			}
 
-			/* transfer data */
+			/* transfer data. the lower level code handles any
+			 * new dma mappings it needs. our caller always gave
+			 * us dma-safe buffers.
+			 */
 			if (t->len) {
-				/* FIXME if bitbang->use_dma, dma_map_single()
-				 * before the transfer, and dma_unmap_single()
-				 * afterwards, for either or both buffers...
+				/* REVISIT dma API still needs a designated
+				 * DMA_ADDR_INVALID; ~0 might be better.
 				 */
+				if (!m->is_dma_mapped)
+					t->rx_dma = t->tx_dma = 0;
 				status = bitbang->txrx_bufs(spi, t);
 			}
 			if (status != t->len) {
@@ -299,29 +309,31 @@ nsecs = 100;
 			if (t->delay_usecs)
 				udelay(t->delay_usecs);
 
-			tmp++;
-			if (tmp >= m->n_transfer)
-				break;
-
-			chipselect = !t->cs_change;
-			if (chipselect);
+			if (!cs_change)
 				continue;
+			if (t->transfer_list.next == &m->transfers)
+				break;
 
-			bitbang->chipselect(spi, 0);
-
-			/* REVISIT do we want the udelay here instead? */
-			msleep(1);
+			/* sometimes a short mid-message deselect of the chip
+			 * may be needed to terminate a mode or command
+			 */
+			ndelay(nsecs);
+			bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+			ndelay(nsecs);
 		}
 
-		tmp = m->n_transfer - 1;
-		tmp = m->transfers[tmp].cs_change;
-
 		m->status = status;
 		m->complete(m->context);
 
-		ndelay(2 * nsecs);
-		bitbang->chipselect(spi, status == 0 && tmp);
-		ndelay(nsecs);
+		/* normally deactivate chipselect ... unless no error and
+		 * cs_change has hinted that the next message will probably
+		 * be for this chip too.
+		 */
+		if (!(status == 0 && cs_change)) {
+			ndelay(nsecs);
+			bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+			ndelay(nsecs);
+		}
 
 		spin_lock_irqsave(&bitbang->lock, flags);
 	}
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 6a41e2650b2e..939afd3a2e72 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -263,15 +263,16 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
 
 /**
  * struct spi_transfer - a read/write buffer pair
- * @tx_buf: data to be written (dma-safe address), or NULL
- * @rx_buf: data to be read (dma-safe address), or NULL
- * @tx_dma: DMA address of buffer, if spi_message.is_dma_mapped
- * @rx_dma: DMA address of buffer, if spi_message.is_dma_mapped
+ * @tx_buf: data to be written (dma-safe memory), or NULL
+ * @rx_buf: data to be read (dma-safe memory), or NULL
+ * @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped
+ * @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped
  * @len: size of rx and tx buffers (in bytes)
  * @cs_change: affects chipselect after this transfer completes
  * @delay_usecs: microseconds to delay after this transfer before
  *	(optionally) changing the chipselect status, then starting
  *	the next transfer or completing this spi_message.
+ * @transfer_list: transfers are sequenced through spi_message.transfers
  *
  * SPI transfers always write the same number of bytes as they read.
  * Protocol drivers should always provide rx_buf and/or tx_buf.
@@ -279,11 +280,16 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
  * the data being transferred; that may reduce overhead, when the
  * underlying driver uses dma.
  *
- * All SPI transfers start with the relevant chipselect active. Drivers
- * can change behavior of the chipselect after the transfer finishes
- * (including any mandatory delay). The normal behavior is to leave it
- * selected, except for the last transfer in a message. Setting cs_change
- * allows two additional behavior options:
+ * If the transmit buffer is null, undefined data will be shifted out
+ * while filling rx_buf. If the receive buffer is null, the data
+ * shifted in will be discarded. Only "len" bytes shift out (or in).
+ * It's an error to try to shift out a partial word. (For example, by
+ * shifting out three bytes with word size of sixteen or twenty bits;
+ * the former uses two bytes per word, the latter uses four bytes.)
+ *
+ * All SPI transfers start with the relevant chipselect active. Normally
+ * it stays selected until after the last transfer in a message. Drivers
+ * can affect the chipselect signal using cs_change:
  *
  * (i) If the transfer isn't the last one in the message, this flag is
  * used to make the chipselect briefly go inactive in the middle of the
@@ -299,7 +305,8 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
  * The code that submits an spi_message (and its spi_transfers)
  * to the lower layers is responsible for managing its memory.
  * Zero-initialize every field you don't set up explicitly, to
- * insulate against future API updates.
+ * insulate against future API updates. After you submit a message
+ * and its transfers, ignore them until its completion callback.
  */
 struct spi_transfer {
 	/* it's ok if tx_buf == rx_buf (right?)
@@ -316,12 +323,13 @@ struct spi_transfer {
 
 	unsigned cs_change:1;
 	u16 delay_usecs;
+
+	struct list_head transfer_list;
 };
 
 /**
  * struct spi_message - one multi-segment SPI transaction
- * @transfers: the segements of the transaction
- * @n_transfer: how many segments
+ * @transfers: list of transfer segments in this transaction
  * @spi: SPI device to which the transaction is queued
  * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
  *	addresses for each transfer buffer
@@ -333,14 +341,22 @@ struct spi_transfer {
  * @queue: for use by whichever driver currently owns the message
  * @state: for use by whichever driver currently owns the message
  *
+ * An spi_message is used to execute an atomic sequence of data transfers,
+ * each represented by a struct spi_transfer. The sequence is "atomic"
+ * in the sense that no other spi_message may use that SPI bus until that
+ * sequence completes. On some systems, many such sequences can execute as
+ * a single programmed DMA transfer. On all systems, these messages are
+ * queued, and might complete after transactions to other devices. Messages
+ * sent to a given spi_device are always executed in FIFO order.
+ *
  * The code that submits an spi_message (and its spi_transfers)
  * to the lower layers is responsible for managing its memory.
  * Zero-initialize every field you don't set up explicitly, to
- * insulate against future API updates.
+ * insulate against future API updates. After you submit a message
+ * and its transfers, ignore them until its completion callback.
  */
 struct spi_message {
-	struct spi_transfer *transfers;
-	unsigned n_transfer;
+	struct list_head transfers;
 
 	struct spi_device *spi;
 
@@ -371,6 +387,24 @@ struct spi_message {
 	void *state;
 };
 
+static inline void spi_message_init(struct spi_message *m)
+{
+	memset(m, 0, sizeof *m);
+	INIT_LIST_HEAD(&m->transfers);
+}
+
+static inline void
+spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
+{
+	list_add_tail(&t->transfer_list, &m->transfers);
+}
+
+static inline void
+spi_transfer_del(struct spi_transfer *t)
+{
+	list_del(&t->transfer_list);
+}
+
 /* It's fine to embed message and transaction structures in other data
  * structures so long as you don't free them while they're in use.
  */
@@ -383,8 +417,12 @@ static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags
 			+ ntrans * sizeof(struct spi_transfer),
 			flags);
 	if (m) {
-		m->transfers = (void *)(m + 1);
-		m->n_transfer = ntrans;
+		int i;
+		struct spi_transfer *t = (struct spi_transfer *)(m + 1);
+
+		INIT_LIST_HEAD(&m->transfers);
+		for (i = 0; i < ntrans; i++, t++)
+			spi_message_add_tail(t, m);
 	}
 	return m;
 }
@@ -402,6 +440,8 @@ static inline void spi_message_free(struct spi_message *m)
  * device doesn't work with the mode 0 default. They may likewise need
  * to update clock rates or word sizes from initial values. This function
  * changes those settings, and must be called from a context that can sleep.
+ * The changes take effect the next time the device is selected and data
+ * is transferred to or from it.
  */
 static inline int
 spi_setup(struct spi_device *spi)
@@ -468,15 +508,12 @@ spi_write(struct spi_device *spi, const u8 *buf, size_t len)
 {
 	struct spi_transfer t = {
 			.tx_buf = buf,
-			.rx_buf = NULL,
 			.len = len,
-			.cs_change = 0,
-		};
-	struct spi_message m = {
-		.transfers = &t,
-		.n_transfer = 1,
 		};
+	struct spi_message m;
 
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
 	return spi_sync(spi, &m);
 }
 
@@ -493,16 +530,13 @@ static inline int
 spi_read(struct spi_device *spi, u8 *buf, size_t len)
 {
 	struct spi_transfer t = {
-			.tx_buf = NULL,
 			.rx_buf = buf,
 			.len = len,
-			.cs_change = 0,
-		};
-	struct spi_message m = {
-		.transfers = &t,
-		.n_transfer = 1,
 		};
+	struct spi_message m;
 
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
 	return spi_sync(spi, &m);
 }
 
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index 8dfe61a445f4..c961fe9bf3eb 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -31,8 +31,15 @@ struct spi_bitbang {
 	struct spi_master *master;
 
 	void (*chipselect)(struct spi_device *spi, int is_on);
+#define BITBANG_CS_ACTIVE	1	/* normally nCS, active low */
+#define BITBANG_CS_INACTIVE	0
 
+	/* txrx_bufs() may handle dma mapping for transfers that don't
+	 * already have one (transfer.{tx,rx}_dma is zero), or use PIO
+	 */
 	int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
+
+	/* txrx_word[SPI_MODE_*]() just looks like a shift register */
 	u32 (*txrx_word[4])(struct spi_device *spi,
 			unsigned nsecs,
 			u32 word, u8 bits);
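
The BITBANG_CS_* values above are what a board's chipselect() hook now receives. A sketch of such a hook, assuming an active-low nCS line; MYBOARD_nCS_GPIO and the gpio_set_value()-style call stand in for whatever the platform actually uses to drive the pin, and a real implementation would also honor SPI_CS_HIGH in spi->mode:

/* Hypothetical board glue, not from this patch. */
static void myboard_chipselect(struct spi_device *spi, int is_active)
{
	/* BITBANG_CS_ACTIVE == 1 means "select"; nCS is active low */
	gpio_set_value(MYBOARD_nCS_GPIO, !is_active);
}
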