-rw-r--r--   Documentation/spi/spi-summary |  16
-rw-r--r--   drivers/spi/spi.c             |  45
-rw-r--r--   include/linux/spi/spi.h       |  75
3 files changed, 113 insertions, 23 deletions
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index c6152d1ff2b0..761debf748e9 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -363,6 +363,22 @@ upper boundaries might include sysfs (especially for sensor readings),
 the input layer, ALSA, networking, MTD, the character device framework,
 or other Linux subsystems.
 
+Note that there are two types of memory your driver must manage as part
+of interacting with SPI devices.
+
+  - I/O buffers use the usual Linux rules, and must be DMA-safe.
+    You'd normally allocate them from the heap or free page pool.
+    Don't use the stack, or anything that's declared "static".
+
+  - The spi_message and spi_transfer metadata used to glue those
+    I/O buffers into a group of protocol transactions.  These can
+    be allocated anywhere it's convenient, including as part of
+    other allocate-once driver data structures.  Zero-init these.
+
+If you like, spi_message_alloc() and spi_message_free() convenience
+routines are available to allocate and zero-initialize an spi_message
+with several transfers.
+
 
 How do I write an "SPI Master Controller Driver"?
 -------------------------------------------------
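
As a rough illustration of the two memory rules spelled out above, a protocol
driver might keep the spi_message/spi_transfer metadata in allocate-once driver
state and put the I/O buffer on the heap. None of this code is part of the
patch; the my_chip structure and my_chip_read() are invented names, while
spi_message, spi_transfer, and spi_sync() are the interfaces being documented:

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/spi/spi.h>

    struct my_chip {                        /* invented driver state */
            struct spi_device       *spi;
            struct spi_message      msg;    /* metadata: allocate-once */
            struct spi_transfer     xfer;
            u8                      *buf;   /* I/O buffer: heap, DMA-safe */
    };

    static int my_chip_read(struct my_chip *chip, unsigned len)
    {
            chip->buf = kzalloc(len, GFP_KERNEL);   /* never stack or "static" */
            if (!chip->buf)
                    return -ENOMEM;

            /* zero-init the metadata, then fill in only what's needed */
            memset(&chip->msg, 0, sizeof chip->msg);
            memset(&chip->xfer, 0, sizeof chip->xfer);
            chip->xfer.rx_buf = chip->buf;
            chip->xfer.len = len;
            chip->msg.transfers = &chip->xfer;
            chip->msg.n_transfer = 1;

            /* the caller consumes chip->buf and frees it later */
            return spi_sync(chip->spi, &chip->msg);
    }
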
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 2ecb86cb3689..3ecedccdb96c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -38,7 +38,7 @@ static void spidev_release(struct device *dev)
 	if (spi->master->cleanup)
 		spi->master->cleanup(spi);
 
-	class_device_put(&spi->master->cdev);
+	spi_master_put(spi->master);
 	kfree(dev);
 }
 
@@ -90,7 +90,7 @@ static int spi_suspend(struct device *dev, pm_message_t message)
 	int			value;
 	struct spi_driver	*drv = to_spi_driver(dev->driver);
 
-	if (!drv || !drv->suspend)
+	if (!drv->suspend)
 		return 0;
 
 	/* suspend will stop irqs and dma; no more i/o */
@@ -105,7 +105,7 @@ static int spi_resume(struct device *dev)
 	int			value;
 	struct spi_driver	*drv = to_spi_driver(dev->driver);
 
-	if (!drv || !drv->resume)
+	if (!drv->resume)
 		return 0;
 
 	/* resume may restart the i/o queue */
@@ -198,7 +198,7 @@ spi_new_device(struct spi_master *master, struct spi_board_info *chip)
 
 	/* NOTE: caller did any chip->bus_num checks necessary */
 
-	if (!class_device_get(&master->cdev))
+	if (!spi_master_get(master))
 		return NULL;
 
 	proxy = kzalloc(sizeof *proxy, GFP_KERNEL);
@@ -244,7 +244,7 @@ spi_new_device(struct spi_master *master, struct spi_board_info *chip)
 	return proxy;
 
 fail:
-	class_device_put(&master->cdev);
+	spi_master_put(master);
 	kfree(proxy);
 	return NULL;
 }
@@ -324,8 +324,6 @@ static void spi_master_release(struct class_device *cdev)
 	struct spi_master *master;
 
 	master = container_of(cdev, struct spi_master, cdev);
-	put_device(master->cdev.dev);
-	master->cdev.dev = NULL;
 	kfree(master);
 }
 
@@ -339,8 +337,9 @@ static struct class spi_master_class = {
 /**
  * spi_alloc_master - allocate SPI master controller
  * @dev: the controller, possibly using the platform_bus
- * @size: how much driver-private data to preallocate; a pointer to this
- *	memory in the class_data field of the returned class_device
+ * @size: how much driver-private data to preallocate; the pointer to this
+ *	memory is in the class_data field of the returned class_device,
+ *	accessible with spi_master_get_devdata().
  *
  * This call is used only by SPI master controller drivers, which are the
  * only ones directly touching chip registers. It's how they allocate
@@ -350,14 +349,17 @@ static struct class spi_master_class = {
  * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
- * the master's methods before calling spi_add_master(), or else (on error)
- * calling class_device_put() to prevent a memory leak.
+ * the master's methods before calling spi_add_master(); and (after errors
+ * adding the device) calling spi_master_put() to prevent a memory leak.
 */
 struct spi_master * __init_or_module
 spi_alloc_master(struct device *dev, unsigned size)
 {
 	struct spi_master	*master;
 
+	if (!dev)
+		return NULL;
+
 	master = kzalloc(size + sizeof *master, SLAB_KERNEL);
 	if (!master)
 		return NULL;
@@ -365,7 +367,7 @@ spi_alloc_master(struct device *dev, unsigned size)
 	class_device_initialize(&master->cdev);
 	master->cdev.class = &spi_master_class;
 	master->cdev.dev = get_device(dev);
-	class_set_devdata(&master->cdev, &master[1]);
+	spi_master_set_devdata(master, &master[1]);
 
 	return master;
 }
@@ -387,6 +389,8 @@ EXPORT_SYMBOL_GPL(spi_alloc_master);
 *
 * This must be called from context that can sleep. It returns zero on
 * success, else a negative error code (dropping the master's refcount).
+ * After a successful return, the caller is responsible for calling
+ * spi_unregister_master().
 */
 int __init_or_module
 spi_register_master(struct spi_master *master)
@@ -396,6 +400,9 @@ spi_register_master(struct spi_master *master)
 	int			status = -ENODEV;
 	int			dynamic = 0;
 
+	if (!dev)
+		return -ENODEV;
+
 	/* convention: dynamically assigned bus IDs count down from the max */
 	if (master->bus_num == 0) {
 		master->bus_num = atomic_dec_return(&dyn_bus_id);
@@ -425,7 +432,7 @@ EXPORT_SYMBOL_GPL(spi_register_master);
 static int __unregister(struct device *dev, void *unused)
 {
 	/* note: before about 2.6.14-rc1 this would corrupt memory: */
-	device_unregister(dev);
+	spi_unregister_device(to_spi_device(dev));
 	return 0;
 }
 
@@ -440,8 +447,9 @@ static int __unregister(struct device *dev, void *unused)
 */
 void spi_unregister_master(struct spi_master *master)
 {
-	class_device_unregister(&master->cdev);
 	(void) device_for_each_child(master->cdev.dev, NULL, __unregister);
+	class_device_unregister(&master->cdev);
+	master->cdev.dev = NULL;
 }
 EXPORT_SYMBOL_GPL(spi_unregister_master);
 
@@ -487,6 +495,9 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
+ * Also, the caller is guaranteeing that the memory associated with the
+ * message will not be freed before this call returns.
+ *
 * The return value is a negative error code if the message could not be
 * submitted, else zero. When the value is zero, then message->status is
 * also defined: it's the completion code for the transfer, either zero
@@ -524,9 +535,9 @@ static u8 *buf;
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
- * Parameters to this routine are always copied using a small buffer,
- * large transfers should use use spi_{async,sync}() calls with
- * dma-safe buffers.
+ * Parameters to this routine are always copied using a small buffer;
+ * performance-sensitive or bulk transfer code should instead use
+ * spi_{async,sync}() calls with dma-safe buffers.
 */
 int spi_write_then_read(struct spi_device *spi,
 		const u8 *txbuf, unsigned n_tx,
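
In the same illustrative spirit, a master controller driver would typically
pair the helpers this patch introduces as follows in its probe and remove
paths. This sketch is not from the patch: my_ctlr, my_spi_probe(), and
my_spi_remove() are made-up names and the platform_device glue is assumed;
spi_alloc_master(), spi_master_get_devdata(), spi_register_master(),
spi_master_put(), and spi_unregister_master() are the interfaces this
commit touches:

    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    struct my_ctlr {                        /* invented per-controller state */
            void __iomem    *regs;
    };

    static int my_spi_probe(struct platform_device *pdev)
    {
            struct spi_master       *master;
            struct my_ctlr          *ctlr;
            int                     status;

            /* driver-private data is preallocated right behind the spi_master */
            master = spi_alloc_master(&pdev->dev, sizeof *ctlr);
            if (!master)
                    return -ENOMEM;
            ctlr = spi_master_get_devdata(master);
            ctlr->regs = NULL;              /* would be ioremap()ed here */

            master->bus_num = pdev->id;
            /* ... assign num_chipselect and the setup/transfer/cleanup methods ... */

            status = spi_register_master(master);
            if (status < 0) {
                    /* registration failed: drop our reference to avoid a leak */
                    spi_master_put(master);
                    return status;
            }
            platform_set_drvdata(pdev, master);
            return 0;
    }

    static int my_spi_remove(struct platform_device *pdev)
    {
            /* unregisters child spi_devices, then releases the master */
            spi_unregister_master(platform_get_drvdata(pdev));
            return 0;
    }
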
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index c851b3d13208..6a41e2650b2e 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -60,8 +60,8 @@ struct spi_device {
 	u8			mode;
 #define	SPI_CPHA	0x01		/* clock phase */
 #define	SPI_CPOL	0x02		/* clock polarity */
-#define	SPI_MODE_0	(0|0)
-#define	SPI_MODE_1	(0|SPI_CPHA)	/* (original MicroWire) */
+#define	SPI_MODE_0	(0|0)		/* (original MicroWire) */
+#define	SPI_MODE_1	(0|SPI_CPHA)
 #define	SPI_MODE_2	(SPI_CPOL|0)
 #define	SPI_MODE_3	(SPI_CPOL|SPI_CPHA)
 #define	SPI_CS_HIGH	0x04		/* chipselect active high? */
@@ -209,6 +209,30 @@ struct spi_master {
 	void	(*cleanup)(const struct spi_device *spi);
 };
 
+static inline void *spi_master_get_devdata(struct spi_master *master)
+{
+	return class_get_devdata(&master->cdev);
+}
+
+static inline void spi_master_set_devdata(struct spi_master *master, void *data)
+{
+	class_set_devdata(&master->cdev, data);
+}
+
+static inline struct spi_master *spi_master_get(struct spi_master *master)
+{
+	if (!master || !class_device_get(&master->cdev))
+		return NULL;
+	return master;
+}
+
+static inline void spi_master_put(struct spi_master *master)
+{
+	if (master)
+		class_device_put(&master->cdev);
+}
+
+
 /* the spi driver core manages memory for the spi_master classdev */
 extern struct spi_master *
 spi_alloc_master(struct device *host, unsigned size);
@@ -271,11 +295,17 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
 * stay selected until the next transfer. This is purely a performance
 * hint; the controller driver may need to select a different device
 * for the next message.
+ *
+ * The code that submits an spi_message (and its spi_transfers)
+ * to the lower layers is responsible for managing its memory.
+ * Zero-initialize every field you don't set up explicitly, to
+ * insulate against future API updates.
 */
 struct spi_transfer {
 	/* it's ok if tx_buf == rx_buf (right?)
 	 * for MicroWire, one buffer must be null
-	 * buffers must work with dma_*map_single() calls
+	 * buffers must work with dma_*map_single() calls, unless
+	 *   spi_message.is_dma_mapped reports a pre-existing mapping
 	 */
 	const void	*tx_buf;
 	void		*rx_buf;
@@ -302,6 +332,11 @@ struct spi_transfer {
 * @status: zero for success, else negative errno
 * @queue: for use by whichever driver currently owns the message
 * @state: for use by whichever driver currently owns the message
+ *
+ * The code that submits an spi_message (and its spi_transfers)
+ * to the lower layers is responsible for managing its memory.
+ * Zero-initialize every field you don't set up explicitly, to
+ * insulate against future API updates.
 */
 struct spi_message {
 	struct spi_transfer	*transfers;
@@ -336,6 +371,29 @@ struct spi_message {
 	void			*state;
 };
 
+/* It's fine to embed message and transaction structures in other data
+ * structures so long as you don't free them while they're in use.
+ */
+
+static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
+{
+	struct spi_message *m;
+
+	m = kzalloc(sizeof(struct spi_message)
+			+ ntrans * sizeof(struct spi_transfer),
+			flags);
+	if (m) {
+		m->transfers = (void *)(m + 1);
+		m->n_transfer = ntrans;
+	}
+	return m;
+}
+
+static inline void spi_message_free(struct spi_message *m)
+{
+	kfree(m);
+}
+
 /**
 * spi_setup -- setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
@@ -363,7 +421,10 @@ spi_setup(struct spi_device *spi)
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
- * indicate complete success) or a negative error code.
+ * indicate complete success) or a negative error code. After that
+ * callback returns, the driver which issued the transfer request may
+ * deallocate the associated memory; it's no longer in use by any SPI
+ * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
@@ -445,6 +506,7 @@ spi_read(struct spi_device *spi, u8 *buf, size_t len)
 	return spi_sync(spi, &m);
 }
 
+/* this copies txbuf and rxbuf data; for small transfers only! */
 extern int spi_write_then_read(struct spi_device *spi,
 		const u8 *txbuf, unsigned n_tx,
 		u8 *rxbuf, unsigned n_rx);
@@ -555,8 +617,9 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
 
 
 /* If you're hotplugging an adapter with devices (parport, usb, etc)
- * use spi_new_device() to describe each device. You would then call
- * spi_unregister_device() to start making that device vanish.
+ * use spi_new_device() to describe each device. You can also call
+ * spi_unregister_device() to start making that device vanish, but
+ * normally that would be handled by spi_unregister_master().
 */
 extern struct spi_device *
 spi_new_device(struct spi_master *, struct spi_board_info *);
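
Finally, to show how the new spi_message_alloc()/spi_message_free() helpers
and the completion rules documented above fit together, here is a hypothetical
asynchronous read. my_msg_complete() and my_start_transfer() are invented,
and the cmd/data buffers are assumed to already be DMA-safe per the rules in
spi-summary; spi_message_alloc(), spi_async(), and spi_message_free() are the
real interfaces:

    #include <linux/kernel.h>
    #include <linux/spi/spi.h>

    /* once this callback returns, no SPI core or controller code touches
     * the message again, so it is safe to free it here
     */
    static void my_msg_complete(void *context)
    {
            struct spi_message      *m = context;

            if (m->status)
                    pr_debug("spi transfer failed: %d\n", m->status);
            spi_message_free(m);
    }

    static int my_start_transfer(struct spi_device *spi,
                    const u8 *cmd, unsigned n_cmd, u8 *data, unsigned n_data)
    {
            struct spi_message      *m;

            /* one allocation holds the message plus two zeroed transfers;
             * GFP_ATOMIC since spi_async() callers may not be able to sleep
             */
            m = spi_message_alloc(2, GFP_ATOMIC);
            if (!m)
                    return -ENOMEM;

            m->transfers[0].tx_buf = cmd;           /* write the command ... */
            m->transfers[0].len = n_cmd;
            m->transfers[1].rx_buf = data;          /* ... then read the reply */
            m->transfers[1].len = n_data;

            m->complete = my_msg_complete;
            m->context = m;

            return spi_async(spi, m);
    }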