/*
 * Copyright (C) 2005 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __LINUX_SPI_H
#define __LINUX_SPI_H

/*
 * INTERFACES between SPI master-side drivers and SPI infrastructure.
 * (There's no SPI slave support for Linux yet...)
 */
extern struct bus_type spi_bus_type;

/**
 * struct spi_device - Master side proxy for an SPI slave device
 * @dev: Driver model representation of the device.
 * @master: SPI controller used with the device.
 * @max_speed_hz: Maximum clock rate to be used with this chip
 *      (on this board); may be changed by the device's driver.
 * @chip_select: Chipselect, distinguishing chips handled by "master".
 * @mode: The spi mode defines how data is clocked out and in.
 *      This may be changed by the device's driver.
 * @bits_per_word: Data transfers involve one or more words; word sizes
 *      like eight or 12 bits are common.  In-memory wordsizes are
 *      powers of two bytes (e.g. 20 bit samples use 32 bits).
 *      This may be changed by the device's driver.
 * @irq: Negative, or the number passed to request_irq() to receive
 *      interrupts from this device.
 * @controller_state: Controller's runtime state
 * @controller_data: Board-specific definitions for controller, such as
 *      FIFO initialization parameters; from board_info.controller_data
 *
 * An spi_device is used to interchange data between an SPI slave
 * (usually a discrete chip) and CPU memory.
 *
 * In "dev", the platform_data is used to hold information about this
 * device that's meaningful to the device's protocol driver, but not
 * to its controller.  One example might be an identifier for a chip
 * variant with slightly different functionality.
 */
struct spi_device {
        struct device           dev;
        struct spi_master       *master;
        u32                     max_speed_hz;
        u8                      chip_select;
        u8                      mode;
#define SPI_CPHA        0x01            /* clock phase */
#define SPI_CPOL        0x02            /* clock polarity */
#define SPI_MODE_0      (0|0)           /* (original MicroWire) */
#define SPI_MODE_1      (0|SPI_CPHA)
#define SPI_MODE_2      (SPI_CPOL|0)
#define SPI_MODE_3      (SPI_CPOL|SPI_CPHA)
#define SPI_CS_HIGH     0x04            /* chipselect active high? */
        u8                      bits_per_word;
        int                     irq;
        void                    *controller_state;
        void                    *controller_data;
        const char              *modalias;

        // likely need more hooks for more protocol options affecting how
        // the controller talks to each chip, like:
        //  - bit order (default is wordwise msb-first)
        //  - memory packing (12 bit samples into low bits, others zeroed)
        //  - priority
        //  - drop chipselect after each word
        //  - chipselect delays
        //  - ...
};

static inline struct spi_device *to_spi_device(struct device *dev)
{
        return dev ? container_of(dev, struct spi_device, dev) : NULL;
}

/* most drivers won't need to care about device refcounting */
static inline struct spi_device *spi_dev_get(struct spi_device *spi)
{
        return (spi && get_device(&spi->dev)) ? spi : NULL;
}

static inline void spi_dev_put(struct spi_device *spi)
{
        if (spi)
                put_device(&spi->dev);
}

/* ctldata is for the bus_master driver's runtime state */
static inline void *spi_get_ctldata(struct spi_device *spi)
{
        return spi->controller_state;
}

static inline void spi_set_ctldata(struct spi_device *spi, void *state)
{
        spi->controller_state = state;
}

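/*
 * Example (an illustrative sketch, not part of this header's API): a
 * controller driver can use spi_set_ctldata()/spi_get_ctldata() from its
 * setup() and cleanup() methods to keep per-chip runtime state.  The
 * foo_chip structure and foo_* names below are hypothetical.
 *
 *      struct foo_chip {
 *              u32     speed_hz;
 *      };
 *
 *      static int foo_setup(struct spi_device *spi)
 *      {
 *              struct foo_chip *chip = spi_get_ctldata(spi);
 *
 *              if (!chip) {
 *                      chip = kzalloc(sizeof *chip, GFP_KERNEL);
 *                      if (!chip)
 *                              return -ENOMEM;
 *                      spi_set_ctldata(spi, chip);
 *              }
 *              chip->speed_hz = spi->max_speed_hz;
 *              return 0;
 *      }
 *
 *      static void foo_cleanup(const struct spi_device *spi)
 *      {
 *              kfree(spi->controller_state);
 *      }
 */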

struct spi_message;


struct spi_driver {
        int                     (*probe)(struct spi_device *spi);
        int                     (*remove)(struct spi_device *spi);
        void                    (*shutdown)(struct spi_device *spi);
        int                     (*suspend)(struct spi_device *spi, pm_message_t mesg);
        int                     (*resume)(struct spi_device *spi);
        struct device_driver    driver;
};

static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
{
        return drv ? container_of(drv, struct spi_driver, driver) : NULL;
}

extern int spi_register_driver(struct spi_driver *sdrv);

static inline void spi_unregister_driver(struct spi_driver *sdrv)
{
        if (!sdrv)
                return;
        driver_unregister(&sdrv->driver);
}
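
/*
 * Example (an illustrative sketch, not a reference implementation): a
 * minimal protocol driver.  The SPI bus matches spi_device.modalias
 * against the driver name, so a board table entry using "foochip" would
 * bind here.  All foochip_* names are hypothetical.
 *
 *      static int foochip_probe(struct spi_device *spi)
 *      {
 *              return 0;
 *      }
 *
 *      static int foochip_remove(struct spi_device *spi)
 *      {
 *              return 0;
 *      }
 *
 *      static struct spi_driver foochip_driver = {
 *              .driver = {
 *                      .name   = "foochip",
 *                      .owner  = THIS_MODULE,
 *              },
 *              .probe  = foochip_probe,
 *              .remove = foochip_remove,
 *      };
 *
 *      static int __init foochip_init(void)
 *      {
 *              return spi_register_driver(&foochip_driver);
 *      }
 *      module_init(foochip_init);
 *
 *      static void __exit foochip_exit(void)
 *      {
 *              spi_unregister_driver(&foochip_driver);
 *      }
 *      module_exit(foochip_exit);
 */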


/**
 * struct spi_master - interface to SPI master controller
 * @cdev: class interface to this driver
 * @bus_num: board-specific (and often SOC-specific) identifier for a
 *      given SPI controller.
 * @num_chipselect: chipselects are used to distinguish individual
 *      SPI slaves, and are numbered from zero to num_chipselect - 1.
 *      Each slave has a chipselect signal, but it's common that not
 *      every chipselect is connected to a slave.
 * @setup: updates the device mode and clocking records used by a
 *      device's SPI controller; protocol code may call this.
 * @transfer: adds a message to the controller's transfer queue.
 * @cleanup: frees controller-specific state
 *
 * Each SPI master controller can communicate with one or more spi_device
 * children.  These make a small bus, sharing MOSI, MISO and SCK signals
 * but not chip select signals.  Each device may be configured to use a
 * different clock rate, since those shared signals are ignored unless
 * the chip is selected.
 *
 * The driver for an SPI controller manages access to those devices through
 * a queue of spi_message transactions, copying data between CPU memory and
 * an SPI slave device.  For each such message it queues, it calls the
 * message's completion function when the transaction completes.
 */
struct spi_master {
        struct class_device     cdev;

        /* other than zero (== assign one dynamically), bus_num is fully
         * board-specific.  usually that simplifies to being SOC-specific.
         * example:  one SOC has three SPI controllers, numbered 1..3,
         * and one board's schematics might show it using SPI-2.  software
         * would normally use bus_num=2 for that controller.
         */
        u16                     bus_num;

        /* chipselects will be integral to many controllers; some others
         * might use board-specific GPIOs.
         */
        u16                     num_chipselect;

        /* setup mode and clock, etc (spi driver may call many times) */
        int                     (*setup)(struct spi_device *spi);

        /* bidirectional bulk transfers
         *
         * + The transfer() method may not sleep; its main role is
         *   just to add the message to the queue.
         * + For now there's no remove-from-queue operation, or
         *   any other request management
         * + To a given spi_device, message queueing is pure fifo
         *
         * + The master's main job is to process its message queue,
         *   selecting a chip then transferring data
         * + If there are multiple spi_device children, the i/o queue
         *   arbitration algorithm is unspecified (round robin, fifo,
         *   priority, reservations, preemption, etc)
         *
         * + Chipselect stays active during the entire message
         *   (unless modified by spi_transfer.cs_change != 0).
         * + The message transfers use clock and SPI mode parameters
         *   previously established by setup() for this device
         */
        int                     (*transfer)(struct spi_device *spi,
                                                struct spi_message *mesg);

        /* called on release() to free memory provided by spi_master */
        void                    (*cleanup)(const struct spi_device *spi);
};

static inline void *spi_master_get_devdata(struct spi_master *master)
{
        return class_get_devdata(&master->cdev);
}

static inline void spi_master_set_devdata(struct spi_master *master, void *data)
{
        class_set_devdata(&master->cdev, data);
}

static inline struct spi_master *spi_master_get(struct spi_master *master)
{
        if (!master || !class_device_get(&master->cdev))
                return NULL;
        return master;
}

static inline void spi_master_put(struct spi_master *master)
{
        if (master)
                class_device_put(&master->cdev);
}


/* the spi driver core manages memory for the spi_master classdev */
extern struct spi_master *
spi_alloc_master(struct device *host, unsigned size);

extern int spi_register_master(struct spi_master *master);
extern void spi_unregister_master(struct spi_master *master);

extern struct spi_master *spi_busnum_to_master(u16 busnum);

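/*
 * Example (an illustrative sketch only, with error handling abbreviated):
 * a controller driver's probe() typically allocates the master together
 * with its private state, fills in the methods above, and registers it.
 * The foo_* names and foo_hw_init() are hypothetical.
 *
 *      struct foo_priv {
 *              void __iomem    *regs;
 *      };
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct spi_master       *master;
 *              struct foo_priv         *priv;
 *              int                     status;
 *
 *              master = spi_alloc_master(&pdev->dev, sizeof *priv);
 *              if (!master)
 *                      return -ENOMEM;
 *
 *              master->bus_num = pdev->id;
 *              master->num_chipselect = 4;
 *              master->setup = foo_setup;
 *              master->transfer = foo_transfer;
 *              master->cleanup = foo_cleanup;
 *
 *              priv = spi_master_get_devdata(master);
 *              foo_hw_init(priv, pdev);
 *
 *              status = spi_register_master(master);
 *              if (status < 0)
 *                      spi_master_put(master);
 *              return status;
 *      }
 */
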
/*---------------------------------------------------------------------------*/

/*
 * I/O INTERFACE between SPI controller and protocol drivers
 *
 * Protocol drivers use a queue of spi_messages, each transferring data
 * between the controller and memory buffers.
 *
 * The spi_messages themselves consist of a series of read+write transfer
 * segments.  Those segments always read the same number of bits as they
 * write; but one or the other is easily ignored by passing a null buffer
 * pointer.  (This is unlike most types of I/O API, because SPI hardware
 * is full duplex.)
 *
 * NOTE:  Allocation of spi_transfer and spi_message memory is entirely
 * up to the protocol driver, which guarantees the integrity of both (as
 * well as the data buffers) for as long as the message is queued.
 */

/**
 * struct spi_transfer - a read/write buffer pair
 * @tx_buf: data to be written (dma-safe memory), or NULL
 * @rx_buf: data to be read (dma-safe memory), or NULL
 * @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped
 * @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped
 * @len: size of rx and tx buffers (in bytes)
 * @cs_change: affects chipselect after this transfer completes
 * @delay_usecs: microseconds to delay after this transfer before
 *      (optionally) changing the chipselect status, then starting
 *      the next transfer or completing this spi_message.
 * @transfer_list: transfers are sequenced through spi_message.transfers
 *
 * SPI transfers always write the same number of bytes as they read.
 * Protocol drivers should always provide rx_buf and/or tx_buf.
 * In some cases, they may also want to provide DMA addresses for
 * the data being transferred; that may reduce overhead, when the
 * underlying driver uses dma.
 *
 * If the transmit buffer is null, undefined data will be shifted out
 * while filling rx_buf.  If the receive buffer is null, the data
 * shifted in will be discarded.  Only "len" bytes shift out (or in).
 * It's an error to try to shift out a partial word.  (For example, by
 * shifting out three bytes with word size of sixteen or twenty bits;
 * the former uses two bytes per word, the latter uses four bytes.)
 *
 * All SPI transfers start with the relevant chipselect active.  Normally
 * it stays selected until after the last transfer in a message.  Drivers
 * can affect the chipselect signal using cs_change:
 *
 * (i) If the transfer isn't the last one in the message, this flag is
 * used to make the chipselect briefly go inactive in the middle of the
 * message.  Toggling chipselect in this way may be needed to terminate
 * a chip command, letting a single spi_message perform a whole group
 * of chip transactions together.
 *
 * (ii) When the transfer is the last one in the message, the chip may
 * stay selected until the next transfer.  This is purely a performance
 * hint; the controller driver may need to select a different device
 * for the next message.
 *
 * The code that submits an spi_message (and its spi_transfers)
 * to the lower layers is responsible for managing its memory.
 * Zero-initialize every field you don't set up explicitly, to
 * insulate against future API updates.  After you submit a message
 * and its transfers, ignore them until its completion callback.
 */
struct spi_transfer {
        /* it's ok if tx_buf == rx_buf (right?)
         * for MicroWire, one buffer must be null
         * buffers must work with dma_*map_single() calls, unless
         *   spi_message.is_dma_mapped reports a pre-existing mapping
         */
        const void      *tx_buf;
        void            *rx_buf;
        unsigned        len;

        dma_addr_t      tx_dma;
        dma_addr_t      rx_dma;

        unsigned        cs_change:1;
        u16             delay_usecs;

        struct list_head transfer_list;
};

/**
 * struct spi_message - one multi-segment SPI transaction
 * @transfers: list of transfer segments in this transaction
 * @spi: SPI device to which the transaction is queued
 * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
 *      addresses for each transfer buffer
 * @complete: called to report transaction completions
 * @context: the argument to complete() when it's called
 * @actual_length: the total number of bytes that were transferred in all
 *      successful segments
 * @status: zero for success, else negative errno
 * @queue: for use by whichever driver currently owns the message
 * @state: for use by whichever driver currently owns the message
 *
 * An spi_message is used to execute an atomic sequence of data transfers,
 * each represented by a struct spi_transfer.  The sequence is "atomic"
 * in the sense that no other spi_message may use that SPI bus until that
 * sequence completes.  On some systems, many such sequences can execute
 * as a single programmed DMA transfer.  On all systems, these messages
 * are queued, and might complete after transactions to other devices.
 * Messages sent to a given spi_device are always executed in FIFO order.
 *
 * The code that submits an spi_message (and its spi_transfers)
 * to the lower layers is responsible for managing its memory.
 * Zero-initialize every field you don't set up explicitly, to
 * insulate against future API updates.  After you submit a message
 * and its transfers, ignore them until its completion callback.
 */
struct spi_message {
        struct list_head        transfers;

        struct spi_device       *spi;

        unsigned                is_dma_mapped:1;

        /* REVISIT:  we might want a flag affecting the behavior of the
         * last transfer ... allowing things like "read 16 bit length L"
         * immediately followed by "read L bytes".  Basically imposing
         * a specific message scheduling algorithm.
         *
         * Some controller drivers (message-at-a-time queue processing)
         * could provide that as their default scheduling algorithm.  But
         * others (with multi-message pipelines) could need a flag to
         * tell them about such special cases.
         */

        /* completion is reported through a callback */
        void                    (*complete)(void *context);
        void                    *context;
        unsigned                actual_length;
        int                     status;

        /* for optional use by whatever driver currently owns the
         * spi_message ...  between calls to spi_async and then later
         * complete(), that's the spi_master controller driver.
         */
        struct list_head        queue;
        void                    *state;
};

static inline void spi_message_init(struct spi_message *m)
{
        memset(m, 0, sizeof *m);
        INIT_LIST_HEAD(&m->transfers);
}

static inline void
spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
{
        list_add_tail(&t->transfer_list, &m->transfers);
}

static inline void
spi_transfer_del(struct spi_transfer *t)
{
        list_del(&t->transfer_list);
}

/* It's fine to embed message and transaction structures in other data
 * structures so long as you don't free them while they're in use.
 */

static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
{
        struct spi_message *m;

        m = kzalloc(sizeof(struct spi_message)
                        + ntrans * sizeof(struct spi_transfer),
                        flags);
        if (m) {
                int i;
                struct spi_transfer *t = (struct spi_transfer *)(m + 1);

                INIT_LIST_HEAD(&m->transfers);
                for (i = 0; i < ntrans; i++, t++)
                        spi_message_add_tail(t, m);
        }
        return m;
}

static inline void spi_message_free(struct spi_message *m)
{
        kfree(m);
}

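/*
 * Example (an illustrative sketch only): building a two-segment message
 * on the stack.  The first transfer writes a one-byte command, waits
 * briefly and then deselects the chip (cs_change), since some chips only
 * act on a command once deselected; the second transfer clocks the result
 * into a caller-supplied buffer.  The command value and timing are made
 * up, and since tx_buf/rx_buf are expected to be dma-safe memory, a real
 * driver may prefer heap or per-device buffers over the stack.  spi_sync()
 * is declared further below in this header.
 *
 *      static int foo_convert(struct spi_device *spi, u8 *result, size_t len)
 *      {
 *              u8                      cmd = 0x05;
 *              struct spi_transfer     t[2];
 *              struct spi_message      m;
 *
 *              memset(t, 0, sizeof t);
 *              spi_message_init(&m);
 *
 *              t[0].tx_buf = &cmd;
 *              t[0].len = 1;
 *              t[0].delay_usecs = 50;
 *              t[0].cs_change = 1;
 *              spi_message_add_tail(&t[0], &m);
 *
 *              t[1].rx_buf = result;
 *              t[1].len = len;
 *              spi_message_add_tail(&t[1], &m);
 *
 *              return spi_sync(spi, &m);
 *      }
 */
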
/**
 * spi_setup -- setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with the mode 0 default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * The changes take effect the next time the device is selected and data
 * is transferred to or from it.
 */
static inline int
spi_setup(struct spi_device *spi)
{
        return spi->master->setup(spi);
}

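/*
 * Example (an illustrative sketch only): a protocol driver's probe()
 * might override the board-supplied defaults before its first transfer.
 * The mode, word size and clock rate shown are arbitrary values.
 *
 *      spi->mode = SPI_MODE_3;
 *      spi->bits_per_word = 12;
 *      spi->max_speed_hz = 1000000;
 *      status = spi_setup(spi);
 *      if (status < 0)
 *              return status;
 */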

/**
 * spi_async -- asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
static inline int
spi_async(struct spi_device *spi, struct spi_message *message)
{
        message->spi = spi;
        return spi->master->transfer(spi, message);
}

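/*
 * Example (an illustrative sketch only): submitting a message from code
 * that can't sleep, and collecting the result from the completion
 * callback.  The foo_request structure and foo_* names are hypothetical;
 * the caller fills in req->xfer's buffers, and everything referenced by
 * the message must stay allocated until complete() has run.
 *
 *      struct foo_request {
 *              struct spi_message      msg;
 *              struct spi_transfer     xfer;
 *              struct completion       done;
 *      };
 *
 *      static void foo_complete(void *context)
 *      {
 *              struct foo_request *req = context;
 *
 *              complete(&req->done);
 *      }
 *
 *      static int foo_submit(struct spi_device *spi, struct foo_request *req)
 *      {
 *              spi_message_init(&req->msg);
 *              spi_message_add_tail(&req->xfer, &req->msg);
 *              init_completion(&req->done);
 *              req->msg.complete = foo_complete;
 *              req->msg.context = req;
 *              return spi_async(spi, &req->msg);
 *      }
 */
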
/*---------------------------------------------------------------------------*/

/* All these synchronous SPI transfer routines are utilities layered
 * over the core async transfer primitive.  Here, "synchronous" means
 * they will sleep uninterruptibly until the async transfer completes.
 */

extern int spi_sync(struct spi_device *spi, struct spi_message *message);

/**
 * spi_write - SPI synchronous write
 * @spi: device to which data will be written
 * @buf: data buffer
 * @len: data buffer size
 *
 * This writes the buffer and returns zero or a negative error code.
 * Callable only from contexts that can sleep.
 */
static inline int
spi_write(struct spi_device *spi, const u8 *buf, size_t len)
{
        struct spi_transfer     t = {
                        .tx_buf         = buf,
                        .len            = len,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spi_sync(spi, &m);
}

/**
 * spi_read - SPI synchronous read
 * @spi: device from which data will be read
 * @buf: data buffer
 * @len: data buffer size
 *
 * This reads the buffer and returns zero or a negative error code.
 * Callable only from contexts that can sleep.
 */
static inline int
spi_read(struct spi_device *spi, u8 *buf, size_t len)
{
        struct spi_transfer     t = {
                        .rx_buf         = buf,
                        .len            = len,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spi_sync(spi, &m);
}

/* this copies txbuf and rxbuf data; for small transfers only! */
extern int spi_write_then_read(struct spi_device *spi,
                const u8 *txbuf, unsigned n_tx,
                u8 *rxbuf, unsigned n_rx);

/**
 * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read
 * @spi: device with which data will be exchanged
 * @cmd: command to be written before data is read back
 *
 * This returns the (unsigned) eight bit number returned by the
 * device, or else a negative error code.  Callable only from
 * contexts that can sleep.
 */
static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
{
        ssize_t                 status;
        u8                      result;

        status = spi_write_then_read(spi, &cmd, 1, &result, 1);

        /* return negative errno or unsigned value */
        return (status < 0) ? status : result;
}

/**
 * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read
 * @spi: device with which data will be exchanged
 * @cmd: command to be written before data is read back
 *
 * This returns the (unsigned) sixteen bit number returned by the
 * device, or else a negative error code.  Callable only from
 * contexts that can sleep.
 *
 * The number is returned in wire-order, which is at least sometimes
 * big-endian.
 */
static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
{
        ssize_t                 status;
        u16                     result;

        status = spi_write_then_read(spi, &cmd, 1, (u8 *) &result, 2);

        /* return negative errno or unsigned value */
        return (status < 0) ? status : result;
}

/*---------------------------------------------------------------------------*/

/*
 * INTERFACE between board init code and SPI infrastructure.
 *
 * No SPI driver ever sees these SPI device table segments, but
 * it's how the SPI core (or adapters that get hotplugged) grows
 * the driver model tree.
 *
 * As a rule, SPI devices can't be probed.  Instead, board init code
 * provides a table listing the devices which are present, with enough
 * information to bind and set up the device's driver.  There's basic
 * support for nonstatic configurations too; enough to handle adding
 * parport adapters, or microcontrollers acting as USB-to-SPI bridges.
 */

/* board-specific information about each SPI device */
struct spi_board_info {
        /* the device name and module name are coupled, like platform_bus;
         * "modalias" is normally the driver name.
         *
         * platform_data goes to spi_device.dev.platform_data,
         * controller_data goes to spi_device.controller_data,
         * irq is copied too
         */
        char            modalias[KOBJ_NAME_LEN];
        const void      *platform_data;
        void            *controller_data;
        int             irq;

        /* slower signaling on noisy or low voltage boards */
        u32             max_speed_hz;


        /* bus_num is board specific and matches the bus_num of some
         * spi_master that will probably be registered later.
         *
         * chip_select reflects how this chip is wired to that master;
         * it's less than num_chipselect.
         */
        u16             bus_num;
        u16             chip_select;

        /* ... may need additional spi_device chip config data here.
         * avoid stuff protocol drivers can set; but include stuff
         * needed to behave without being bound to a driver:
         *  - chipselect polarity
         *  - quirks like clock rate mattering when not selected
         */
};

#ifdef  CONFIG_SPI
extern int
spi_register_board_info(struct spi_board_info const *info, unsigned n);
#else
/* board init code may ignore whether SPI is configured or not */
static inline int
spi_register_board_info(struct spi_board_info const *info, unsigned n)
        { return 0; }
#endif

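/*
 * Example (an illustrative sketch only): board init code declaring one
 * SPI device on bus 2, chipselect 1, and handing the table to the SPI
 * core before the matching spi_master registers.  The "foochip" modalias
 * and the clock rate are hypothetical.
 *
 *      static struct spi_board_info foo_board_spi_devices[] __initdata = {
 *              {
 *                      .modalias       = "foochip",
 *                      .max_speed_hz   = 2 * 1000 * 1000,
 *                      .bus_num        = 2,
 *                      .chip_select    = 1,
 *              },
 *      };
 *
 *      static void __init foo_board_init_spi(void)
 *      {
 *              spi_register_board_info(foo_board_spi_devices,
 *                              ARRAY_SIZE(foo_board_spi_devices));
 *      }
 */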

/* If you're hotplugging an adapter with devices (parport, usb, etc)
 * use spi_new_device() to describe each device.  You can also call
 * spi_unregister_device() to start making that device vanish, but
 * normally that would be handled by spi_unregister_master().
 */
extern struct spi_device *
spi_new_device(struct spi_master *, struct spi_board_info *);

static inline void
spi_unregister_device(struct spi_device *spi)
{
        if (spi)
                device_unregister(&spi->dev);
}

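/*
 * Example (an illustrative sketch only): an adapter driver that has just
 * registered its spi_master can describe a child chip at runtime instead
 * of relying on a static board table.  The "foochip" name and clock rate
 * are hypothetical.
 *
 *      static int foo_add_chip(struct spi_master *master)
 *      {
 *              struct spi_board_info   chip = {
 *                      .modalias       = "foochip",
 *                      .max_speed_hz   = 500 * 1000,
 *                      .chip_select    = 0,
 *              };
 *
 *              return spi_new_device(master, &chip) ? 0 : -ENODEV;
 *      }
 */
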
#endif /* __LINUX_SPI_H */