spi: davinci: setup chip-select timers values only if timer enabled
drivers/spi/davinci_spi.c
1/*
2 * Copyright (C) 2009 Texas Instruments.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/interrupt.h>
20#include <linux/io.h>
21#include <linux/gpio.h>
22#include <linux/module.h>
23#include <linux/delay.h>
24#include <linux/platform_device.h>
25#include <linux/err.h>
26#include <linux/clk.h>
27#include <linux/dma-mapping.h>
28#include <linux/spi/spi.h>
29#include <linux/spi/spi_bitbang.h>
 30#include <linux/slab.h>
31
32#include <mach/spi.h>
33#include <mach/edma.h>
34
35#define SPI_NO_RESOURCE ((resource_size_t)-1)
36
37#define SPI_MAX_CHIPSELECT 2
38
39#define CS_DEFAULT 0xFF
40
41#define SPI_BUFSIZ (SMP_CACHE_BYTES + 1)
42#define DAVINCI_DMA_DATA_TYPE_S8 0x01
43#define DAVINCI_DMA_DATA_TYPE_S16 0x02
44#define DAVINCI_DMA_DATA_TYPE_S32 0x04
45
46#define SPIFMT_PHASE_MASK BIT(16)
47#define SPIFMT_POLARITY_MASK BIT(17)
48#define SPIFMT_DISTIMER_MASK BIT(18)
49#define SPIFMT_SHIFTDIR_MASK BIT(20)
50#define SPIFMT_WAITENA_MASK BIT(21)
51#define SPIFMT_PARITYENA_MASK BIT(22)
52#define SPIFMT_ODD_PARITY_MASK BIT(23)
53#define SPIFMT_WDELAY_MASK 0x3f000000u
54#define SPIFMT_WDELAY_SHIFT 24
 55#define SPIFMT_PRESCALE_SHIFT	8
 56
57
58/* SPIPC0 */
59#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */
60#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */
61#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */
62#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */
63
64#define SPIINT_MASKALL 0x0101035F
65#define SPI_INTLVL_1 0x000001FFu
66#define SPI_INTLVL_0 0x00000000u
67
68/* SPIDAT1 (upper 16 bit defines) */
69#define SPIDAT1_CSHOLD_MASK BIT(12)
70
71/* SPIGCR1 */
72#define SPIGCR1_CLKMOD_MASK BIT(1)
73#define SPIGCR1_MASTER_MASK BIT(0)
74#define SPIGCR1_LOOPBACK_MASK BIT(16)
 75#define SPIGCR1_SPIENA_MASK BIT(24)
76
77/* SPIBUF */
78#define SPIBUF_TXFULL_MASK BIT(29)
79#define SPIBUF_RXEMPTY_MASK BIT(31)
80
81/* Error Masks */
82#define SPIFLG_DLEN_ERR_MASK BIT(0)
83#define SPIFLG_TIMEOUT_MASK BIT(1)
84#define SPIFLG_PARERR_MASK BIT(2)
85#define SPIFLG_DESYNC_MASK BIT(3)
86#define SPIFLG_BITERR_MASK BIT(4)
87#define SPIFLG_OVRRUN_MASK BIT(6)
88#define SPIFLG_RX_INTR_MASK BIT(8)
89#define SPIFLG_TX_INTR_MASK BIT(9)
90#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24)
 91
92#define SPIINT_BITERR_INTR BIT(4)
93#define SPIINT_OVRRUN_INTR BIT(6)
94#define SPIINT_RX_INTR BIT(8)
95#define SPIINT_TX_INTR BIT(9)
96#define SPIINT_DMA_REQ_EN BIT(16)
97
98#define SPI_T2CDELAY_SHIFT 16
99#define SPI_C2TDELAY_SHIFT 24
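/*
 * C2TDELAY (chip-select-active to transmit-start) and T2CDELAY
 * (transmit-end to chip-select-inactive) are fields of SPIDELAY; they
 * only take effect while the chip-select timers are enabled, i.e. when
 * SPIFMT_DISTIMER_MASK is left clear (see davinci_spi_setup_transfer()).
 */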
100
101/* SPI Controller registers */
102#define SPIGCR0 0x00
103#define SPIGCR1 0x04
104#define SPIINT 0x08
105#define SPILVL 0x0c
106#define SPIFLG 0x10
107#define SPIPC0 0x14
108#define SPIDAT1 0x3c
109#define SPIBUF 0x40
110#define SPIDELAY 0x48
111#define SPIDEF 0x4c
112#define SPIFMT0 0x50
113
114struct davinci_spi_slave {
115 u32 cmd_to_write;
116 u32 clk_ctrl_to_write;
117 u32 bytes_per_word;
118 u8 active_cs;
119};
120
121/* We have 2 DMA channels per CS, one for RX and one for TX */
122struct davinci_spi_dma {
123 int dma_tx_channel;
124 int dma_rx_channel;
125 int dma_tx_sync_dev;
126 int dma_rx_sync_dev;
127 enum dma_event_q eventq;
128
129 struct completion dma_tx_completion;
130 struct completion dma_rx_completion;
131};
132
133/* SPI Controller driver's private data. */
134struct davinci_spi {
135 struct spi_bitbang bitbang;
136 struct clk *clk;
137
138 u8 version;
139 resource_size_t pbase;
140 void __iomem *base;
141 size_t region_size;
142 u32 irq;
143 struct completion done;
144
145 const void *tx;
146 void *rx;
147 u8 *tmp_buf;
148 int count;
149 struct davinci_spi_dma *dma_channels;
 150 struct davinci_spi_platform_data *pdata;
151
152 void (*get_rx)(u32 rx_data, struct davinci_spi *);
153 u32 (*get_tx)(struct davinci_spi *);
154
155 struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
156};
157
158static struct davinci_spi_config davinci_spi_default_cfg;
159
160static unsigned use_dma;
161
162static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
163{
164 u8 *rx = davinci_spi->rx;
165
166 *rx++ = (u8)data;
167 davinci_spi->rx = rx;
168}
169
170static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
171{
172 u16 *rx = davinci_spi->rx;
173
174 *rx++ = (u16)data;
175 davinci_spi->rx = rx;
176}
177
178static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
179{
180 u32 data;
181 const u8 *tx = davinci_spi->tx;
182
183 data = *tx++;
184 davinci_spi->tx = tx;
185 return data;
186}
187
188static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
189{
190 u32 data;
191 const u16 *tx = davinci_spi->tx;
192
193 data = *tx++;
194 davinci_spi->tx = tx;
195 return data;
196}
197
198static inline void set_io_bits(void __iomem *addr, u32 bits)
199{
200 u32 v = ioread32(addr);
201
202 v |= bits;
203 iowrite32(v, addr);
204}
205
206static inline void clear_io_bits(void __iomem *addr, u32 bits)
207{
208 u32 v = ioread32(addr);
209
210 v &= ~bits;
211 iowrite32(v, addr);
212}
213
214static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
215{
216 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
217
218 if (enable)
219 set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
220 else
221 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
222}
223
224/*
225 * Interface to control the chip select signal
226 */
227static void davinci_spi_chipselect(struct spi_device *spi, int value)
228{
229 struct davinci_spi *davinci_spi;
230 struct davinci_spi_platform_data *pdata;
 231 u8 chip_sel = spi->chip_select;
 232 u16 spidat1_cfg = CS_DEFAULT;
 233 bool gpio_chipsel = false;
234
235 davinci_spi = spi_master_get_devdata(spi->master);
236 pdata = davinci_spi->pdata;
237
238 if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
239 pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
240 gpio_chipsel = true;
241
242 /*
243 * Board specific chip select logic decides the polarity and cs
244 * line for the controller
245 */
246 if (gpio_chipsel) {
247 if (value == BITBANG_CS_ACTIVE)
248 gpio_set_value(pdata->chip_sel[chip_sel], 0);
249 else
250 gpio_set_value(pdata->chip_sel[chip_sel], 1);
251 } else {
252 if (value == BITBANG_CS_ACTIVE) {
253 spidat1_cfg |= SPIDAT1_CSHOLD_MASK;
254 spidat1_cfg &= ~(0x1 << chip_sel);
255 }
 256
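		/*
		 * Note: only the upper half-word of SPIDAT1 (the CSHOLD and
		 * chip-select bits) is written below, leaving the 16-bit
		 * transmit data field untouched.
		 */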
257 iowrite16(spidat1_cfg, davinci_spi->base + SPIDAT1 + 2);
258 }
259}
260
261/**
262 * davinci_spi_get_prescale - Calculates the correct prescale value
 263 * @max_speed_hz: the maximum rate the SPI clock can run at
264 *
265 * This function calculates the prescale value that generates a clock rate
266 * less than or equal to the specified maximum.
267 *
268 * Returns: calculated prescale - 1 for easy programming into SPI registers
 269 * or a negative error number if a valid prescaler cannot be obtained.
270 */
271static inline int davinci_spi_get_prescale(struct davinci_spi *davinci_spi,
272 u32 max_speed_hz)
273{
274 int ret;
275
276 ret = DIV_ROUND_UP(clk_get_rate(davinci_spi->clk), max_speed_hz);
277
278 if (ret < 3 || ret > 256)
279 return -EINVAL;
280
281 return ret - 1;
282}
283
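/*
 * Worked example (illustrative only; the actual SPI module clock rate is
 * board specific): with a 150 MHz module clock and max_speed_hz = 10 MHz,
 * DIV_ROUND_UP(150000000, 10000000) = 15, so the function returns 14.
 * Programming 14 into the PRESCALE field gives 150 MHz / (14 + 1) = 10 MHz
 * on the bus.
 */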
284/**
 285 * davinci_spi_setup_transfer - This function will determine the transfer method
286 * @spi: spi device on which data transfer to be done
287 * @t: spi transfer in which transfer info is filled
288 *
 289 * This function determines the data transfer method (8/16 bit transfer).
 290 * It will also set the SPI clock control register according to the
 291 * SPI slave device frequency.
292 */
293static int davinci_spi_setup_transfer(struct spi_device *spi,
294 struct spi_transfer *t)
295{
296
297 struct davinci_spi *davinci_spi;
 298 struct davinci_spi_config *spicfg;
 299 u8 bits_per_word = 0;
 300 u32 hz = 0, spifmt = 0, prescale = 0;
301
302 davinci_spi = spi_master_get_devdata(spi->master);
303 spicfg = (struct davinci_spi_config *)spi->controller_data;
304 if (!spicfg)
305 spicfg = &davinci_spi_default_cfg;
306
307 if (t) {
308 bits_per_word = t->bits_per_word;
309 hz = t->speed_hz;
310 }
311
 312 /* if bits_per_word is not set then use the default */
313 if (!bits_per_word)
314 bits_per_word = spi->bits_per_word;
315
316 /*
317 * Assign function pointer to appropriate transfer method
318 * 8bit, 16bit or 32bit transfer
319 */
320 if (bits_per_word <= 8 && bits_per_word >= 2) {
321 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
322 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
323 davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
324 } else if (bits_per_word <= 16 && bits_per_word >= 2) {
325 davinci_spi->get_rx = davinci_spi_rx_buf_u16;
326 davinci_spi->get_tx = davinci_spi_tx_buf_u16;
327 davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
328 } else
329 return -EINVAL;
330
331 if (!hz)
332 hz = spi->max_speed_hz;
333
334 /* Set up SPIFMTn register, unique to this chipselect. */
335
336 prescale = davinci_spi_get_prescale(davinci_spi, hz);
337 if (prescale < 0)
338 return prescale;
339
340 spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);
341
342 if (spi->mode & SPI_LSB_FIRST)
343 spifmt |= SPIFMT_SHIFTDIR_MASK;
344
345 if (spi->mode & SPI_CPOL)
346 spifmt |= SPIFMT_POLARITY_MASK;
347
348 if (!(spi->mode & SPI_CPHA))
349 spifmt |= SPIFMT_PHASE_MASK;
350
351 /*
352 * Version 1 hardware supports two basic SPI modes:
353 * - Standard SPI mode uses 4 pins, with chipselect
354 * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
355 * (distinct from SPI_3WIRE, with just one data wire;
356 * or similar variants without MOSI or without MISO)
357 *
358 * Version 2 hardware supports an optional handshaking signal,
359 * so it can support two more modes:
360 * - 5 pin SPI variant is standard SPI plus SPI_READY
361 * - 4 pin with enable is (SPI_READY | SPI_NO_CS)
362 */
363
364 if (davinci_spi->version == SPI_VERSION_2) {
365
366 spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
367 & SPIFMT_WDELAY_MASK);
 368
369 if (spicfg->odd_parity)
370 spifmt |= SPIFMT_ODD_PARITY_MASK;
371
372 if (spicfg->parity_enable)
373 spifmt |= SPIFMT_PARITYENA_MASK;
374
375 if (spicfg->timer_disable)
376 spifmt |= SPIFMT_DISTIMER_MASK;
377 else
378 iowrite32((spicfg->c2tdelay << SPI_C2TDELAY_SHIFT) |
379 (spicfg->t2cdelay << SPI_T2CDELAY_SHIFT),
380 davinci_spi->base + SPIDELAY);
381
382 if (spi->mode & SPI_READY)
383 spifmt |= SPIFMT_WAITENA_MASK;
384 }
385
386 iowrite32(spifmt, davinci_spi->base + SPIFMT0);
387
388 return 0;
389}
390
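/*
 * Example (hypothetical board code): a per-device struct davinci_spi_config
 * reaches this driver through spi_board_info.controller_data, e.g.:
 *
 *	static struct davinci_spi_config mydev_spi_cfg = {
 *		.wdelay		= 0,
 *		.parity_enable	= 0,
 *		.timer_disable	= 0,	// keep the chip-select timers on
 *		.c2tdelay	= 8,
 *		.t2cdelay	= 8,
 *	};
 *
 * With timer_disable left at 0, the c2tdelay/t2cdelay values are written to
 * SPIDELAY by davinci_spi_setup_transfer() above; the delay values shown
 * here are illustrative only.
 */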
391static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
392{
393 struct spi_device *spi = (struct spi_device *)data;
394 struct davinci_spi *davinci_spi;
395 struct davinci_spi_dma *davinci_spi_dma;
396
397 davinci_spi = spi_master_get_devdata(spi->master);
398 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
399
400 if (ch_status == DMA_COMPLETE)
401 edma_stop(davinci_spi_dma->dma_rx_channel);
402 else
403 edma_clean_channel(davinci_spi_dma->dma_rx_channel);
404
405 complete(&davinci_spi_dma->dma_rx_completion);
406 /* We must disable the DMA RX request */
407 davinci_spi_set_dma_req(spi, 0);
408}
409
410static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
411{
412 struct spi_device *spi = (struct spi_device *)data;
413 struct davinci_spi *davinci_spi;
414 struct davinci_spi_dma *davinci_spi_dma;
415
416 davinci_spi = spi_master_get_devdata(spi->master);
417 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
418
419 if (ch_status == DMA_COMPLETE)
420 edma_stop(davinci_spi_dma->dma_tx_channel);
421 else
422 edma_clean_channel(davinci_spi_dma->dma_tx_channel);
423
424 complete(&davinci_spi_dma->dma_tx_completion);
425 /* We must disable the DMA TX request */
426 davinci_spi_set_dma_req(spi, 0);
427}
428
429static int davinci_spi_request_dma(struct spi_device *spi)
430{
431 struct davinci_spi *davinci_spi;
432 struct davinci_spi_dma *davinci_spi_dma;
433 struct device *sdev;
434 int r;
435
436 davinci_spi = spi_master_get_devdata(spi->master);
437 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
438 sdev = davinci_spi->bitbang.master->dev.parent;
439
440 r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
441 davinci_spi_dma_rx_callback, spi,
442 davinci_spi_dma->eventq);
443 if (r < 0) {
444 dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
445 return -EAGAIN;
446 }
447 davinci_spi_dma->dma_rx_channel = r;
448 r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
449 davinci_spi_dma_tx_callback, spi,
450 davinci_spi_dma->eventq);
451 if (r < 0) {
452 edma_free_channel(davinci_spi_dma->dma_rx_channel);
453 davinci_spi_dma->dma_rx_channel = -1;
454 dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
455 return -EAGAIN;
456 }
457 davinci_spi_dma->dma_tx_channel = r;
458
459 return 0;
460}
461
462/**
 463 * davinci_spi_setup - This function will set the default transfer method
 464 * @spi: spi device on which data transfer to be done
 465 *
 466 * This function sets the default transfer method.
467 */
468static int davinci_spi_setup(struct spi_device *spi)
469{
470 int retval;
471 struct davinci_spi *davinci_spi;
472 struct davinci_spi_dma *davinci_spi_dma;
473
474 davinci_spi = spi_master_get_devdata(spi->master);
475
 476 /* if bits_per_word is zero then set it to the default of 8 */
477 if (!spi->bits_per_word)
478 spi->bits_per_word = 8;
479
480 davinci_spi->slave[spi->chip_select].cmd_to_write = 0;
481
482 if (use_dma && davinci_spi->dma_channels) {
483 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
484
485 if ((davinci_spi_dma->dma_rx_channel == -1)
486 || (davinci_spi_dma->dma_tx_channel == -1)) {
487 retval = davinci_spi_request_dma(spi);
488 if (retval < 0)
489 return retval;
490 }
491 }
492
493 retval = davinci_spi_setup_transfer(spi, NULL);
494
495 return retval;
496}
497
498static void davinci_spi_cleanup(struct spi_device *spi)
499{
500 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
501 struct davinci_spi_dma *davinci_spi_dma;
502
503 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
504
505 if (use_dma && davinci_spi->dma_channels) {
506 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
507
508 if ((davinci_spi_dma->dma_rx_channel != -1)
509 && (davinci_spi_dma->dma_tx_channel != -1)) {
510 edma_free_channel(davinci_spi_dma->dma_tx_channel);
511 edma_free_channel(davinci_spi_dma->dma_rx_channel);
512 }
513 }
514}
515
516static int davinci_spi_bufs_prep(struct spi_device *spi,
517 struct davinci_spi *davinci_spi)
518{
 519 struct davinci_spi_platform_data *pdata;
520 int op_mode = 0;
521
522 /*
523 * REVISIT unless devices disagree about SPI_LOOP or
524 * SPI_READY (SPI_NO_CS only allows one device!), this
525 * should not need to be done before each message...
526 * optimize for both flags staying cleared.
527 */
528
529 op_mode = SPIPC0_DIFUN_MASK
530 | SPIPC0_DOFUN_MASK
531 | SPIPC0_CLKFUN_MASK;
532 if (!(spi->mode & SPI_NO_CS)) {
533 pdata = davinci_spi->pdata;
534 if (!pdata->chip_sel ||
535 pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS)
536 op_mode |= 1 << spi->chip_select;
537 }
538 if (spi->mode & SPI_READY)
539 op_mode |= SPIPC0_SPIENA_MASK;
540
541 iowrite32(op_mode, davinci_spi->base + SPIPC0);
542
543 if (spi->mode & SPI_LOOP)
544 set_io_bits(davinci_spi->base + SPIGCR1,
545 SPIGCR1_LOOPBACK_MASK);
546 else
547 clear_io_bits(davinci_spi->base + SPIGCR1,
548 SPIGCR1_LOOPBACK_MASK);
549
550 return 0;
551}
552
553static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
554 int int_status)
555{
556 struct device *sdev = davinci_spi->bitbang.master->dev.parent;
557
558 if (int_status & SPIFLG_TIMEOUT_MASK) {
559 dev_dbg(sdev, "SPI Time-out Error\n");
560 return -ETIMEDOUT;
561 }
562 if (int_status & SPIFLG_DESYNC_MASK) {
563 dev_dbg(sdev, "SPI Desynchronization Error\n");
564 return -EIO;
565 }
566 if (int_status & SPIFLG_BITERR_MASK) {
567 dev_dbg(sdev, "SPI Bit error\n");
568 return -EIO;
569 }
570
571 if (davinci_spi->version == SPI_VERSION_2) {
572 if (int_status & SPIFLG_DLEN_ERR_MASK) {
573 dev_dbg(sdev, "SPI Data Length Error\n");
574 return -EIO;
575 }
576 if (int_status & SPIFLG_PARERR_MASK) {
577 dev_dbg(sdev, "SPI Parity Error\n");
578 return -EIO;
579 }
580 if (int_status & SPIFLG_OVRRUN_MASK) {
581 dev_dbg(sdev, "SPI Data Overrun error\n");
582 return -EIO;
583 }
584 if (int_status & SPIFLG_TX_INTR_MASK) {
585 dev_dbg(sdev, "SPI TX intr bit set\n");
586 return -EIO;
587 }
588 if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
589 dev_dbg(sdev, "SPI Buffer Init Active\n");
590 return -EBUSY;
591 }
592 }
593
594 return 0;
595}
596
597/**
 598 * davinci_spi_bufs_pio - function which will handle transfer data
599 * @spi: spi device on which data transfer to be done
600 * @t: spi transfer in which transfer info is filled
601 *
602 * This function will put data to be transferred into data register
603 * of SPI controller and then wait until the completion will be marked
604 * by the IRQ Handler.
605 */
606static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
607{
608 struct davinci_spi *davinci_spi;
609 int int_status, count, ret;
 610 u8 conv;
611 u32 tx_data, data1_reg_val;
612 u32 buf_val, flg_val;
613 struct davinci_spi_platform_data *pdata;
614
615 davinci_spi = spi_master_get_devdata(spi->master);
616 pdata = davinci_spi->pdata;
617
618 davinci_spi->tx = t->tx_buf;
619 davinci_spi->rx = t->rx_buf;
620
621 /* convert len to words based on bits_per_word */
622 conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
623 davinci_spi->count = t->len / conv;
624
625 data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);
626
627 INIT_COMPLETION(davinci_spi->done);
628
629 ret = davinci_spi_bufs_prep(spi, davinci_spi);
630 if (ret)
631 return ret;
632
633 /* Enable SPI */
634 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
635
 636 count = davinci_spi->count;
637
638 /* Determine the command to execute READ or WRITE */
639 if (t->tx_buf) {
640 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
641
642 while (1) {
643 tx_data = davinci_spi->get_tx(davinci_spi);
644
645 data1_reg_val &= ~(0xFFFF);
646 data1_reg_val |= (0xFFFF & tx_data);
647
648 buf_val = ioread32(davinci_spi->base + SPIBUF);
649 if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
650 iowrite32(data1_reg_val,
651 davinci_spi->base + SPIDAT1);
652
653 count--;
654 }
655 while (ioread32(davinci_spi->base + SPIBUF)
656 & SPIBUF_RXEMPTY_MASK)
657 cpu_relax();
658
659 /* getting the returned byte */
660 if (t->rx_buf) {
661 buf_val = ioread32(davinci_spi->base + SPIBUF);
662 davinci_spi->get_rx(buf_val, davinci_spi);
663 }
664 if (count <= 0)
665 break;
666 }
667 } else {
668 if (pdata->poll_mode) {
669 while (1) {
670 /* keeps the serial clock going */
671 if ((ioread32(davinci_spi->base + SPIBUF)
672 & SPIBUF_TXFULL_MASK) == 0)
673 iowrite32(data1_reg_val,
674 davinci_spi->base + SPIDAT1);
675
676 while (ioread32(davinci_spi->base + SPIBUF) &
677 SPIBUF_RXEMPTY_MASK)
678 cpu_relax();
679
680 flg_val = ioread32(davinci_spi->base + SPIFLG);
681 buf_val = ioread32(davinci_spi->base + SPIBUF);
682
683 davinci_spi->get_rx(buf_val, davinci_spi);
684
685 count--;
686 if (count <= 0)
687 break;
688 }
689 } else { /* Receive in Interrupt mode */
690 int i;
691
692 for (i = 0; i < davinci_spi->count; i++) {
693 set_io_bits(davinci_spi->base + SPIINT,
694 SPIINT_BITERR_INTR
695 | SPIINT_OVRRUN_INTR
696 | SPIINT_RX_INTR);
697
698 iowrite32(data1_reg_val,
699 davinci_spi->base + SPIDAT1);
700
701 while (ioread32(davinci_spi->base + SPIINT) &
702 SPIINT_RX_INTR)
703 cpu_relax();
704 }
705 iowrite32((data1_reg_val & 0x0ffcffff),
706 davinci_spi->base + SPIDAT1);
707 }
708 }
709
710 /*
 711 * Check for bit error, desync error, parity error, timeout error and
712 * receive overflow errors
713 */
714 int_status = ioread32(davinci_spi->base + SPIFLG);
715
716 ret = davinci_spi_check_error(davinci_spi, int_status);
717 if (ret != 0)
718 return ret;
719
720 /* SPI Framework maintains the count only in bytes so convert back */
721 davinci_spi->count *= conv;
722
723 return t->len;
724}
725
726#define DAVINCI_DMA_DATA_TYPE_S8 0x01
727#define DAVINCI_DMA_DATA_TYPE_S16 0x02
728#define DAVINCI_DMA_DATA_TYPE_S32 0x04
729
730static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
731{
732 struct davinci_spi *davinci_spi;
733 int int_status = 0;
734 int count, temp_count;
735 u8 conv = 1;
736 u32 data1_reg_val;
737 struct davinci_spi_dma *davinci_spi_dma;
738 int word_len, data_type, ret;
739 unsigned long tx_reg, rx_reg;
740 struct device *sdev;
741
742 davinci_spi = spi_master_get_devdata(spi->master);
743 sdev = davinci_spi->bitbang.master->dev.parent;
744
745 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
746
747 tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
748 rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;
749
750 davinci_spi->tx = t->tx_buf;
751 davinci_spi->rx = t->rx_buf;
752
753 /* convert len to words based on bits_per_word */
754 conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
755 davinci_spi->count = t->len / conv;
756
757 data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);
758
759 INIT_COMPLETION(davinci_spi->done);
760
761 init_completion(&davinci_spi_dma->dma_rx_completion);
762 init_completion(&davinci_spi_dma->dma_tx_completion);
763
764 word_len = conv * 8;
765
766 if (word_len <= 8)
767 data_type = DAVINCI_DMA_DATA_TYPE_S8;
768 else if (word_len <= 16)
769 data_type = DAVINCI_DMA_DATA_TYPE_S16;
770 else if (word_len <= 32)
771 data_type = DAVINCI_DMA_DATA_TYPE_S32;
772 else
773 return -EINVAL;
774
775 ret = davinci_spi_bufs_prep(spi, davinci_spi);
776 if (ret)
777 return ret;
778
 779 count = davinci_spi->count; /* the number of elements */
780
781 /* disable all interrupts for dma transfers */
782 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
783 /* Disable SPI to write configuration bits in SPIDAT */
784 clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
785 /* Enable SPI */
786 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
787
788 if (t->tx_buf) {
789 t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
790 DMA_TO_DEVICE);
791 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
792 dev_dbg(sdev, "Unable to DMA map a %d bytes"
793 " TX buffer\n", count);
794 return -ENOMEM;
795 }
796 temp_count = count;
797 } else {
798 /* We need TX clocking for RX transaction */
799 t->tx_dma = dma_map_single(&spi->dev,
800 (void *)davinci_spi->tmp_buf, count + 1,
801 DMA_TO_DEVICE);
802 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
803 dev_dbg(sdev, "Unable to DMA map a %d bytes"
804 " TX tmp buffer\n", count);
805 return -ENOMEM;
806 }
807 temp_count = count + 1;
808 }
809
810 edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
811 data_type, temp_count, 1, 0, ASYNC);
812 edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
813 edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
814 edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
815 edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);
816
817 if (t->rx_buf) {
818 /* initiate transaction */
819 iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
820
821 t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
822 DMA_FROM_DEVICE);
823 if (dma_mapping_error(&spi->dev, t->rx_dma)) {
824 dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
825 count);
826 if (t->tx_buf != NULL)
827 dma_unmap_single(NULL, t->tx_dma,
828 count, DMA_TO_DEVICE);
829 return -ENOMEM;
830 }
831 edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
832 data_type, count, 1, 0, ASYNC);
833 edma_set_src(davinci_spi_dma->dma_rx_channel,
834 rx_reg, INCR, W8BIT);
835 edma_set_dest(davinci_spi_dma->dma_rx_channel,
836 t->rx_dma, INCR, W8BIT);
837 edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
838 edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
839 data_type, 0);
840 }
841
842 if ((t->tx_buf) || (t->rx_buf))
843 edma_start(davinci_spi_dma->dma_tx_channel);
844
845 if (t->rx_buf)
846 edma_start(davinci_spi_dma->dma_rx_channel);
847
848 if ((t->rx_buf) || (t->tx_buf))
849 davinci_spi_set_dma_req(spi, 1);
850
851 if (t->tx_buf)
852 wait_for_completion_interruptible(
853 &davinci_spi_dma->dma_tx_completion);
854
855 if (t->rx_buf)
856 wait_for_completion_interruptible(
857 &davinci_spi_dma->dma_rx_completion);
858
859 dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);
860
861 if (t->rx_buf)
862 dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);
863
864 /*
 865 * Check for bit error, desync error, parity error, timeout error and
866 * receive overflow errors
867 */
868 int_status = ioread32(davinci_spi->base + SPIFLG);
869
870 ret = davinci_spi_check_error(davinci_spi, int_status);
871 if (ret != 0)
872 return ret;
873
874 /* SPI Framework maintains the count only in bytes so convert back */
875 davinci_spi->count *= conv;
876
877 return t->len;
878}
879
880/**
881 * davinci_spi_irq - IRQ handler for DaVinci SPI
882 * @irq: IRQ number for this SPI Master
883 * @context_data: structure for SPI Master controller davinci_spi
884 */
885static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
886{
887 struct davinci_spi *davinci_spi = context_data;
888 u32 int_status, rx_data = 0;
889 irqreturn_t ret = IRQ_NONE;
890
891 int_status = ioread32(davinci_spi->base + SPIFLG);
892
893 while ((int_status & SPIFLG_RX_INTR_MASK)) {
894 if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
895 ret = IRQ_HANDLED;
896
897 rx_data = ioread32(davinci_spi->base + SPIBUF);
898 davinci_spi->get_rx(rx_data, davinci_spi);
899
900 /* Disable Receive Interrupt */
901 iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
902 davinci_spi->base + SPIINT);
903 } else
904 (void)davinci_spi_check_error(davinci_spi, int_status);
905
906 int_status = ioread32(davinci_spi->base + SPIFLG);
907 }
908
909 return ret;
910}
911
912/**
913 * davinci_spi_probe - probe function for SPI Master Controller
 914 * @pdev: platform_device structure which contains platform specific data
915 */
916static int davinci_spi_probe(struct platform_device *pdev)
917{
918 struct spi_master *master;
919 struct davinci_spi *davinci_spi;
920 struct davinci_spi_platform_data *pdata;
921 struct resource *r, *mem;
922 resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
923 resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
924 resource_size_t dma_eventq = SPI_NO_RESOURCE;
925 int i = 0, ret = 0;
926
927 pdata = pdev->dev.platform_data;
928 if (pdata == NULL) {
929 ret = -ENODEV;
930 goto err;
931 }
932
933 master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
934 if (master == NULL) {
935 ret = -ENOMEM;
936 goto err;
937 }
938
939 dev_set_drvdata(&pdev->dev, master);
940
941 davinci_spi = spi_master_get_devdata(master);
942 if (davinci_spi == NULL) {
943 ret = -ENOENT;
944 goto free_master;
945 }
946
947 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
948 if (r == NULL) {
949 ret = -ENOENT;
950 goto free_master;
951 }
952
953 davinci_spi->pbase = r->start;
954 davinci_spi->region_size = resource_size(r);
955 davinci_spi->pdata = pdata;
956
957 mem = request_mem_region(r->start, davinci_spi->region_size,
958 pdev->name);
959 if (mem == NULL) {
960 ret = -EBUSY;
961 goto free_master;
962 }
963
 964 davinci_spi->base = ioremap(r->start, davinci_spi->region_size);
965 if (davinci_spi->base == NULL) {
966 ret = -ENOMEM;
967 goto release_region;
968 }
969
970 davinci_spi->irq = platform_get_irq(pdev, 0);
971 if (davinci_spi->irq <= 0) {
972 ret = -EINVAL;
973 goto unmap_io;
974 }
975
976 ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
977 dev_name(&pdev->dev), davinci_spi);
978 if (ret)
979 goto unmap_io;
980
981 /* Allocate tmp_buf for tx_buf */
982 davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
983 if (davinci_spi->tmp_buf == NULL) {
984 ret = -ENOMEM;
985 goto irq_free;
986 }
987
988 davinci_spi->bitbang.master = spi_master_get(master);
989 if (davinci_spi->bitbang.master == NULL) {
990 ret = -ENODEV;
991 goto free_tmp_buf;
992 }
993
994 davinci_spi->clk = clk_get(&pdev->dev, NULL);
995 if (IS_ERR(davinci_spi->clk)) {
996 ret = -ENODEV;
997 goto put_master;
998 }
999 clk_enable(davinci_spi->clk);
1000
1001 master->bus_num = pdev->id;
1002 master->num_chipselect = pdata->num_chipselect;
1003 master->setup = davinci_spi_setup;
1004 master->cleanup = davinci_spi_cleanup;
1005
1006 davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
1007 davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;
1008
1009 davinci_spi->version = pdata->version;
1010 use_dma = pdata->use_dma;
1011
1012 davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
1013 if (davinci_spi->version == SPI_VERSION_2)
1014 davinci_spi->bitbang.flags |= SPI_READY;
1015
1016 if (use_dma) {
1017 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1018 if (r)
1019 dma_rx_chan = r->start;
1020 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1021 if (r)
1022 dma_tx_chan = r->start;
1023 r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
1024 if (r)
1025 dma_eventq = r->start;
1026 }
1027
1028 if (!use_dma ||
1029 dma_rx_chan == SPI_NO_RESOURCE ||
1030 dma_tx_chan == SPI_NO_RESOURCE ||
1031 dma_eventq == SPI_NO_RESOURCE) {
1032 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
1033 use_dma = 0;
1034 } else {
1035 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
1036 davinci_spi->dma_channels = kzalloc(master->num_chipselect
1037 * sizeof(struct davinci_spi_dma), GFP_KERNEL);
1038 if (davinci_spi->dma_channels == NULL) {
1039 ret = -ENOMEM;
1040 goto free_clk;
1041 }
1042
1043 for (i = 0; i < master->num_chipselect; i++) {
1044 davinci_spi->dma_channels[i].dma_rx_channel = -1;
1045 davinci_spi->dma_channels[i].dma_rx_sync_dev =
1046 dma_rx_chan;
1047 davinci_spi->dma_channels[i].dma_tx_channel = -1;
1048 davinci_spi->dma_channels[i].dma_tx_sync_dev =
1049 dma_tx_chan;
1050 davinci_spi->dma_channels[i].eventq = dma_eventq;
1051 }
1052 dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
1053 "Using RX channel = %d , TX channel = %d and "
1054 "event queue = %d", dma_rx_chan, dma_tx_chan,
1055 dma_eventq);
1056 }
1057
1058 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
1059 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
1060
1061 init_completion(&davinci_spi->done);
1062
1063 /* Reset In/OUT SPI module */
1064 iowrite32(0, davinci_spi->base + SPIGCR0);
1065 udelay(100);
1066 iowrite32(1, davinci_spi->base + SPIGCR0);
1067
1068 /* initialize chip selects */
1069 if (pdata->chip_sel) {
1070 for (i = 0; i < pdata->num_chipselect; i++) {
1071 if (pdata->chip_sel[i] != SPI_INTERN_CS)
1072 gpio_direction_output(pdata->chip_sel[i], 1);
1073 }
1074 }
1075
1076 /* Clock internal */
1077 if (davinci_spi->pdata->clk_internal)
1078 set_io_bits(davinci_spi->base + SPIGCR1,
1079 SPIGCR1_CLKMOD_MASK);
1080 else
1081 clear_io_bits(davinci_spi->base + SPIGCR1,
1082 SPIGCR1_CLKMOD_MASK);
1083
1084 iowrite32(CS_DEFAULT, davinci_spi->base + SPIDEF);
1085
1086 /* master mode default */
1087 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
1088
1089 if (davinci_spi->pdata->intr_level)
1090 iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
1091 else
1092 iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);
1093
1094 ret = spi_bitbang_start(&davinci_spi->bitbang);
1095 if (ret)
1096 goto free_clk;
1097
 1098 dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base);
1099
1100 if (!pdata->poll_mode)
1101 dev_info(&pdev->dev, "Operating in interrupt mode"
1102 " using IRQ %d\n", davinci_spi->irq);
1103
1104 return ret;
1105
1106free_clk:
1107 clk_disable(davinci_spi->clk);
1108 clk_put(davinci_spi->clk);
1109put_master:
1110 spi_master_put(master);
1111free_tmp_buf:
1112 kfree(davinci_spi->tmp_buf);
1113irq_free:
1114 free_irq(davinci_spi->irq, davinci_spi);
1115unmap_io:
1116 iounmap(davinci_spi->base);
1117release_region:
1118 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1119free_master:
1120 kfree(master);
1121err:
1122 return ret;
1123}
1124
1125/**
1126 * davinci_spi_remove - remove function for SPI Master Controller
 1127 * @pdev: platform_device structure which contains platform specific data
1128 *
1129 * This function will do the reverse action of davinci_spi_probe function
1130 * It will free the IRQ and SPI controller's memory region.
1131 * It will also call spi_bitbang_stop to destroy the work queue which was
1132 * created by spi_bitbang_start.
1133 */
1134static int __exit davinci_spi_remove(struct platform_device *pdev)
1135{
1136 struct davinci_spi *davinci_spi;
1137 struct spi_master *master;
1138
1139 master = dev_get_drvdata(&pdev->dev);
1140 davinci_spi = spi_master_get_devdata(master);
1141
1142 spi_bitbang_stop(&davinci_spi->bitbang);
1143
1144 clk_disable(davinci_spi->clk);
1145 clk_put(davinci_spi->clk);
1146 spi_master_put(master);
1147 kfree(davinci_spi->tmp_buf);
1148 free_irq(davinci_spi->irq, davinci_spi);
1149 iounmap(davinci_spi->base);
1150 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1151
1152 return 0;
1153}
1154
1155static struct platform_driver davinci_spi_driver = {
1156 .driver.name = "spi_davinci",
1157 .remove = __exit_p(davinci_spi_remove),
1158};
1159
1160static int __init davinci_spi_init(void)
1161{
1162 return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
1163}
1164module_init(davinci_spi_init);
1165
1166static void __exit davinci_spi_exit(void)
1167{
1168 platform_driver_unregister(&davinci_spi_driver);
1169}
1170module_exit(davinci_spi_exit);
1171
1172MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
1173MODULE_LICENSE("GPL");