spi: rspi: Make more pointers const
[deliverable/linux.git] / drivers / spi / spi-rspi.c
1 /*
2 * SH RSPI driver
3 *
4 * Copyright (C) 2012 Renesas Solutions Corp.
5 *
6 * Based on spi-sh.c:
7 * Copyright (C) 2011 Renesas Solutions Corp.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 *
22 */
23
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
27 #include <linux/errno.h>
28 #include <linux/list.h>
29 #include <linux/workqueue.h>
30 #include <linux/interrupt.h>
31 #include <linux/platform_device.h>
32 #include <linux/io.h>
33 #include <linux/clk.h>
34 #include <linux/dmaengine.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/sh_dma.h>
37 #include <linux/spi/spi.h>
38 #include <linux/spi/rspi.h>
39
40 #define RSPI_SPCR 0x00
41 #define RSPI_SSLP 0x01
42 #define RSPI_SPPCR 0x02
43 #define RSPI_SPSR 0x03
44 #define RSPI_SPDR 0x04
45 #define RSPI_SPSCR 0x08
46 #define RSPI_SPSSR 0x09
47 #define RSPI_SPBR 0x0a
48 #define RSPI_SPDCR 0x0b
49 #define RSPI_SPCKD 0x0c
50 #define RSPI_SSLND 0x0d
51 #define RSPI_SPND 0x0e
52 #define RSPI_SPCR2 0x0f
53 #define RSPI_SPCMD0 0x10
54 #define RSPI_SPCMD1 0x12
55 #define RSPI_SPCMD2 0x14
56 #define RSPI_SPCMD3 0x16
57 #define RSPI_SPCMD4 0x18
58 #define RSPI_SPCMD5 0x1a
59 #define RSPI_SPCMD6 0x1c
60 #define RSPI_SPCMD7 0x1e
61
62 /*qspi only */
63 #define QSPI_SPBFCR 0x18
64 #define QSPI_SPBDCR 0x1a
65 #define QSPI_SPBMUL0 0x1c
66 #define QSPI_SPBMUL1 0x20
67 #define QSPI_SPBMUL2 0x24
68 #define QSPI_SPBMUL3 0x28
69
70 /* SPCR */
71 #define SPCR_SPRIE 0x80
72 #define SPCR_SPE 0x40
73 #define SPCR_SPTIE 0x20
74 #define SPCR_SPEIE 0x10
75 #define SPCR_MSTR 0x08
76 #define SPCR_MODFEN 0x04
77 #define SPCR_TXMD 0x02
78 #define SPCR_SPMS 0x01
79
80 /* SSLP */
81 #define SSLP_SSL1P 0x02
82 #define SSLP_SSL0P 0x01
83
84 /* SPPCR */
85 #define SPPCR_MOIFE 0x20
86 #define SPPCR_MOIFV 0x10
87 #define SPPCR_SPOM 0x04
88 #define SPPCR_SPLP2 0x02
89 #define SPPCR_SPLP 0x01
90
91 /* SPSR */
92 #define SPSR_SPRF 0x80
93 #define SPSR_SPTEF 0x20
94 #define SPSR_PERF 0x08
95 #define SPSR_MODF 0x04
96 #define SPSR_IDLNF 0x02
97 #define SPSR_OVRF 0x01
98
99 /* SPSCR */
100 #define SPSCR_SPSLN_MASK 0x07
101
102 /* SPSSR */
103 #define SPSSR_SPECM_MASK 0x70
104 #define SPSSR_SPCP_MASK 0x07
105
106 /* SPDCR */
107 #define SPDCR_SPLW 0x20
108 #define SPDCR_SPRDTD 0x10
109 #define SPDCR_SLSEL1 0x08
110 #define SPDCR_SLSEL0 0x04
111 #define SPDCR_SLSEL_MASK 0x0c
112 #define SPDCR_SPFC1 0x02
113 #define SPDCR_SPFC0 0x01
114
115 /* SPCKD */
116 #define SPCKD_SCKDL_MASK 0x07
117
118 /* SSLND */
119 #define SSLND_SLNDL_MASK 0x07
120
121 /* SPND */
122 #define SPND_SPNDL_MASK 0x07
123
124 /* SPCR2 */
125 #define SPCR2_PTE 0x08
126 #define SPCR2_SPIE 0x04
127 #define SPCR2_SPOE 0x02
128 #define SPCR2_SPPE 0x01
129
130 /* SPCMDn */
131 #define SPCMD_SCKDEN 0x8000
132 #define SPCMD_SLNDEN 0x4000
133 #define SPCMD_SPNDEN 0x2000
134 #define SPCMD_LSBF 0x1000
135 #define SPCMD_SPB_MASK 0x0f00
136 #define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
137 #define SPCMD_SPB_8BIT 0x0000 /* qspi only */
138 #define SPCMD_SPB_16BIT 0x0100
139 #define SPCMD_SPB_20BIT 0x0000
140 #define SPCMD_SPB_24BIT 0x0100
141 #define SPCMD_SPB_32BIT 0x0200
142 #define SPCMD_SSLKP 0x0080
143 #define SPCMD_SSLA_MASK 0x0030
144 #define SPCMD_BRDV_MASK 0x000c
145 #define SPCMD_CPOL 0x0002
146 #define SPCMD_CPHA 0x0001
147
148 /* SPBFCR */
149 #define SPBFCR_TXRST 0x80 /* qspi only */
150 #define SPBFCR_RXRST 0x40 /* qspi only */
151
/* Per-controller driver state, allocated as spi_master devdata. */
struct rspi_data {
	void __iomem *addr;		/* ioremapped register base */
	u32 max_speed_hz;		/* cached from spi_device in rspi_setup() */
	struct spi_master *master;
	struct list_head queue;		/* pending spi_messages, guarded by lock */
	struct work_struct ws;		/* worker that drains the queue */
	wait_queue_head_t wait;		/* woken by IRQ handler / DMA callback */
	spinlock_t lock;		/* protects queue */
	struct clk *clk;
	unsigned char spsr;		/* SPSR snapshot latched in rspi_irq() */
	const struct spi_ops *ops;	/* RSPI vs. QSPI variant hooks */

	/* for dmaengine */
	struct dma_chan *chan_tx;
	struct dma_chan *chan_rx;
	int irq;			/* saved so DMA paths can mask it */

	unsigned dma_width_16bit:1;	/* DMAC bus is 16-bit: repack 8<->16 */
	unsigned dma_callbacked:1;	/* set by rspi_dma_complete() */
};
172
/* Write an 8-bit value to the register at @offset from the base address. */
static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
{
	iowrite8(data, rspi->addr + offset);
}
177
/* Write a 16-bit value to the register at @offset from the base address. */
static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
{
	iowrite16(data, rspi->addr + offset);
}
182
/* Write a 32-bit value to the register at @offset from the base address. */
static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
{
	iowrite32(data, rspi->addr + offset);
}
187
/* Read an 8-bit value from the register at @offset from the base address. */
static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
{
	return ioread8(rspi->addr + offset);
}
192
/* Read a 16-bit value from the register at @offset from the base address. */
static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
{
	return ioread16(rspi->addr + offset);
}
197
/* optional functions: per-variant (RSPI vs. QSPI) operations */
struct spi_ops {
	/* program the controller for the given access size; 0 on success */
	int (*set_config_register)(const struct rspi_data *rspi,
				   int access_size);
	/* PIO transmit of one transfer; returns 0 or -ETIMEDOUT */
	int (*send_pio)(struct rspi_data *rspi, struct spi_message *mesg,
			struct spi_transfer *t);
	/* PIO receive of one transfer; returns 0 or -ETIMEDOUT */
	int (*receive_pio)(struct rspi_data *rspi, struct spi_message *mesg,
			   struct spi_transfer *t);

};
208
209 /*
210 * functions for RSPI
211 */
/*
 * functions for RSPI
 */
/*
 * Program the RSPI variant's configuration registers for @access_size
 * bits per frame.  Leaves the controller in master mode with SPE off
 * (rspi_assert_ssl() enables it per message).  Always returns 0.
 */
static int rspi_set_config_register(const struct rspi_data *rspi,
				    int access_size)
{
	int spbr;

	/* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
	rspi_write8(rspi, 0x00, RSPI_SPPCR);

	/* Sets transfer bit rate */
	spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);

	/* Sets number of frames to be used: 1 frame */
	rspi_write8(rspi, 0x00, RSPI_SPDCR);

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Sets parity, interrupt mask */
	rspi_write8(rspi, 0x00, RSPI_SPCR2);

	/* Sets SPCMD */
	rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
		     RSPI_SPCMD0);

	/* Sets RSPI mode */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}
244
245 /*
246 * functions for QSPI
247 */
248 static int qspi_set_config_register(const struct rspi_data *rspi,
249 int access_size)
250 {
251 u16 spcmd;
252 int spbr;
253
254 /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
255 rspi_write8(rspi, 0x00, RSPI_SPPCR);
256
257 /* Sets transfer bit rate */
258 spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz);
259 rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
260
261 /* Sets number of frames to be used: 1 frame */
262 rspi_write8(rspi, 0x00, RSPI_SPDCR);
263
264 /* Sets RSPCK, SSL, next-access delay value */
265 rspi_write8(rspi, 0x00, RSPI_SPCKD);
266 rspi_write8(rspi, 0x00, RSPI_SSLND);
267 rspi_write8(rspi, 0x00, RSPI_SPND);
268
269 /* Data Length Setting */
270 if (access_size == 8)
271 spcmd = SPCMD_SPB_8BIT;
272 else if (access_size == 16)
273 spcmd = SPCMD_SPB_16BIT;
274 else if (access_size == 32)
275 spcmd = SPCMD_SPB_32BIT;
276
277 spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SSLKP | SPCMD_SPNDEN;
278
279 /* Resets transfer data length */
280 rspi_write32(rspi, 0, QSPI_SPBMUL0);
281
282 /* Resets transmit and receive buffer */
283 rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
284 /* Sets buffer to allow normal operation */
285 rspi_write8(rspi, 0x00, QSPI_SPBFCR);
286
287 /* Sets SPCMD */
288 rspi_write16(rspi, spcmd, RSPI_SPCMD0);
289
290 /* Enables SPI function in a master mode */
291 rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
292
293 return 0;
294 }
295
/* Dispatch to the variant-specific (RSPI vs. QSPI) configuration hook. */
#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
297
/* Set the interrupt-enable bits in @enable in SPCR (read-modify-write). */
static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
}
302
/* Clear the interrupt-enable bits in @disable in SPCR (read-modify-write). */
static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
}
307
/*
 * Enable the interrupt(s) in @enable_bit, then sleep until the IRQ handler
 * latches an SPSR value containing @wait_mask into rspi->spsr, or one
 * second (HZ jiffies) elapses.  Returns 0 on success, -ETIMEDOUT on timeout.
 */
static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
				   u8 enable_bit)
{
	int ret;

	/* Snapshot SPSR first: the condition may already hold */
	rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
	rspi_enable_irq(rspi, enable_bit);
	ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
	if (ret == 0 && !(rspi->spsr & wait_mask))
		return -ETIMEDOUT;

	return 0;
}
321
/* Assert slave select by setting the SPI-enable (SPE) bit in SPCR. */
static void rspi_assert_ssl(const struct rspi_data *rspi)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
}
326
/* Negate slave select by clearing the SPI-enable (SPE) bit in SPCR. */
static void rspi_negate_ssl(const struct rspi_data *rspi)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
}
331
/*
 * Transmit t->len bytes by PIO on the RSPI variant.
 * @mesg is unused here but kept for the spi_ops->send_pio signature.
 * Returns 0 on success or -ETIMEDOUT when the TX buffer never empties.
 */
static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
			 struct spi_transfer *t)
{
	int remain = t->len;
	const u8 *data = t->tx_buf;
	while (remain > 0) {
		/* Select transmit-only mode for this byte */
		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
			    RSPI_SPCR);

		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: tx empty timeout\n", __func__);
			return -ETIMEDOUT;
		}

		/* SPDR on this variant takes 16-bit accesses */
		rspi_write16(rspi, *data, RSPI_SPDR);
		data++;
		remain--;
	}

	/* Waiting for the last transmition */
	rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);

	return 0;
}
357
/*
 * Transmit t->len bytes by PIO on the QSPI variant.  Every written byte
 * is followed by a dummy read of SPDR to drain the data clocked into the
 * receive buffer.  @mesg is unused but kept for the spi_ops signature.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
			 struct spi_transfer *t)
{
	int remain = t->len;
	const u8 *data = t->tx_buf;

	/* Reset the transmit buffer, then restore normal operation */
	rspi_write8(rspi, SPBFCR_TXRST, QSPI_SPBFCR);
	rspi_write8(rspi, 0x00, QSPI_SPBFCR);

	while (remain > 0) {

		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: tx empty timeout\n", __func__);
			return -ETIMEDOUT;
		}
		rspi_write8(rspi, *data++, RSPI_SPDR);

		if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: receive timeout\n", __func__);
			return -ETIMEDOUT;
		}
		/* Dummy read: discard the byte received with the write */
		rspi_read8(rspi, RSPI_SPDR);

		remain--;
	}

	/* Waiting for the last transmition */
	rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);

	return 0;
}
391
/* Dispatch to the variant-specific (RSPI vs. QSPI) PIO transmit hook. */
#define send_pio(spi, mesg, t) spi->ops->send_pio(spi, mesg, t)
393
/* dmaengine completion callback: flag completion and wake the waiter. */
static void rspi_dma_complete(void *arg)
{
	struct rspi_data *rspi = arg;

	rspi->dma_callbacked = 1;
	wake_up_interruptible(&rspi->wait);
}
401
/*
 * Wrap @buf/@len in a single-entry scatterlist and DMA-map it for @chan.
 * Returns the number of mapped entries (0 on failure), per dma_map_sg().
 */
static int rspi_dma_map_sg(struct scatterlist *sg, const void *buf,
			   unsigned len, struct dma_chan *chan,
			   enum dma_transfer_direction dir)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, len);
	sg_dma_len(sg) = len;
	return dma_map_sg(chan->device->dev, sg, 1, dir);
}
411
/* Undo rspi_dma_map_sg() for a single-entry scatterlist. */
static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan,
			      enum dma_transfer_direction dir)
{
	dma_unmap_sg(chan->device->dev, sg, 1, dir);
}
417
418 static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len)
419 {
420 u16 *dst = buf;
421 const u8 *src = data;
422
423 while (len) {
424 *dst++ = (u16)(*src++);
425 len--;
426 }
427 }
428
429 static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
430 {
431 u8 *dst = buf;
432 const u16 *src = data;
433
434 while (len) {
435 *dst++ = (u8)*src++;
436 len--;
437 }
438 }
439
/*
 * Transmit one transfer via the TX DMA channel.  When the DMAC bus is
 * 16-bit wide, the payload is first repacked into a temporary buffer
 * (one byte per halfword).  The controller IRQ is masked for the
 * duration because the DMAC needs SPTIE set, which would otherwise
 * trigger rspi_irq().  Returns 0, -ENOMEM, -EFAULT, -EIO, -ETIMEDOUT,
 * or -ERESTARTSYS from the interruptible wait.
 */
static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
	struct scatterlist sg;
	const void *buf = NULL;
	struct dma_async_tx_descriptor *desc;
	unsigned len;
	int ret = 0;

	if (rspi->dma_width_16bit) {
		void *tmp;
		/*
		 * If DMAC bus width is 16-bit, the driver allocates a dummy
		 * buffer. And, the driver converts original data into the
		 * DMAC data as the following format:
		 *  original data: 1st byte, 2nd byte ...
		 *  DMAC data:     1st byte, dummy, 2nd byte, dummy ...
		 */
		len = t->len * 2;
		tmp = kmalloc(len, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		rspi_memory_to_8bit(tmp, t->tx_buf, t->len);
		buf = tmp;
	} else {
		len = t->len;
		buf = t->tx_buf;
	}

	if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) {
		ret = -EFAULT;
		goto end_nomap;
	}
	desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto end;
	}

	/*
	 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
	 * called. So, this driver disables the IRQ while DMA transfer.
	 */
	disable_irq(rspi->irq);

	/* Transmit-only mode, TX-empty request enabled for the DMAC */
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
	rspi_enable_irq(rspi, SPCR_SPTIE);
	rspi->dma_callbacked = 0;

	desc->callback = rspi_dma_complete;
	desc->callback_param = rspi;
	dmaengine_submit(desc);
	dma_async_issue_pending(rspi->chan_tx);

	ret = wait_event_interruptible_timeout(rspi->wait,
					       rspi->dma_callbacked, HZ);
	if (ret > 0 && rspi->dma_callbacked)
		ret = 0;
	else if (!ret)
		ret = -ETIMEDOUT;
	rspi_disable_irq(rspi, SPCR_SPTIE);

	enable_irq(rspi->irq);

end:
	rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
end_nomap:
	/* buf is the temporary repack buffer only in the 16-bit case */
	if (rspi->dma_width_16bit)
		kfree(buf);

	return ret;
}
512
513 static void rspi_receive_init(const struct rspi_data *rspi)
514 {
515 unsigned char spsr;
516
517 spsr = rspi_read8(rspi, RSPI_SPSR);
518 if (spsr & SPSR_SPRF)
519 rspi_read16(rspi, RSPI_SPDR); /* dummy read */
520 if (spsr & SPSR_OVRF)
521 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
522 RSPI_SPCR);
523 }
524
/*
 * Receive t->len bytes by PIO on the RSPI variant.  Each byte requires a
 * dummy write to SPDR to generate the SPI clock.  @mesg is unused but
 * kept for the spi_ops signature.  Returns 0 or -ETIMEDOUT.
 */
static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
			    struct spi_transfer *t)
{
	int remain = t->len;
	u8 *data;

	rspi_receive_init(rspi);

	data = t->rx_buf;
	while (remain > 0) {
		/* Select full-duplex (clear transmit-only) mode */
		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
			    RSPI_SPCR);

		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: tx empty timeout\n", __func__);
			return -ETIMEDOUT;
		}
		/* dummy write for generate clock */
		rspi_write16(rspi, 0x00, RSPI_SPDR);

		if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: receive timeout\n", __func__);
			return -ETIMEDOUT;
		}
		/* SPDR allows 16 or 32-bit access only */
		*data = (u8)rspi_read16(rspi, RSPI_SPDR);

		data++;
		remain--;
	}

	return 0;
}
560
/*
 * Prepare the QSPI variant for reception: flush a stale RX byte, then
 * reset both FIFOs and restore normal buffer operation.
 */
static void qspi_receive_init(const struct rspi_data *rspi)
{
	unsigned char spsr;

	spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		rspi_read8(rspi, RSPI_SPDR);   /* dummy read */
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
	rspi_write8(rspi, 0x00, QSPI_SPBFCR);
}
571
/*
 * Receive t->len bytes by PIO on the QSPI variant.  A dummy write to
 * SPDR generates the clock for each received byte.  @mesg is unused but
 * kept for the spi_ops signature.  Returns 0 or -ETIMEDOUT.
 */
static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
			    struct spi_transfer *t)
{
	int remain = t->len;
	u8 *data;

	qspi_receive_init(rspi);

	data = t->rx_buf;
	while (remain > 0) {

		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: tx empty timeout\n", __func__);
			return -ETIMEDOUT;
		}
		/* dummy write for generate clock */
		rspi_write8(rspi, 0x00, RSPI_SPDR);

		if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
			dev_err(&rspi->master->dev,
				"%s: receive timeout\n", __func__);
			return -ETIMEDOUT;
		}
		/* SPDR allows 8, 16 or 32-bit access */
		*data++ = rspi_read8(rspi, RSPI_SPDR);
		remain--;
	}

	return 0;
}
603
/* Dispatch to the variant-specific (RSPI vs. QSPI) PIO receive hook. */
#define receive_pio(spi, mesg, t) spi->ops->receive_pio(spi, mesg, t)
605
/*
 * Receive one transfer via DMA.  Two descriptors are queued: a dummy
 * zero-filled TX transfer (to generate the SPI clock) and the real RX
 * transfer.  Only the RX descriptor gets a completion callback.  When
 * the DMAC bus is 16-bit, data is received into a temporary buffer and
 * narrowed back to bytes afterwards.  The controller IRQ is masked for
 * the duration (DMAC needs SPTIE/SPRIE set).  Returns 0 or a negative
 * error (-ENOMEM, -EFAULT, -EIO, -ETIMEDOUT, or -ERESTARTSYS).
 */
static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
	struct scatterlist sg, sg_dummy;
	void *dummy = NULL, *rx_buf = NULL;
	struct dma_async_tx_descriptor *desc, *desc_dummy;
	unsigned len;
	int ret = 0;

	if (rspi->dma_width_16bit) {
		/*
		 * If DMAC bus width is 16-bit, the driver allocates a dummy
		 * buffer. And, finally the driver converts the DMAC data into
		 * actual data as the following format:
		 *  DMAC data:   1st byte, dummy, 2nd byte, dummy ...
		 *  actual data: 1st byte, 2nd byte ...
		 */
		len = t->len * 2;
		rx_buf = kmalloc(len, GFP_KERNEL);
		if (!rx_buf)
			return -ENOMEM;
	} else {
		len = t->len;
		rx_buf = t->rx_buf;
	}

	/* prepare dummy transfer to generate SPI clocks */
	dummy = kzalloc(len, GFP_KERNEL);
	if (!dummy) {
		ret = -ENOMEM;
		goto end_nomap;
	}
	if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx,
			     DMA_TO_DEVICE)) {
		ret = -EFAULT;
		goto end_nomap;
	}
	desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_dummy) {
		ret = -EIO;
		goto end_dummy_mapped;
	}

	/* prepare receive transfer */
	if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx,
			     DMA_FROM_DEVICE)) {
		ret = -EFAULT;
		goto end_dummy_mapped;

	}
	desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto end;
	}

	rspi_receive_init(rspi);

	/*
	 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
	 * called. So, this driver disables the IRQ while DMA transfer.
	 */
	disable_irq(rspi->irq);

	/* Full-duplex mode; enable both TX-empty and RX-full requests */
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
	rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
	rspi->dma_callbacked = 0;

	/* RX completion is the one that signals the whole transfer */
	desc->callback = rspi_dma_complete;
	desc->callback_param = rspi;
	dmaengine_submit(desc);
	dma_async_issue_pending(rspi->chan_rx);

	desc_dummy->callback = NULL;	/* No callback */
	dmaengine_submit(desc_dummy);
	dma_async_issue_pending(rspi->chan_tx);

	ret = wait_event_interruptible_timeout(rspi->wait,
					       rspi->dma_callbacked, HZ);
	if (ret > 0 && rspi->dma_callbacked)
		ret = 0;
	else if (!ret)
		ret = -ETIMEDOUT;
	rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);

	enable_irq(rspi->irq);

end:
	rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
end_dummy_mapped:
	rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE);
end_nomap:
	if (rspi->dma_width_16bit) {
		/* Narrow the 16-bit DMAC data back into the caller's buffer */
		if (!ret)
			rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len);
		kfree(rx_buf);
	}
	kfree(dummy);

	return ret;
}
708
709 static int rspi_is_dma(const struct rspi_data *rspi, struct spi_transfer *t)
710 {
711 if (t->tx_buf && rspi->chan_tx)
712 return 1;
713 /* If the module receives data by DMAC, it also needs TX DMAC */
714 if (t->rx_buf && rspi->chan_tx && rspi->chan_rx)
715 return 1;
716
717 return 0;
718 }
719
/*
 * Workqueue handler: drain the message queue.  Each transfer runs via
 * DMA when channels are available, otherwise via the variant's PIO hook.
 * On any transfer error the current message is completed immediately
 * with a negative status and the worker returns.
 */
static void rspi_work(struct work_struct *work)
{
	struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
	struct spi_message *mesg;
	struct spi_transfer *t;
	unsigned long flags;
	int ret;

	while (1) {
		/* Pop the next message under the queue lock */
		spin_lock_irqsave(&rspi->lock, flags);
		if (list_empty(&rspi->queue)) {
			spin_unlock_irqrestore(&rspi->lock, flags);
			break;
		}
		mesg = list_entry(rspi->queue.next, struct spi_message, queue);
		list_del_init(&mesg->queue);
		spin_unlock_irqrestore(&rspi->lock, flags);

		rspi_assert_ssl(rspi);

		list_for_each_entry(t, &mesg->transfers, transfer_list) {
			if (t->tx_buf) {
				if (rspi_is_dma(rspi, t))
					ret = rspi_send_dma(rspi, t);
				else
					ret = send_pio(rspi, mesg, t);
				if (ret < 0)
					goto error;
			}
			if (t->rx_buf) {
				if (rspi_is_dma(rspi, t))
					ret = rspi_receive_dma(rspi, t);
				else
					ret = receive_pio(rspi, mesg, t);
				if (ret < 0)
					goto error;
			}
			mesg->actual_length += t->len;
		}
		rspi_negate_ssl(rspi);

		mesg->status = 0;
		mesg->complete(mesg->context);
	}

	return;

error:
	mesg->status = ret;
	mesg->complete(mesg->context);
}
771
/*
 * spi_master->setup hook: default to 8 bits per word, cache the device's
 * max speed, and reprogram the controller for 8-bit access.  Always
 * returns 0 (the set_config_register result is ignored here).
 */
static int rspi_setup(struct spi_device *spi)
{
	struct rspi_data *rspi = spi_master_get_devdata(spi->master);

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;
	rspi->max_speed_hz = spi->max_speed_hz;

	set_config_register(rspi, 8);

	return 0;
}
784
/*
 * spi_master->transfer hook: queue the message and kick the worker.
 * Runs in the caller's context (possibly atomic); the actual transfer
 * happens in rspi_work().  Always returns 0.
 */
static int rspi_transfer(struct spi_device *spi, struct spi_message *mesg)
{
	struct rspi_data *rspi = spi_master_get_devdata(spi->master);
	unsigned long flags;

	mesg->actual_length = 0;
	mesg->status = -EINPROGRESS;

	spin_lock_irqsave(&rspi->lock, flags);
	list_add_tail(&mesg->queue, &rspi->queue);
	schedule_work(&rspi->ws);
	spin_unlock_irqrestore(&rspi->lock, flags);

	return 0;
}
800
/* spi_master->cleanup hook: no per-device state to release. */
static void rspi_cleanup(struct spi_device *spi)
{
}
804
805 static irqreturn_t rspi_irq(int irq, void *_sr)
806 {
807 struct rspi_data *rspi = _sr;
808 unsigned long spsr;
809 irqreturn_t ret = IRQ_NONE;
810 unsigned char disable_irq = 0;
811
812 rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
813 if (spsr & SPSR_SPRF)
814 disable_irq |= SPCR_SPRIE;
815 if (spsr & SPSR_SPTEF)
816 disable_irq |= SPCR_SPTIE;
817
818 if (disable_irq) {
819 ret = IRQ_HANDLED;
820 rspi_disable_irq(rspi, disable_irq);
821 wake_up(&rspi->wait);
822 }
823
824 return ret;
825 }
826
/*
 * Request and configure the optional TX/RX DMA channels described by the
 * platform data.  Missing platform data or resource means "no DMA" and
 * is not an error.  RX DMA is only requested together with TX DMA,
 * because receive needs a dummy TX transfer for the clock.
 * Returns 0 on success or a dmaengine_slave_config() error.
 */
static int rspi_request_dma(struct rspi_data *rspi,
			    struct platform_device *pdev)
{
	const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dma_cap_mask_t mask;
	/* NOTE(review): cfg is not zero-initialized; fields other than the
	 * ones set below are passed to dmaengine uninitialized — confirm
	 * this is benign for the shdma controller. */
	struct dma_slave_config cfg;
	int ret;

	if (!res || !rspi_pd)
		return 0;	/* The driver assumes no error. */

	rspi->dma_width_16bit = rspi_pd->dma_width_16bit;

	/* If the module receives data by DMAC, it also needs TX DMAC */
	if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
						    (void *)rspi_pd->dma_rx_id);
		if (rspi->chan_rx) {
			cfg.slave_id = rspi_pd->dma_rx_id;
			cfg.direction = DMA_DEV_TO_MEM;
			cfg.dst_addr = 0;
			cfg.src_addr = res->start + RSPI_SPDR;
			ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
			if (!ret)
				dev_info(&pdev->dev, "Use DMA when rx.\n");
			else
				return ret;
		}
	}
	if (rspi_pd->dma_tx_id) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter,
						    (void *)rspi_pd->dma_tx_id);
		if (rspi->chan_tx) {
			cfg.slave_id = rspi_pd->dma_tx_id;
			cfg.direction = DMA_MEM_TO_DEV;
			cfg.dst_addr = res->start + RSPI_SPDR;
			cfg.src_addr = 0;
			ret = dmaengine_slave_config(rspi->chan_tx, &cfg);
			if (!ret)
				dev_info(&pdev->dev, "Use DMA when tx\n");
			else
				return ret;
		}
	}

	return 0;
}
879
/* Release whichever DMA channels were acquired (guards: may be NULL). */
static void rspi_release_dma(struct rspi_data *rspi)
{
	if (rspi->chan_tx)
		dma_release_channel(rspi->chan_tx);
	if (rspi->chan_rx)
		dma_release_channel(rspi->chan_rx);
}
887
888 static int rspi_remove(struct platform_device *pdev)
889 {
890 struct rspi_data *rspi = spi_master_get(platform_get_drvdata(pdev));
891
892 spi_unregister_master(rspi->master);
893 rspi_release_dma(rspi);
894 free_irq(platform_get_irq(pdev, 0), rspi);
895 clk_put(rspi->clk);
896 iounmap(rspi->addr);
897 spi_master_put(rspi->master);
898
899 return 0;
900 }
901
902 static int rspi_probe(struct platform_device *pdev)
903 {
904 struct resource *res;
905 struct spi_master *master;
906 struct rspi_data *rspi;
907 int ret, irq;
908 char clk_name[16];
909 const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
910 const struct spi_ops *ops;
911 const struct platform_device_id *id_entry = pdev->id_entry;
912
913 ops = (struct spi_ops *)id_entry->driver_data;
914 /* ops parameter check */
915 if (!ops->set_config_register) {
916 dev_err(&pdev->dev, "there is no set_config_register\n");
917 return -ENODEV;
918 }
919 /* get base addr */
920 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
921 if (unlikely(res == NULL)) {
922 dev_err(&pdev->dev, "invalid resource\n");
923 return -EINVAL;
924 }
925
926 irq = platform_get_irq(pdev, 0);
927 if (irq < 0) {
928 dev_err(&pdev->dev, "platform_get_irq error\n");
929 return -ENODEV;
930 }
931
932 master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
933 if (master == NULL) {
934 dev_err(&pdev->dev, "spi_alloc_master error.\n");
935 return -ENOMEM;
936 }
937
938 rspi = spi_master_get_devdata(master);
939 platform_set_drvdata(pdev, rspi);
940 rspi->ops = ops;
941 rspi->master = master;
942 rspi->addr = ioremap(res->start, resource_size(res));
943 if (rspi->addr == NULL) {
944 dev_err(&pdev->dev, "ioremap error.\n");
945 ret = -ENOMEM;
946 goto error1;
947 }
948
949 snprintf(clk_name, sizeof(clk_name), "%s%d", id_entry->name, pdev->id);
950 rspi->clk = clk_get(&pdev->dev, clk_name);
951 if (IS_ERR(rspi->clk)) {
952 dev_err(&pdev->dev, "cannot get clock\n");
953 ret = PTR_ERR(rspi->clk);
954 goto error2;
955 }
956 clk_enable(rspi->clk);
957
958 INIT_LIST_HEAD(&rspi->queue);
959 spin_lock_init(&rspi->lock);
960 INIT_WORK(&rspi->ws, rspi_work);
961 init_waitqueue_head(&rspi->wait);
962
963 master->num_chipselect = rspi_pd->num_chipselect;
964 if (!master->num_chipselect)
965 master->num_chipselect = 2; /* default */
966
967 master->bus_num = pdev->id;
968 master->setup = rspi_setup;
969 master->transfer = rspi_transfer;
970 master->cleanup = rspi_cleanup;
971
972 ret = request_irq(irq, rspi_irq, 0, dev_name(&pdev->dev), rspi);
973 if (ret < 0) {
974 dev_err(&pdev->dev, "request_irq error\n");
975 goto error3;
976 }
977
978 rspi->irq = irq;
979 ret = rspi_request_dma(rspi, pdev);
980 if (ret < 0) {
981 dev_err(&pdev->dev, "rspi_request_dma failed.\n");
982 goto error4;
983 }
984
985 ret = spi_register_master(master);
986 if (ret < 0) {
987 dev_err(&pdev->dev, "spi_register_master error.\n");
988 goto error4;
989 }
990
991 dev_info(&pdev->dev, "probed\n");
992
993 return 0;
994
995 error4:
996 rspi_release_dma(rspi);
997 free_irq(irq, rspi);
998 error3:
999 clk_put(rspi->clk);
1000 error2:
1001 iounmap(rspi->addr);
1002 error1:
1003 spi_master_put(master);
1004
1005 return ret;
1006 }
1007
1008 static struct spi_ops rspi_ops = {
1009 .set_config_register = rspi_set_config_register,
1010 .send_pio = rspi_send_pio,
1011 .receive_pio = rspi_receive_pio,
1012 };
1013
1014 static struct spi_ops qspi_ops = {
1015 .set_config_register = qspi_set_config_register,
1016 .send_pio = qspi_send_pio,
1017 .receive_pio = qspi_receive_pio,
1018 };
1019
1020 static struct platform_device_id spi_driver_ids[] = {
1021 { "rspi", (kernel_ulong_t)&rspi_ops },
1022 { "qspi", (kernel_ulong_t)&qspi_ops },
1023 {},
1024 };
1025
1026 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
1027
/* Platform driver glue; module init/exit via module_platform_driver(). */
static struct platform_driver rspi_driver = {
	.probe =	rspi_probe,
	.remove =	rspi_remove,
	.id_table = spi_driver_ids,
	.driver = {
		.name = "renesas_spi",
		.owner = THIS_MODULE,
	},
};
module_platform_driver(rspi_driver);
1038
1039 MODULE_DESCRIPTION("Renesas RSPI bus driver");
1040 MODULE_LICENSE("GPL v2");
1041 MODULE_AUTHOR("Yoshihiro Shimoda");
1042 MODULE_ALIAS("platform:rspi");
This page took 0.107008 seconds and 6 git commands to generate.