/*
 * Freescale SPI controller driver cpm functions.
 *
 * Maintainer: Kumar Gala
 *
 * Copyright (C) 2006 Polycom, Inc.
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * CPM SPI and QE buffer descriptors mode support:
 * Copyright (c) 2009 MontaVista Software, Inc.
 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
20 #include <linux/dma-mapping.h>
21 #include <linux/fsl_devices.h>
22 #include <linux/kernel.h>
23 #include <linux/of_address.h>
24 #include <linux/spi/spi.h>
25 #include <linux/types.h>
27 #include "spi-fsl-cpm.h"
28 #include "spi-fsl-lib.h"
29 #include "spi-fsl-spi.h"
/*
 * CPM1 and CPM2 are mutually exclusive, so the command word is selected at
 * build time.  Defining CPM_SPI_CMD twice unconditionally is a redefinition
 * error; guard the two variants.
 */
#ifdef CONFIG_CPM1
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
#else
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
#endif

/* SPIE event register bits */
#define	SPIE_TXB	0x00000200	/* Last char is written to tx fifo */
#define	SPIE_RXB	0x00000100	/* Last char is written to rx buf */

/* SPCOM register values */
#define	SPCOM_STR	(1 << 23)	/* Start transmit */

#define	SPI_PRAM_SIZE	0x100
#define	SPI_MRBLR	((unsigned int)PAGE_SIZE)
/*
 * Dummy receive buffer shared by all CPM SPI controller instances, used
 * when a transfer has no rx_buf.  Access to the pointer and its refcount
 * is serialized by fsl_dummy_rx_lock.
 */
static void *fsl_dummy_rx;
static DEFINE_MUTEX(fsl_dummy_rx_lock);
static int fsl_dummy_rx_refcnt;
53 void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi
*mspi
)
55 if (mspi
->flags
& SPI_QE
) {
56 qe_issue_cmd(QE_INIT_TX_RX
, mspi
->subblock
,
57 QE_CR_PROTOCOL_UNSPECIFIED
, 0);
59 if (mspi
->flags
& SPI_CPM1
) {
60 out_be32(&mspi
->pram
->rstate
, 0);
61 out_be16(&mspi
->pram
->rbptr
,
62 in_be16(&mspi
->pram
->rbase
));
63 out_be32(&mspi
->pram
->tstate
, 0);
64 out_be16(&mspi
->pram
->tbptr
,
65 in_be16(&mspi
->pram
->tbase
));
67 cpm_command(CPM_SPI_CMD
, CPM_CR_INIT_TRX
);
72 static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi
*mspi
)
74 struct cpm_buf_desc __iomem
*tx_bd
= mspi
->tx_bd
;
75 struct cpm_buf_desc __iomem
*rx_bd
= mspi
->rx_bd
;
76 unsigned int xfer_len
= min(mspi
->count
, SPI_MRBLR
);
77 unsigned int xfer_ofs
;
78 struct fsl_spi_reg
*reg_base
= mspi
->reg_base
;
80 xfer_ofs
= mspi
->xfer_in_progress
->len
- mspi
->count
;
82 if (mspi
->rx_dma
== mspi
->dma_dummy_rx
)
83 out_be32(&rx_bd
->cbd_bufaddr
, mspi
->rx_dma
);
85 out_be32(&rx_bd
->cbd_bufaddr
, mspi
->rx_dma
+ xfer_ofs
);
86 out_be16(&rx_bd
->cbd_datlen
, 0);
87 out_be16(&rx_bd
->cbd_sc
, BD_SC_EMPTY
| BD_SC_INTRPT
| BD_SC_WRAP
);
89 if (mspi
->tx_dma
== mspi
->dma_dummy_tx
)
90 out_be32(&tx_bd
->cbd_bufaddr
, mspi
->tx_dma
);
92 out_be32(&tx_bd
->cbd_bufaddr
, mspi
->tx_dma
+ xfer_ofs
);
93 out_be16(&tx_bd
->cbd_datlen
, xfer_len
);
94 out_be16(&tx_bd
->cbd_sc
, BD_SC_READY
| BD_SC_INTRPT
| BD_SC_WRAP
|
98 mpc8xxx_spi_write_reg(®_base
->command
, SPCOM_STR
);
101 int fsl_spi_cpm_bufs(struct mpc8xxx_spi
*mspi
,
102 struct spi_transfer
*t
, bool is_dma_mapped
)
104 struct device
*dev
= mspi
->dev
;
105 struct fsl_spi_reg
*reg_base
= mspi
->reg_base
;
108 mspi
->map_tx_dma
= 0;
109 mspi
->map_rx_dma
= 0;
111 mspi
->map_tx_dma
= 1;
112 mspi
->map_rx_dma
= 1;
116 mspi
->tx_dma
= mspi
->dma_dummy_tx
;
117 mspi
->map_tx_dma
= 0;
121 mspi
->rx_dma
= mspi
->dma_dummy_rx
;
122 mspi
->map_rx_dma
= 0;
125 if (mspi
->map_tx_dma
) {
126 void *nonconst_tx
= (void *)mspi
->tx
; /* shut up gcc */
128 mspi
->tx_dma
= dma_map_single(dev
, nonconst_tx
, t
->len
,
130 if (dma_mapping_error(dev
, mspi
->tx_dma
)) {
131 dev_err(dev
, "unable to map tx dma\n");
134 } else if (t
->tx_buf
) {
135 mspi
->tx_dma
= t
->tx_dma
;
138 if (mspi
->map_rx_dma
) {
139 mspi
->rx_dma
= dma_map_single(dev
, mspi
->rx
, t
->len
,
141 if (dma_mapping_error(dev
, mspi
->rx_dma
)) {
142 dev_err(dev
, "unable to map rx dma\n");
145 } else if (t
->rx_buf
) {
146 mspi
->rx_dma
= t
->rx_dma
;
150 mpc8xxx_spi_write_reg(®_base
->mask
, SPIE_RXB
);
152 mspi
->xfer_in_progress
= t
;
153 mspi
->count
= t
->len
;
155 /* start CPM transfers */
156 fsl_spi_cpm_bufs_start(mspi
);
161 if (mspi
->map_tx_dma
)
162 dma_unmap_single(dev
, mspi
->tx_dma
, t
->len
, DMA_TO_DEVICE
);
166 void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi
*mspi
)
168 struct device
*dev
= mspi
->dev
;
169 struct spi_transfer
*t
= mspi
->xfer_in_progress
;
171 if (mspi
->map_tx_dma
)
172 dma_unmap_single(dev
, mspi
->tx_dma
, t
->len
, DMA_TO_DEVICE
);
173 if (mspi
->map_rx_dma
)
174 dma_unmap_single(dev
, mspi
->rx_dma
, t
->len
, DMA_FROM_DEVICE
);
175 mspi
->xfer_in_progress
= NULL
;
178 void fsl_spi_cpm_irq(struct mpc8xxx_spi
*mspi
, u32 events
)
181 struct fsl_spi_reg
*reg_base
= mspi
->reg_base
;
183 dev_dbg(mspi
->dev
, "%s: bd datlen %d, count %d\n", __func__
,
184 in_be16(&mspi
->rx_bd
->cbd_datlen
), mspi
->count
);
186 len
= in_be16(&mspi
->rx_bd
->cbd_datlen
);
187 if (len
> mspi
->count
) {
192 /* Clear the events */
193 mpc8xxx_spi_write_reg(®_base
->event
, events
);
197 fsl_spi_cpm_bufs_start(mspi
);
199 complete(&mspi
->done
);
202 static void *fsl_spi_alloc_dummy_rx(void)
204 mutex_lock(&fsl_dummy_rx_lock
);
207 fsl_dummy_rx
= kmalloc(SPI_MRBLR
, GFP_KERNEL
);
209 fsl_dummy_rx_refcnt
++;
211 mutex_unlock(&fsl_dummy_rx_lock
);
216 static void fsl_spi_free_dummy_rx(void)
218 mutex_lock(&fsl_dummy_rx_lock
);
220 switch (fsl_dummy_rx_refcnt
) {
229 fsl_dummy_rx_refcnt
--;
233 mutex_unlock(&fsl_dummy_rx_lock
);
236 static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi
*mspi
)
238 struct device
*dev
= mspi
->dev
;
239 struct device_node
*np
= dev
->of_node
;
242 void __iomem
*spi_base
;
243 unsigned long pram_ofs
= -ENOMEM
;
245 /* Can't use of_address_to_resource(), QE muram isn't at 0. */
246 iprop
= of_get_property(np
, "reg", &size
);
248 /* QE with a fixed pram location? */
249 if (mspi
->flags
& SPI_QE
&& iprop
&& size
== sizeof(*iprop
) * 4)
250 return cpm_muram_alloc_fixed(iprop
[2], SPI_PRAM_SIZE
);
252 /* QE but with a dynamic pram location? */
253 if (mspi
->flags
& SPI_QE
) {
254 pram_ofs
= cpm_muram_alloc(SPI_PRAM_SIZE
, 64);
255 qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE
, mspi
->subblock
,
256 QE_CR_PROTOCOL_UNSPECIFIED
, pram_ofs
);
260 spi_base
= of_iomap(np
, 1);
261 if (spi_base
== NULL
)
264 if (mspi
->flags
& SPI_CPM2
) {
265 pram_ofs
= cpm_muram_alloc(SPI_PRAM_SIZE
, 64);
266 out_be16(spi_base
, pram_ofs
);
268 struct spi_pram __iomem
*pram
= spi_base
;
269 u16 rpbase
= in_be16(&pram
->rpbase
);
271 /* Microcode relocation patch applied? */
275 pram_ofs
= cpm_muram_alloc(SPI_PRAM_SIZE
, 64);
276 out_be16(spi_base
, pram_ofs
);
284 int fsl_spi_cpm_init(struct mpc8xxx_spi
*mspi
)
286 struct device
*dev
= mspi
->dev
;
287 struct device_node
*np
= dev
->of_node
;
290 unsigned long pram_ofs
;
291 unsigned long bds_ofs
;
293 if (!(mspi
->flags
& SPI_CPM_MODE
))
296 if (!fsl_spi_alloc_dummy_rx())
299 if (mspi
->flags
& SPI_QE
) {
300 iprop
= of_get_property(np
, "cell-index", &size
);
301 if (iprop
&& size
== sizeof(*iprop
))
302 mspi
->subblock
= *iprop
;
304 switch (mspi
->subblock
) {
306 dev_warn(dev
, "cell-index unspecified, assuming SPI1\n");
309 mspi
->subblock
= QE_CR_SUBBLOCK_SPI1
;
312 mspi
->subblock
= QE_CR_SUBBLOCK_SPI2
;
317 pram_ofs
= fsl_spi_cpm_get_pram(mspi
);
318 if (IS_ERR_VALUE(pram_ofs
)) {
319 dev_err(dev
, "can't allocate spi parameter ram\n");
323 bds_ofs
= cpm_muram_alloc(sizeof(*mspi
->tx_bd
) +
324 sizeof(*mspi
->rx_bd
), 8);
325 if (IS_ERR_VALUE(bds_ofs
)) {
326 dev_err(dev
, "can't allocate bds\n");
330 mspi
->dma_dummy_tx
= dma_map_single(dev
, empty_zero_page
, PAGE_SIZE
,
332 if (dma_mapping_error(dev
, mspi
->dma_dummy_tx
)) {
333 dev_err(dev
, "unable to map dummy tx buffer\n");
337 mspi
->dma_dummy_rx
= dma_map_single(dev
, fsl_dummy_rx
, SPI_MRBLR
,
339 if (dma_mapping_error(dev
, mspi
->dma_dummy_rx
)) {
340 dev_err(dev
, "unable to map dummy rx buffer\n");
344 mspi
->pram
= cpm_muram_addr(pram_ofs
);
346 mspi
->tx_bd
= cpm_muram_addr(bds_ofs
);
347 mspi
->rx_bd
= cpm_muram_addr(bds_ofs
+ sizeof(*mspi
->tx_bd
));
349 /* Initialize parameter ram. */
350 out_be16(&mspi
->pram
->tbase
, cpm_muram_offset(mspi
->tx_bd
));
351 out_be16(&mspi
->pram
->rbase
, cpm_muram_offset(mspi
->rx_bd
));
352 out_8(&mspi
->pram
->tfcr
, CPMFCR_EB
| CPMFCR_GBL
);
353 out_8(&mspi
->pram
->rfcr
, CPMFCR_EB
| CPMFCR_GBL
);
354 out_be16(&mspi
->pram
->mrblr
, SPI_MRBLR
);
355 out_be32(&mspi
->pram
->rstate
, 0);
356 out_be32(&mspi
->pram
->rdp
, 0);
357 out_be16(&mspi
->pram
->rbptr
, 0);
358 out_be16(&mspi
->pram
->rbc
, 0);
359 out_be32(&mspi
->pram
->rxtmp
, 0);
360 out_be32(&mspi
->pram
->tstate
, 0);
361 out_be32(&mspi
->pram
->tdp
, 0);
362 out_be16(&mspi
->pram
->tbptr
, 0);
363 out_be16(&mspi
->pram
->tbc
, 0);
364 out_be32(&mspi
->pram
->txtmp
, 0);
369 dma_unmap_single(dev
, mspi
->dma_dummy_tx
, PAGE_SIZE
, DMA_TO_DEVICE
);
371 cpm_muram_free(bds_ofs
);
373 cpm_muram_free(pram_ofs
);
375 fsl_spi_free_dummy_rx();
379 void fsl_spi_cpm_free(struct mpc8xxx_spi
*mspi
)
381 struct device
*dev
= mspi
->dev
;
383 if (!(mspi
->flags
& SPI_CPM_MODE
))
386 dma_unmap_single(dev
, mspi
->dma_dummy_rx
, SPI_MRBLR
, DMA_FROM_DEVICE
);
387 dma_unmap_single(dev
, mspi
->dma_dummy_tx
, PAGE_SIZE
, DMA_TO_DEVICE
);
388 cpm_muram_free(cpm_muram_offset(mspi
->tx_bd
));
389 cpm_muram_free(cpm_muram_offset(mspi
->pram
));
390 fsl_spi_free_dummy_rx();