/*
 * PXA2xx SPI private DMA support.
 *
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>

#include "spi-pxa2xx.h"

#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)

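/*
 * Note (summary, inferred from the code below): this file drives the
 * PXA2xx "private" DMA engine directly through its register interface:
 * DCSR (control/status), DCMD (command), DSADR/DTADR (source/target
 * address) and DRCMR (request-to-channel mapping).  One physical channel
 * is used per direction; both are torn down with RESET_DMA_CHANNEL,
 * which clears the run state and masks the end, start and bus-error
 * interrupts collected in DMA_INT_MASK.
 */
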
bool pxa2xx_spi_dma_is_possible(size_t len)
{
	/* Try to map the dma buffer and do a dma transfer if successful,
	 * but only if the length is non-zero and less than MAX_DMA_LEN.
	 *
	 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
	 * of PIO instead.  Care is needed above because the transfer may
	 * have been passed with buffers that are already dma mapped.
	 * A zero-length transfer in PIO mode will not try to write/read
	 * to/from the buffers.
	 *
	 * REVISIT large transfers are exactly where we most want to be
	 * using DMA.  If this happens much, split those transfers into
	 * multiple DMA segments rather than forcing PIO.
	 */
	return len > 0 && len <= MAX_DMA_LEN;
}

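/*
 * Map the current transfer's buffers for streaming DMA.  Returns 1 when
 * both buffers are mapped and the transfer can use DMA, 0 to fall back
 * to PIO.  Half-duplex transfers may have a NULL rx or tx pointer; a
 * 4-byte scratch word (null_dma_buf) stands in as the DMA target/source
 * so the channel always has a valid bus address -- 4 bytes covers the
 * widest supported word size, and dma_prepare() later programs that
 * side of the channel without address increment.
 */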
int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (!drv_data->cur_chip->enable_dma)
		return 0;

	if (msg->is_dma_mapped)
		return drv_data->rx_dma && drv_data->tx_dma;

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return 0;

	/* Modify setup if rx buffer is null */
	if (drv_data->rx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->rx = drv_data->null_dma_buf;
		drv_data->rx_map_len = 4;
	} else
		drv_data->rx_map_len = drv_data->len;

	/* Modify setup if tx buffer is null */
	if (drv_data->tx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->tx = drv_data->null_dma_buf;
		drv_data->tx_map_len = 4;
	} else
		drv_data->tx_map_len = drv_data->len;

	/* Stream map the tx buffer.  Always do DMA_TO_DEVICE first
	 * so we flush the cache *before* invalidating it, in case
	 * the tx and rx buffers overlap.
	 */
	drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
					  drv_data->tx_map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma))
		return 0;

	/* Stream map the rx buffer */
	drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
					  drv_data->rx_map_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, drv_data->rx_dma)) {
		dma_unmap_single(dev, drv_data->tx_dma,
				 drv_data->tx_map_len, DMA_TO_DEVICE);
		return 0;
	}

	return 1;
}

static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
{
	struct device *dev;

	if (!drv_data->dma_mapped)
		return;

	if (!drv_data->cur_msg->is_dma_mapped) {
		dev = &drv_data->cur_msg->spi->dev;
		dma_unmap_single(dev, drv_data->rx_dma,
				 drv_data->rx_map_len, DMA_FROM_DEVICE);
		dma_unmap_single(dev, drv_data->tx_dma,
				 drv_data->tx_map_len, DMA_TO_DEVICE);
	}

	drv_data->dma_mapped = 0;
}

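/*
 * Bounded busy-waits.  Both helpers spin for at most loops_per_jiffy << 1
 * iterations (on the order of two jiffies of CPU time) and return the
 * remaining loop budget, so a return value of 0 means the condition was
 * never met, i.e. a timeout.
 */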
static int wait_ssp_rx_stall(void const __iomem *ioaddr)
{
	unsigned long limit = loops_per_jiffy << 1;

	while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
		cpu_relax();

	return limit;
}

static int wait_dma_channel_stop(int channel)
{
	unsigned long limit = loops_per_jiffy << 1;

	while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
		cpu_relax();

	return limit;
}

static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
				      const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);
	pxa2xx_spi_flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	pxa2xx_spi_unmap_dma_buffers(drv_data);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

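/*
 * Finish a DMA transfer: DCMD_LENGTH masks out the byte count still
 * pending in the rx channel's command register, so len minus that
 * residue is the number of bytes DMA actually delivered.  Any trailing
 * bytes left in the receive FIFO (fewer than one burst) are then
 * drained with the PIO read routine before the next transfer is
 * scheduled.
 */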
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	struct spi_message *msg = drv_data->cur_msg;

	/* Clear and disable interrupts on SSP and DMA channels */
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;

	if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_handler: dma rx channel stop failed\n");

	if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_transfer: ssp rx stall failed\n");

	pxa2xx_spi_unmap_dma_buffers(drv_data);

	/* Update the buffer pointer for the amount completed in dma */
	drv_data->rx += drv_data->len -
			(DCMD(drv_data->rx_channel) & DCMD_LENGTH);

	/* Read trailing data from the fifo; it does not matter how many
	 * bytes are in the fifo, just read until the buffer is full or
	 * the fifo is empty, whichever occurs first.
	 */
	drv_data->read(drv_data);

	/* Return count of what was actually read */
	msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

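/*
 * DMA channel interrupt callback, registered for both channels via
 * pxa_request_dma().  A bus error on either channel aborts the message.
 * On PXA25x, which has no SSP receive-timeout interrupt, the tx
 * channel's end interrupt is used instead: wait for the receive side to
 * stall, then complete the transfer.
 */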
void pxa2xx_spi_dma_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;
	u32 irq_status = DCSR(channel) & DMA_INT_MASK;

	if (irq_status & DCSR_BUSERR) {

		if (channel == drv_data->tx_channel)
			pxa2xx_spi_dma_error_stop(drv_data,
				"dma_handler: bad bus address on tx channel");
		else
			pxa2xx_spi_dma_error_stop(drv_data,
				"dma_handler: bad bus address on rx channel");
		return;
	}

	/* PXA25x SSP has no timeout interrupt, wait for trailing bytes */
	if ((channel == drv_data->tx_channel)
		&& (irq_status & DCSR_ENDINTR)
		&& (drv_data->ssp_type == PXA25x_SSP)) {

		/* Wait for rx to stall */
		if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
			dev_err(&drv_data->pdev->dev,
				"dma_handler: ssp rx stall failed\n");

		/* Finish this transfer, start the next */
		pxa2xx_spi_dma_transfer_complete(drv_data);
	}
}

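/*
 * SSP interrupt service path while a DMA transfer is in flight.  Three
 * cases are distinguished: a receive FIFO overrun (fatal), a timeout
 * interrupt that fired while the tx channel is still running (a false
 * positive, just acknowledged), and a genuine timeout or fully drained
 * rx buffer, which completes the transfer.
 */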
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
	u32 irq_status;
	void __iomem *reg = drv_data->ioaddr;

	irq_status = read_SSSR(reg) & drv_data->mask_sr;
	if (irq_status & SSSR_ROR) {
		pxa2xx_spi_dma_error_stop(drv_data,
					  "dma_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	/* Check for false positive timeout */
	if ((irq_status & SSSR_TINT)
		&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
		write_SSSR(SSSR_TINT, reg);
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {

		/* Clear and disable timeout interrupt, do the rest in
		 * dma_transfer_complete */
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);

		/* Finish this transfer, start the next */
		pxa2xx_spi_dma_transfer_complete(drv_data);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}

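/*
 * Program both channels for the next transfer.  The rx channel reads
 * from the SSP data register (flow-controlled by the source peripheral,
 * DCMD_FLOWSRC) into memory; the tx channel writes from memory into the
 * data register (flow-controlled by the target, DCMD_FLOWTRG).  When a
 * direction uses the scratch null_dma_buf, the corresponding address
 * increment bit is left clear so the whole transfer lands in, or is fed
 * from, that single word.
 */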
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
	u32 dma_width;

	switch (drv_data->n_bytes) {
	case 1:
		dma_width = DCMD_WIDTH1;
		break;
	case 2:
		dma_width = DCMD_WIDTH2;
		break;
	default:
		dma_width = DCMD_WIDTH4;
		break;
	}

	/* Setup rx DMA Channel */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
	DTADR(drv_data->rx_channel) = drv_data->rx_dma;
	if (drv_data->rx == drv_data->null_dma_buf)
		/* No target address increment */
		DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
						| dma_width
						| dma_burst
						| drv_data->len;
	else
		DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
						| DCMD_FLOWSRC
						| dma_width
						| dma_burst
						| drv_data->len;

	/* Setup tx DMA Channel */
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DSADR(drv_data->tx_channel) = drv_data->tx_dma;
	DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
	if (drv_data->tx == drv_data->null_dma_buf)
		/* No source address increment */
		DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
						| dma_width
						| dma_burst
						| drv_data->len;
	else
		DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
						| DCMD_FLOWTRG
						| dma_width
						| dma_burst
						| drv_data->len;

	/* Enable dma end irqs on SSP to detect end of transfer */
	if (drv_data->ssp_type == PXA25x_SSP)
		DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;

	return 0;
}

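/* Kick off both channels; rx is started first, presumably so the
 * receive channel is already draining the FIFO by the time tx begins
 * pushing data into it.
 */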
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
	DCSR(drv_data->rx_channel) |= DCSR_RUN;
	DCSR(drv_data->tx_channel) |= DCSR_RUN;
}

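/*
 * Claim two physical DMA channels and route the SSP's rx/tx request
 * lines to them through DRCMR; DRCMR_MAPVLD marks the mapping valid.
 * rx gets the higher-priority channel, presumably because an
 * unserviced receive request risks a FIFO overrun while tx merely
 * stalls.
 */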
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	struct device *dev = &drv_data->pdev->dev;
	struct ssp_device *ssp = drv_data->ssp;

	/* Get two DMA channels (rx and tx) */
	drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
					       DMA_PRIO_HIGH,
					       pxa2xx_spi_dma_handler,
					       drv_data);
	if (drv_data->rx_channel < 0) {
		dev_err(dev, "problem (%d) requesting rx channel\n",
			drv_data->rx_channel);
		return -ENODEV;
	}
	drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
					       DMA_PRIO_MEDIUM,
					       pxa2xx_spi_dma_handler,
					       drv_data);
	if (drv_data->tx_channel < 0) {
		dev_err(dev, "problem (%d) requesting tx channel\n",
			drv_data->tx_channel);
		pxa_free_dma(drv_data->rx_channel);
		return -ENODEV;
	}

	DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
	DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;

	return 0;
}

void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
	struct ssp_device *ssp = drv_data->ssp;

	DRCMR(ssp->drcmr_rx) = 0;
	DRCMR(ssp->drcmr_tx) = 0;

	if (drv_data->tx_channel != 0)
		pxa_free_dma(drv_data->tx_channel);
	if (drv_data->rx_channel != 0)
		pxa_free_dma(drv_data->rx_channel);
}

void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
{
	if (drv_data->rx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_rx) =
			DRCMR_MAPVLD | drv_data->rx_channel;
	if (drv_data->tx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_tx) =
			DRCMR_MAPVLD | drv_data->tx_channel;
}

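/*
 * Worked example of the threshold computation below: with 8-bit words
 * (bytes_per_word = 1) and a requested 16-byte burst, the burst is
 * capped at DCMD_BURST8 to stay within half the 16-entry FIFO (and the
 * function returns 1 to flag the adjustment); thresh_words is then
 * 8 / 1 = 8, giving an rx trigger of 8 entries and a tx trigger of
 * 16 - 8 = 8 entries.
 */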
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
					   struct spi_device *spi,
					   u8 bits_per_word, u32 *burst_code,
					   u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info =
			(struct pxa2xx_spi_chip *)spi->controller_data;
	int bytes_per_word;
	int burst_bytes;
	int thresh_words;
	int req_burst_size;
	int retval = 0;

	/* Set the threshold (in registers) to equal the same amount of data
	 * as represented by burst size (in bytes).  The computation below
	 * is (burst_size rounded up to nearest 8 byte, word or long word)
	 * divided by (bytes/register).  The tx threshold is the inverse of
	 * the rx, so that there will always be enough data in the rx fifo
	 * to satisfy a burst, and there will always be enough space in the
	 * tx fifo to accept a burst (a tx burst will overwrite the fifo if
	 * there is not enough space); there must always remain enough empty
	 * space in the rx fifo for any data loaded to the tx fifo.
	 * Whenever burst_size (in bytes) equals bits/word, the fifo
	 * threshold will be 8, or half the fifo.
	 * The threshold can only be set to 2, 4 or 8, but not 16, because
	 * to burst 16 to the tx fifo, the fifo would have to be empty;
	 * however, the minimum fifo trigger level is 1, and the tx will
	 * request service when the fifo is at this level, with only 15
	 * spaces left.
	 */

	/* Find bytes/word */
	if (bits_per_word <= 8)
		bytes_per_word = 1;
	else if (bits_per_word <= 16)
		bytes_per_word = 2;
	else
		bytes_per_word = 4;

	/* Use struct pxa2xx_spi_chip->dma_burst_size if available */
	if (chip_info)
		req_burst_size = chip_info->dma_burst_size;
	else {
		switch (chip->dma_burst_size) {
		default:
			/* If the default burst size is not set, do it now */
			chip->dma_burst_size = DCMD_BURST8;
			/* fall through */
		case DCMD_BURST8:
			req_burst_size = 8;
			break;
		case DCMD_BURST16:
			req_burst_size = 16;
			break;
		case DCMD_BURST32:
			req_burst_size = 32;
			break;
		}
	}
	if (req_burst_size <= 8) {
		*burst_code = DCMD_BURST8;
		burst_bytes = 8;
	} else if (req_burst_size <= 16) {
		if (bytes_per_word == 1) {
			/* Don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
		}
	} else {
		if (bytes_per_word == 1) {
			/* Don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else if (bytes_per_word == 2) {
			/* Don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST32;
			burst_bytes = 32;
		}
	}

	thresh_words = burst_bytes / bytes_per_word;

	/* thresh_words will be between 2 and 8 */
	*threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
			| (SSCR1_TxTresh(16 - thresh_words) & SSCR1_TFT);

	return retval;
}