/*
 * PXA2xx SPI private DMA support.
 *
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>

#include "spi-pxa2xx.h"

#define DMA_INT_MASK		(DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
#define RESET_DMA_CHANNEL	(DCSR_NODESC | DMA_INT_MASK)

bool pxa2xx_spi_dma_is_possible(size_t len)
{
	/* Try to map the dma buffer and do a dma transfer if successful, but
	 * only if the length is non-zero and no larger than MAX_DMA_LEN.
	 *
	 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
	 * of PIO instead.  Care is needed above because the transfer may
	 * have been passed with buffers that are already dma mapped.
	 * A zero-length transfer in PIO mode will not try to write/read
	 * to/from the buffers.
	 *
	 * REVISIT: large transfers are exactly where we most want to be
	 * using DMA.  If this happens much, split those transfers into
	 * multiple DMA segments rather than forcing PIO.
	 */
	return len > 0 && len <= MAX_DMA_LEN;
}

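/*
 * Stream-map the tx and rx buffers of the current transfer for DMA.
 * NULL buffers are redirected to a driver-owned scratch word
 * (null_dma_buf) so that both channels always have a valid address.
 * Returns 1 when both buffers are mapped and DMA may be used,
 * 0 when the caller must fall back to PIO.
 */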
int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (!drv_data->cur_chip->enable_dma)
		return 0;

	if (msg->is_dma_mapped)
		return drv_data->rx_dma && drv_data->tx_dma;

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return 0;

	/* Modify setup if rx buffer is null */
	if (drv_data->rx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->rx = drv_data->null_dma_buf;
		drv_data->rx_map_len = 4;
	} else
		drv_data->rx_map_len = drv_data->len;

	/* Modify setup if tx buffer is null */
	if (drv_data->tx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->tx = drv_data->null_dma_buf;
		drv_data->tx_map_len = 4;
	} else
		drv_data->tx_map_len = drv_data->len;

	/* Stream map the tx buffer.  Always do DMA_TO_DEVICE first
	 * so we flush the cache *before* invalidating it, in case
	 * the tx and rx buffers overlap.
	 */
	drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
					  drv_data->tx_map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma))
		return 0;

	/* Stream map the rx buffer */
	drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
					  drv_data->rx_map_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, drv_data->rx_dma)) {
		dma_unmap_single(dev, drv_data->tx_dma,
				 drv_data->tx_map_len, DMA_TO_DEVICE);
		return 0;
	}

	return 1;
}

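/*
 * Undo pxa2xx_spi_map_dma_buffers().  Buffers mapped by the caller
 * (msg->is_dma_mapped) are left alone; only driver-owned mappings
 * are released here.
 */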
static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
{
	struct device *dev;

	if (!drv_data->dma_mapped)
		return;

	if (!drv_data->cur_msg->is_dma_mapped) {
		dev = &drv_data->cur_msg->spi->dev;
		dma_unmap_single(dev, drv_data->rx_dma,
				 drv_data->rx_map_len, DMA_FROM_DEVICE);
		dma_unmap_single(dev, drv_data->tx_dma,
				 drv_data->tx_map_len, DMA_TO_DEVICE);
	}

	drv_data->dma_mapped = 0;
}

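/*
 * Busy-wait helpers.  Both poll for roughly two jiffies worth of
 * loops_per_jiffy iterations and return the remaining loop budget,
 * so a return value of 0 means the wait timed out.
 */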
static int wait_ssp_rx_stall(void const __iomem *ioaddr)
{
	unsigned long limit = loops_per_jiffy << 1;

	while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
		cpu_relax();

	return limit;
}

static int wait_dma_channel_stop(int channel)
{
	unsigned long limit = loops_per_jiffy << 1;

	while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
		cpu_relax();

	return limit;
}

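/*
 * Shut down both DMA channels and the SSP after a DMA error, report
 * the error, and hand the message back to the transfer tasklet in
 * ERROR_STATE.
 */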
static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
				      const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);
	pxa2xx_spi_flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	pxa2xx_spi_unmap_dma_buffers(drv_data);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

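/*
 * Complete one DMA transfer: quiesce the channels, drain any trailing
 * bytes left in the receive FIFO, account for what was actually
 * transferred, and kick the tasklet to start the next transfer.
 */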
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	struct spi_message *msg = drv_data->cur_msg;

	/* Clear and disable interrupts on SSP and DMA channels */
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;

	if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_handler: dma rx channel stop failed\n");

	if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_transfer: ssp rx stall failed\n");

	pxa2xx_spi_unmap_dma_buffers(drv_data);

	/* Update the buffer pointer for the amount completed in dma */
	drv_data->rx += drv_data->len -
			(DCMD(drv_data->rx_channel) & DCMD_LENGTH);

	/* Read trailing data from the fifo; it does not matter how many
	 * bytes are in the fifo, just read until the buffer is full or
	 * the fifo is empty, whichever occurs first.
	 */
	drv_data->read(drv_data);

	/* Return count of what was actually read */
	msg->actual_length += drv_data->len -
			      (drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

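/*
 * DMA channel interrupt handler, registered for both the rx and tx
 * channels.  Bus errors abort the message; on PXA25x, where the SSP
 * has no receive-timeout interrupt, the tx end-of-transfer interrupt
 * is used to finish the transfer instead.
 */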
void pxa2xx_spi_dma_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;
	u32 irq_status = DCSR(channel) & DMA_INT_MASK;

	if (irq_status & DCSR_BUSERR) {

		if (channel == drv_data->tx_channel)
			pxa2xx_spi_dma_error_stop(drv_data,
				"dma_handler: bad bus address on tx channel");
		else
			pxa2xx_spi_dma_error_stop(drv_data,
				"dma_handler: bad bus address on rx channel");
		return;
	}

	/* The PXA25x SSP has no timeout interrupt; wait for trailing bytes */
	if ((channel == drv_data->tx_channel)
		&& (irq_status & DCSR_ENDINTR)
		&& (drv_data->ssp_type == PXA25x_SSP)) {

		/* Wait for rx to stall */
		if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
			dev_err(&drv_data->pdev->dev,
				"dma_handler: ssp rx stall failed\n");

		/* Finish this transfer, start the next */
		pxa2xx_spi_dma_transfer_complete(drv_data);
	}
}

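/*
 * SSP interrupt handler used while a DMA transfer is in flight.
 * Handles receive FIFO overruns, filters out false timeout interrupts
 * raised while the tx channel is still running, and completes the
 * transfer on a genuine timeout or when the rx buffer is full.
 */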
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
	u32 irq_status;
	void __iomem *reg = drv_data->ioaddr;

	irq_status = read_SSSR(reg) & drv_data->mask_sr;
	if (irq_status & SSSR_ROR) {
		pxa2xx_spi_dma_error_stop(drv_data,
			"dma_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	/* Check for a false positive timeout */
	if ((irq_status & SSSR_TINT)
		&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
		write_SSSR(SSSR_TINT, reg);
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {

		/* Clear and disable the timeout interrupt, do the rest in
		 * dma_transfer_complete */
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);

		/* Finish this transfer, start the next */
		pxa2xx_spi_dma_transfer_complete(drv_data);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}

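/*
 * Program both DMA channels for the current transfer.  The rx channel
 * is the flow source (SSDR -> memory) and the tx channel the flow
 * target (memory -> SSDR); address increment is suppressed on the
 * side that uses the scratch null_dma_buf word.
 */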
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
	u32 dma_width;

	switch (drv_data->n_bytes) {
	case 1:
		dma_width = DCMD_WIDTH1;
		break;
	case 2:
		dma_width = DCMD_WIDTH2;
		break;
	default:
		dma_width = DCMD_WIDTH4;
		break;
	}

	/* Setup rx DMA Channel */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
	DTADR(drv_data->rx_channel) = drv_data->rx_dma;
	if (drv_data->rx == drv_data->null_dma_buf)
		/* No target address increment */
		DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
						| dma_width
						| dma_burst
						| drv_data->len;
	else
		DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
						| DCMD_FLOWSRC
						| dma_width
						| dma_burst
						| drv_data->len;

	/* Setup tx DMA Channel */
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DSADR(drv_data->tx_channel) = drv_data->tx_dma;
	DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
	if (drv_data->tx == drv_data->null_dma_buf)
		/* No source address increment */
		DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
						| dma_width
						| dma_burst
						| drv_data->len;
	else
		DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
						| DCMD_FLOWTRG
						| dma_width
						| dma_burst
						| drv_data->len;

	/* Enable the dma end irq on the tx channel; the PXA25x SSP has no
	 * timeout interrupt, so this is how end of transfer is detected.
	 */
	if (drv_data->ssp_type == PXA25x_SSP)
		DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;

	return 0;
}

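/* Set both channels running; rx is started first, presumably so the
 * receive side is active before tx begins feeding the FIFO.
 */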
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
	DCSR(drv_data->rx_channel) |= DCSR_RUN;
	DCSR(drv_data->tx_channel) |= DCSR_RUN;
}

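/*
 * Request the two physical DMA channels (rx at high priority, tx at
 * medium) and map the SSP request lines to them via DRCMR.
 */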
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	struct device *dev = &drv_data->pdev->dev;
	struct ssp_device *ssp = drv_data->ssp;

	/* Get two DMA channels (rx and tx) */
	drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
					       DMA_PRIO_HIGH,
					       pxa2xx_spi_dma_handler,
					       drv_data);
	if (drv_data->rx_channel < 0) {
		dev_err(dev, "problem (%d) requesting rx channel\n",
			drv_data->rx_channel);
		return -ENODEV;
	}
	drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
					       DMA_PRIO_MEDIUM,
					       pxa2xx_spi_dma_handler,
					       drv_data);
	if (drv_data->tx_channel < 0) {
		dev_err(dev, "problem (%d) requesting tx channel\n",
			drv_data->tx_channel);
		pxa_free_dma(drv_data->rx_channel);
		return -ENODEV;
	}

	DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
	DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;

	return 0;
}

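/* Unmap the DRCMR request lines, then free both channels */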
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
	struct ssp_device *ssp = drv_data->ssp;

	DRCMR(ssp->drcmr_rx) = 0;
	DRCMR(ssp->drcmr_tx) = 0;

	if (drv_data->tx_channel != 0)
		pxa_free_dma(drv_data->tx_channel);
	if (drv_data->rx_channel != 0)
		pxa_free_dma(drv_data->rx_channel);
}

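/* Re-establish the DRCMR request-line mappings after resume from suspend */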
void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
{
	if (drv_data->rx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_rx) =
			DRCMR_MAPVLD | drv_data->rx_channel;
	if (drv_data->tx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_tx) =
			DRCMR_MAPVLD | drv_data->tx_channel;
}

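/*
 * Pick a DMA burst size and matching SSP FIFO thresholds for the
 * given word size.  *burst_code receives the DCMD burst field and
 * *threshold the SSCR1 RFT/TFT bits.  Returns non-zero when the
 * requested burst size had to be reduced to fit the FIFO.
 */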
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
					   struct spi_device *spi,
					   u8 bits_per_word, u32 *burst_code,
					   u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info =
			(struct pxa2xx_spi_chip *)spi->controller_data;
	int bytes_per_word;
	int burst_bytes;
	int thresh_words;
	int req_burst_size;
	int retval = 0;

	/* Set the threshold (in registers) to equal the amount of data
	 * represented by the burst size (in bytes).  The computation below
	 * is (burst_size rounded up to the nearest 8 bytes, word or long
	 * word) divided by (bytes/register).  The tx threshold is the
	 * inverse of the rx threshold, so that there is always enough data
	 * in the rx fifo to satisfy a burst and always enough space in the
	 * tx fifo to accept one (a tx burst will overrun the fifo if there
	 * is not enough space); enough empty space must also remain in the
	 * rx fifo for any data loaded into the tx fifo.
	 * Whenever burst_size (in bytes) equals bits/word, the fifo
	 * threshold will be 8, or half the fifo.
	 * The threshold can only be set to 2, 4 or 8, but not 16: to burst
	 * 16 into the tx fifo, the fifo would have to be empty, yet the
	 * minimum fifo trigger level is 1, so tx requests service while
	 * the fifo still has only 15 free slots.
	 */

	/* Find bytes/word */
	if (bits_per_word <= 8)
		bytes_per_word = 1;
	else if (bits_per_word <= 16)
		bytes_per_word = 2;
	else
		bytes_per_word = 4;

	/* Use struct pxa2xx_spi_chip->dma_burst_size if available */
	if (chip_info)
		req_burst_size = chip_info->dma_burst_size;
	else {
		switch (chip->dma_burst_size) {
		default:
			/* If the default burst size is not set, do it now */
			chip->dma_burst_size = DCMD_BURST8;
			/* fall through */
		case DCMD_BURST8:
			req_burst_size = 8;
			break;
		case DCMD_BURST16:
			req_burst_size = 16;
			break;
		case DCMD_BURST32:
			req_burst_size = 32;
			break;
		}
	}
	if (req_burst_size <= 8) {
		*burst_code = DCMD_BURST8;
		burst_bytes = 8;
	} else if (req_burst_size <= 16) {
		if (bytes_per_word == 1) {
			/* Don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
		}
	} else {
		if (bytes_per_word == 1) {
			/* Don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else if (bytes_per_word == 2) {
			/* Don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST32;
			burst_bytes = 32;
		}
	}

	thresh_words = burst_bytes / bytes_per_word;

	/* thresh_words will be between 2 and 8 */
	*threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
			| (SSCR1_TxTresh(16 - thresh_words) & SSCR1_TFT);

	return retval;
}