Commit | Line | Data |
---|---|---|
deba2580 AB |
1 | /* |
2 | * IMG SPFI controller driver | |
3 | * | |
4 | * Copyright (C) 2007,2008,2013 Imagination Technologies Ltd. | |
5 | * Copyright (C) 2014 Google, Inc. | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify it | |
8 | * under the terms and conditions of the GNU General Public License, | |
9 | * version 2, as published by the Free Software Foundation. | |
10 | */ | |
11 | ||
12 | #include <linux/clk.h> | |
13 | #include <linux/delay.h> | |
14 | #include <linux/dmaengine.h> | |
15 | #include <linux/interrupt.h> | |
16 | #include <linux/io.h> | |
17 | #include <linux/irq.h> | |
18 | #include <linux/module.h> | |
19 | #include <linux/of.h> | |
20 | #include <linux/platform_device.h> | |
21 | #include <linux/pm_runtime.h> | |
22 | #include <linux/scatterlist.h> | |
23 | #include <linux/slab.h> | |
24 | #include <linux/spi/spi.h> | |
25 | #include <linux/spinlock.h> | |
26 | ||
/* Per-chip-select parameter register: bit clock divider and CS timing. */
#define SPFI_DEVICE_PARAMETER(x)		(0x00 + 0x4 * (x))
#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT	24
#define SPFI_DEVICE_PARAMETER_BITCLK_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT	16
#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT	8
#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT	0
#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK	0xff

/* Main control register: enable, soft reset, DMA requests, transfer mode. */
#define SPFI_CONTROL				0x14
#define SPFI_CONTROL_CONTINUE			BIT(12)
#define SPFI_CONTROL_SOFT_RESET			BIT(11)
#define SPFI_CONTROL_SEND_DMA			BIT(10)
#define SPFI_CONTROL_GET_DMA			BIT(9)
#define SPFI_CONTROL_TMODE_SHIFT		5
#define SPFI_CONTROL_TMODE_MASK			0x7
#define SPFI_CONTROL_TMODE_SINGLE		0
#define SPFI_CONTROL_TMODE_DUAL			1
#define SPFI_CONTROL_TMODE_QUAD			2
#define SPFI_CONTROL_SPFI_EN			BIT(0)

/* Transaction register: transfer size in bytes. */
#define SPFI_TRANSACTION			0x18
#define SPFI_TRANSACTION_TSIZE_SHIFT		16
#define SPFI_TRANSACTION_TSIZE_MASK		0xffff

/* Port state: active device select plus per-CS clock polarity/phase. */
#define SPFI_PORT_STATE				0x1c
#define SPFI_PORT_STATE_DEV_SEL_SHIFT		20
#define SPFI_PORT_STATE_DEV_SEL_MASK		0x7
#define SPFI_PORT_STATE_CK_POL(x)		BIT(19 - (x))
#define SPFI_PORT_STATE_CK_PHASE(x)		BIT(14 - (x))

/* FIFO data windows: 32-bit accesses all four FIFOs, 8-bit a single one. */
#define SPFI_TX_32BIT_VALID_DATA		0x20
#define SPFI_TX_8BIT_VALID_DATA			0x24
#define SPFI_RX_32BIT_VALID_DATA		0x28
#define SPFI_RX_8BIT_VALID_DATA			0x2c

/* Interrupt status/enable/clear and their event bits. */
#define SPFI_INTERRUPT_STATUS			0x30
#define SPFI_INTERRUPT_ENABLE			0x34
#define SPFI_INTERRUPT_CLEAR			0x38
#define SPFI_INTERRUPT_IACCESS			BIT(12)
#define SPFI_INTERRUPT_GDEX8BIT			BIT(11)
#define SPFI_INTERRUPT_ALLDONETRIG		BIT(9)
#define SPFI_INTERRUPT_GDFUL			BIT(8)
#define SPFI_INTERRUPT_GDHF			BIT(7)
#define SPFI_INTERRUPT_GDEX32BIT		BIT(6)
#define SPFI_INTERRUPT_GDTRIG			BIT(5)
#define SPFI_INTERRUPT_SDFUL			BIT(3)
#define SPFI_INTERRUPT_SDHF			BIT(2)
#define SPFI_INTERRUPT_SDE			BIT(1)
#define SPFI_INTERRUPT_SDTRIG			BIT(0)

/*
 * There are four parallel FIFOs of 16 bytes each.  The word buffer
 * (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
 * effective FIFO size of 64 bytes.  The byte buffer (*_8BIT_VALID_DATA)
 * accesses only a single FIFO, resulting in an effective FIFO size of
 * 16 bytes.
 */
#define SPFI_32BIT_FIFO_SIZE			64
#define SPFI_8BIT_FIFO_SIZE			16
88 | ||
/* Driver-private state for one SPFI controller instance. */
struct img_spfi {
	struct device *dev;
	struct spi_master *master;
	/* Protects tx_dma_busy/rx_dma_busy against the DMA completion callbacks. */
	spinlock_t lock;

	void __iomem *regs;	/* Mapped register block. */
	phys_addr_t phys;	/* Physical base; used to form DMA FIFO addresses. */
	int irq;
	struct clk *spfi_clk;	/* SPFI interface clock; its rate sets max_speed_hz. */
	struct clk *sys_clk;	/* System/register clock. */

	struct dma_chan *rx_ch;	/* NULL when falling back to PIO. */
	struct dma_chan *tx_ch;	/* NULL when falling back to PIO. */
	bool tx_dma_busy;	/* TX DMA submitted but callback not yet run. */
	bool rx_dma_busy;	/* RX DMA submitted but callback not yet run. */
};
105 | ||
106 | static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg) | |
107 | { | |
108 | return readl(spfi->regs + reg); | |
109 | } | |
110 | ||
111 | static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg) | |
112 | { | |
113 | writel(val, spfi->regs + reg); | |
114 | } | |
115 | ||
116 | static inline void spfi_start(struct img_spfi *spfi) | |
117 | { | |
118 | u32 val; | |
119 | ||
120 | val = spfi_readl(spfi, SPFI_CONTROL); | |
121 | val |= SPFI_CONTROL_SPFI_EN; | |
122 | spfi_writel(spfi, val, SPFI_CONTROL); | |
123 | } | |
124 | ||
125 | static inline void spfi_stop(struct img_spfi *spfi) | |
126 | { | |
127 | u32 val; | |
128 | ||
129 | val = spfi_readl(spfi, SPFI_CONTROL); | |
130 | val &= ~SPFI_CONTROL_SPFI_EN; | |
131 | spfi_writel(spfi, val, SPFI_CONTROL); | |
132 | } | |
133 | ||
134 | static inline void spfi_reset(struct img_spfi *spfi) | |
135 | { | |
136 | spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL); | |
deba2580 AB |
137 | spfi_writel(spfi, 0, SPFI_CONTROL); |
138 | } | |
139 | ||
140 | static void spfi_flush_tx_fifo(struct img_spfi *spfi) | |
141 | { | |
142 | unsigned long timeout = jiffies + msecs_to_jiffies(10); | |
143 | ||
144 | spfi_writel(spfi, SPFI_INTERRUPT_SDE, SPFI_INTERRUPT_CLEAR); | |
145 | while (time_before(jiffies, timeout)) { | |
146 | if (spfi_readl(spfi, SPFI_INTERRUPT_STATUS) & | |
147 | SPFI_INTERRUPT_SDE) | |
148 | return; | |
149 | cpu_relax(); | |
150 | } | |
151 | ||
152 | dev_err(spfi->dev, "Timed out waiting for FIFO to drain\n"); | |
153 | spfi_reset(spfi); | |
154 | } | |
155 | ||
156 | static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf, | |
157 | unsigned int max) | |
158 | { | |
159 | unsigned int count = 0; | |
160 | u32 status; | |
161 | ||
549858ce | 162 | while (count < max / 4) { |
deba2580 AB |
163 | spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR); |
164 | status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); | |
165 | if (status & SPFI_INTERRUPT_SDFUL) | |
166 | break; | |
549858ce AB |
167 | spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA); |
168 | count++; | |
deba2580 AB |
169 | } |
170 | ||
549858ce | 171 | return count * 4; |
deba2580 AB |
172 | } |
173 | ||
174 | static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf, | |
175 | unsigned int max) | |
176 | { | |
177 | unsigned int count = 0; | |
178 | u32 status; | |
179 | ||
180 | while (count < max) { | |
181 | spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR); | |
182 | status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); | |
183 | if (status & SPFI_INTERRUPT_SDFUL) | |
184 | break; | |
185 | spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA); | |
186 | count++; | |
187 | } | |
188 | ||
189 | return count; | |
190 | } | |
191 | ||
192 | static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf, | |
193 | unsigned int max) | |
194 | { | |
195 | unsigned int count = 0; | |
196 | u32 status; | |
197 | ||
549858ce | 198 | while (count < max / 4) { |
deba2580 AB |
199 | spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT, |
200 | SPFI_INTERRUPT_CLEAR); | |
201 | status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); | |
202 | if (!(status & SPFI_INTERRUPT_GDEX32BIT)) | |
203 | break; | |
549858ce AB |
204 | buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA); |
205 | count++; | |
deba2580 AB |
206 | } |
207 | ||
549858ce | 208 | return count * 4; |
deba2580 AB |
209 | } |
210 | ||
211 | static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf, | |
212 | unsigned int max) | |
213 | { | |
214 | unsigned int count = 0; | |
215 | u32 status; | |
216 | ||
217 | while (count < max) { | |
218 | spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT, | |
219 | SPFI_INTERRUPT_CLEAR); | |
220 | status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); | |
221 | if (!(status & SPFI_INTERRUPT_GDEX8BIT)) | |
222 | break; | |
223 | buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA); | |
224 | count++; | |
225 | } | |
226 | ||
227 | return count; | |
228 | } | |
229 | ||
/*
 * Run one transfer entirely by polled PIO.
 *
 * Returns 0 on success or -ETIMEDOUT if the FIFOs could not be serviced
 * fast enough within the computed deadline (after which the controller
 * is soft-reset).
 */
static int img_spfi_start_pio(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	unsigned int tx_bytes = 0, rx_bytes = 0;
	const void *tx_buf = xfer->tx_buf;
	void *rx_buf = xfer->rx_buf;
	unsigned long timeout;

	/* Only the directions with a buffer need servicing. */
	if (tx_buf)
		tx_bytes = xfer->len;
	if (rx_buf)
		rx_bytes = xfer->len;

	spfi_start(spfi);

	/* Deadline: time for xfer->len * 8 bits at speed_hz, plus 100ms slack. */
	timeout = jiffies +
		msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
	while ((tx_bytes > 0 || rx_bytes > 0) &&
	       time_before(jiffies, timeout)) {
		unsigned int tx_count, rx_count;

		/* Use the 4x-wide word window while at least a word remains. */
		if (tx_bytes >= 4)
			tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
		else
			tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);

		if (rx_bytes >= 4)
			rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
		else
			rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);

		tx_buf += tx_count;
		rx_buf += rx_count;
		tx_bytes -= tx_count;
		rx_bytes -= rx_count;

		cpu_relax();
	}

	if (rx_bytes > 0 || tx_bytes > 0) {
		dev_err(spfi->dev, "PIO transfer timed out\n");
		spfi_reset(spfi);
		return -ETIMEDOUT;
	}

	/* Make sure everything queued for TX actually left the FIFO. */
	if (tx_buf)
		spfi_flush_tx_fifo(spfi);
	spfi_stop(spfi);

	return 0;
}
283 | ||
284 | static void img_spfi_dma_rx_cb(void *data) | |
285 | { | |
286 | struct img_spfi *spfi = data; | |
287 | unsigned long flags; | |
288 | ||
289 | spin_lock_irqsave(&spfi->lock, flags); | |
290 | ||
291 | spfi->rx_dma_busy = false; | |
292 | if (!spfi->tx_dma_busy) { | |
293 | spfi_stop(spfi); | |
294 | spi_finalize_current_transfer(spfi->master); | |
295 | } | |
296 | ||
297 | spin_unlock_irqrestore(&spfi->lock, flags); | |
298 | } | |
299 | ||
300 | static void img_spfi_dma_tx_cb(void *data) | |
301 | { | |
302 | struct img_spfi *spfi = data; | |
303 | unsigned long flags; | |
304 | ||
305 | spfi_flush_tx_fifo(spfi); | |
306 | ||
307 | spin_lock_irqsave(&spfi->lock, flags); | |
308 | ||
309 | spfi->tx_dma_busy = false; | |
310 | if (!spfi->rx_dma_busy) { | |
311 | spfi_stop(spfi); | |
312 | spi_finalize_current_transfer(spfi->master); | |
313 | } | |
314 | ||
315 | spin_unlock_irqrestore(&spfi->lock, flags); | |
316 | } | |
317 | ||
/*
 * Run one transfer via DMA.
 *
 * The submission order is deliberate: the RX descriptor is issued before
 * the controller is enabled (so no received data can be dropped), and the
 * TX descriptor only after enabling.  Returns 1 to tell the SPI core the
 * transfer completes asynchronously, or -EIO if descriptor preparation
 * failed.
 */
static int img_spfi_start_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
	struct dma_slave_config rxconf, txconf;

	spfi->rx_dma_busy = false;
	spfi->tx_dma_busy = false;

	if (xfer->rx_buf) {
		rxconf.direction = DMA_DEV_TO_MEM;
		/* Word-multiple lengths can use the 4x-wide FIFO window. */
		if (xfer->len % 4 == 0) {
			rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
			rxconf.src_addr_width = 4;
			rxconf.src_maxburst = 4;
		} else {
			rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
			rxconf.src_addr_width = 1;
			rxconf.src_maxburst = 4;
		}
		dmaengine_slave_config(spfi->rx_ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
						 xfer->rx_sg.nents,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!rxdesc)
			goto stop_dma;

		rxdesc->callback = img_spfi_dma_rx_cb;
		rxdesc->callback_param = spfi;
	}

	if (xfer->tx_buf) {
		txconf.direction = DMA_MEM_TO_DEV;
		if (xfer->len % 4 == 0) {
			txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
			txconf.dst_addr_width = 4;
			txconf.dst_maxburst = 4;
		} else {
			txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
			txconf.dst_addr_width = 1;
			txconf.dst_maxburst = 4;
		}
		dmaengine_slave_config(spfi->tx_ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
						 xfer->tx_sg.nents,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!txdesc)
			goto stop_dma;

		txdesc->callback = img_spfi_dma_tx_cb;
		txdesc->callback_param = spfi;
	}

	/* Issue RX first so the channel is ready before data can arrive. */
	if (xfer->rx_buf) {
		spfi->rx_dma_busy = true;
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(spfi->rx_ch);
	}

	spfi_start(spfi);

	/* TX only after the controller is enabled. */
	if (xfer->tx_buf) {
		spfi->tx_dma_busy = true;
		dmaengine_submit(txdesc);
		dma_async_issue_pending(spfi->tx_ch);
	}

	return 1;

stop_dma:
	dmaengine_terminate_all(spfi->rx_ch);
	dmaengine_terminate_all(spfi->tx_ch);
	return -EIO;
}
398 | ||
b6fe3977 EG |
399 | static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg) |
400 | { | |
401 | struct img_spfi *spfi = spi_master_get_devdata(master); | |
402 | u32 val; | |
403 | ||
404 | val = spfi_readl(spfi, SPFI_PORT_STATE); | |
405 | if (msg->spi->mode & SPI_CPHA) | |
406 | val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select); | |
407 | else | |
408 | val &= ~SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select); | |
409 | if (msg->spi->mode & SPI_CPOL) | |
410 | val |= SPFI_PORT_STATE_CK_POL(msg->spi->chip_select); | |
411 | else | |
412 | val &= ~SPFI_PORT_STATE_CK_POL(msg->spi->chip_select); | |
413 | spfi_writel(spfi, val, SPFI_PORT_STATE); | |
414 | ||
415 | return 0; | |
416 | } | |
417 | ||
/*
 * Program the per-transfer hardware parameters: bit-clock divider,
 * transaction size, DMA request enables, dual/quad transfer mode, and
 * whether CS should be held between transfers (CONTINUE).
 */
static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	u32 val, div;

	/*
	 * output = spfi_clk * (BITCLK / 512), where BITCLK must be a
	 * power of 2 up to 256 (where 255 == 256 since BITCLK is 8 bits)
	 */
	div = DIV_ROUND_UP(master->max_speed_hz, xfer->speed_hz);
	div = clamp(512 / (1 << get_count_order(div)), 1, 255);

	/* Only replace the BITCLK field; CS timing fields are preserved. */
	val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
	val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
		 SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
	val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
	spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));

	spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
		    SPFI_TRANSACTION);

	val = spfi_readl(spfi, SPFI_CONTROL);
	val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
	if (xfer->tx_buf)
		val |= SPFI_CONTROL_SEND_DMA;
	if (xfer->rx_buf)
		val |= SPFI_CONTROL_GET_DMA;
	/* Dual/quad only when both directions agree; otherwise single. */
	val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
	if (xfer->tx_nbits == SPI_NBITS_DUAL &&
	    xfer->rx_nbits == SPI_NBITS_DUAL)
		val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
	else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
		 xfer->rx_nbits == SPI_NBITS_QUAD)
		val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
	/* Keep CS asserted across to the next transfer unless cs_change. */
	val &= ~SPFI_CONTROL_CONTINUE;
	if (!xfer->cs_change && !list_is_last(&xfer->transfer_list,
					      &master->cur_msg->transfers))
		val |= SPFI_CONTROL_CONTINUE;
	spfi_writel(spfi, val, SPFI_CONTROL);
}
459 | ||
460 | static int img_spfi_transfer_one(struct spi_master *master, | |
461 | struct spi_device *spi, | |
462 | struct spi_transfer *xfer) | |
463 | { | |
464 | struct img_spfi *spfi = spi_master_get_devdata(spi->master); | |
465 | bool dma_reset = false; | |
466 | unsigned long flags; | |
467 | int ret; | |
468 | ||
469 | /* | |
470 | * Stop all DMA and reset the controller if the previous transaction | |
471 | * timed-out and never completed it's DMA. | |
472 | */ | |
473 | spin_lock_irqsave(&spfi->lock, flags); | |
474 | if (spfi->tx_dma_busy || spfi->rx_dma_busy) { | |
475 | dev_err(spfi->dev, "SPI DMA still busy\n"); | |
476 | dma_reset = true; | |
477 | } | |
478 | spin_unlock_irqrestore(&spfi->lock, flags); | |
479 | ||
480 | if (dma_reset) { | |
481 | dmaengine_terminate_all(spfi->tx_ch); | |
482 | dmaengine_terminate_all(spfi->rx_ch); | |
483 | spfi_reset(spfi); | |
484 | } | |
485 | ||
486 | img_spfi_config(master, spi, xfer); | |
487 | if (master->can_dma && master->can_dma(master, spi, xfer)) | |
488 | ret = img_spfi_start_dma(master, spi, xfer); | |
489 | else | |
490 | ret = img_spfi_start_pio(master, spi, xfer); | |
491 | ||
492 | return ret; | |
493 | } | |
494 | ||
495 | static void img_spfi_set_cs(struct spi_device *spi, bool enable) | |
496 | { | |
497 | struct img_spfi *spfi = spi_master_get_devdata(spi->master); | |
498 | u32 val; | |
499 | ||
500 | val = spfi_readl(spfi, SPFI_PORT_STATE); | |
501 | val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK << SPFI_PORT_STATE_DEV_SEL_SHIFT); | |
502 | val |= spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT; | |
503 | spfi_writel(spfi, val, SPFI_PORT_STATE); | |
504 | } | |
505 | ||
506 | static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi, | |
507 | struct spi_transfer *xfer) | |
508 | { | |
549858ce | 509 | if (xfer->len > SPFI_32BIT_FIFO_SIZE) |
deba2580 AB |
510 | return true; |
511 | return false; | |
512 | } | |
513 | ||
514 | static irqreturn_t img_spfi_irq(int irq, void *dev_id) | |
515 | { | |
516 | struct img_spfi *spfi = (struct img_spfi *)dev_id; | |
517 | u32 status; | |
518 | ||
519 | status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); | |
520 | if (status & SPFI_INTERRUPT_IACCESS) { | |
521 | spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR); | |
522 | dev_err(spfi->dev, "Illegal access interrupt"); | |
523 | return IRQ_HANDLED; | |
524 | } | |
525 | ||
526 | return IRQ_NONE; | |
527 | } | |
528 | ||
/*
 * Probe: map registers, claim the IRQ, enable clocks, reset the block,
 * set up the spi_master, optionally acquire DMA channels (falling back
 * to PIO), enable runtime PM, and register the master.
 *
 * Error paths unwind in reverse order via the goto chain at the bottom.
 */
static int img_spfi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct img_spfi *spfi;
	struct resource *res;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
	if (!master)
		return -ENOMEM;
	platform_set_drvdata(pdev, master);

	spfi = spi_master_get_devdata(master);
	spfi->dev = &pdev->dev;
	spfi->master = master;
	spin_lock_init(&spfi->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spfi->regs = devm_ioremap_resource(spfi->dev, res);
	if (IS_ERR(spfi->regs)) {
		ret = PTR_ERR(spfi->regs);
		goto put_spi;
	}
	/* Physical base is kept for programming DMA FIFO addresses. */
	spfi->phys = res->start;

	spfi->irq = platform_get_irq(pdev, 0);
	if (spfi->irq < 0) {
		ret = spfi->irq;
		goto put_spi;
	}
	ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
			       IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
	if (ret)
		goto put_spi;

	spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
	if (IS_ERR(spfi->sys_clk)) {
		ret = PTR_ERR(spfi->sys_clk);
		goto put_spi;
	}
	spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
	if (IS_ERR(spfi->spfi_clk)) {
		ret = PTR_ERR(spfi->spfi_clk);
		goto put_spi;
	}

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		goto put_spi;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret)
		goto disable_pclk;

	spfi_reset(spfi);
	/*
	 * Only enable the error (IACCESS) interrupt. In PIO mode we'll
	 * poll the status of the FIFOs.
	 */
	spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
	/* Quad mode is opt-in per board via device tree. */
	if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
		master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
	master->num_chipselect = 5;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
	/* Divider supports /512 minimum (see img_spfi_config()). */
	master->max_speed_hz = clk_get_rate(spfi->spfi_clk);
	master->min_speed_hz = master->max_speed_hz / 512;

	master->set_cs = img_spfi_set_cs;
	master->transfer_one = img_spfi_transfer_one;
	master->prepare_message = img_spfi_prepare;

	/* DMA is optional: fall back to PIO when channels are unavailable. */
	spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
	spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
	if (!spfi->tx_ch || !spfi->rx_ch) {
		if (spfi->tx_ch)
			dma_release_channel(spfi->tx_ch);
		if (spfi->rx_ch)
			dma_release_channel(spfi->rx_ch);
		dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
	} else {
		master->dma_tx = spfi->tx_ch;
		master->dma_rx = spfi->rx_ch;
		master->can_dma = img_spfi_can_dma;
	}

	pm_runtime_set_active(spfi->dev);
	pm_runtime_enable(spfi->dev);

	ret = devm_spi_register_master(spfi->dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(spfi->dev);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);
	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
	clk_disable_unprepare(spfi->sys_clk);
put_spi:
	spi_master_put(master);

	return ret;
}
641 | ||
/*
 * Remove: release DMA channels, disable runtime PM, and gate the clocks
 * unless runtime PM already suspended the device (in which case
 * img_spfi_runtime_suspend() has gated them).
 */
static int img_spfi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);

	pm_runtime_disable(spfi->dev);
	/* Avoid a double clk_disable_unprepare() if already runtime-suspended. */
	if (!pm_runtime_status_suspended(spfi->dev)) {
		clk_disable_unprepare(spfi->spfi_clk);
		clk_disable_unprepare(spfi->sys_clk);
	}

	spi_master_put(master);

	return 0;
}
662 | ||
47164fdb | 663 | #ifdef CONFIG_PM |
deba2580 AB |
664 | static int img_spfi_runtime_suspend(struct device *dev) |
665 | { | |
666 | struct spi_master *master = dev_get_drvdata(dev); | |
667 | struct img_spfi *spfi = spi_master_get_devdata(master); | |
668 | ||
669 | clk_disable_unprepare(spfi->spfi_clk); | |
670 | clk_disable_unprepare(spfi->sys_clk); | |
671 | ||
672 | return 0; | |
673 | } | |
674 | ||
675 | static int img_spfi_runtime_resume(struct device *dev) | |
676 | { | |
677 | struct spi_master *master = dev_get_drvdata(dev); | |
678 | struct img_spfi *spfi = spi_master_get_devdata(master); | |
679 | int ret; | |
680 | ||
681 | ret = clk_prepare_enable(spfi->sys_clk); | |
682 | if (ret) | |
683 | return ret; | |
684 | ret = clk_prepare_enable(spfi->spfi_clk); | |
685 | if (ret) { | |
686 | clk_disable_unprepare(spfi->sys_clk); | |
687 | return ret; | |
688 | } | |
689 | ||
690 | return 0; | |
691 | } | |
47164fdb | 692 | #endif /* CONFIG_PM */ |
deba2580 AB |
693 | |
694 | #ifdef CONFIG_PM_SLEEP | |
/* System suspend: quiesce the SPI core queue for this master. */
static int img_spfi_suspend(struct device *dev)
{
	return spi_master_suspend(dev_get_drvdata(dev));
}
701 | ||
/*
 * System resume: power the block up, reset it to a known state, then
 * restart the SPI core queue.
 */
static int img_spfi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	/*
	 * pm_runtime_get_sync() returns 1 when the device was already
	 * active, so only negative values are errors (the old "if (ret)"
	 * check treated success as failure).  On error the usage count
	 * has still been incremented and must be dropped.
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}
	spfi_reset(spfi);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}
716 | #endif /* CONFIG_PM_SLEEP */ | |
717 | ||
/* Runtime PM gates clocks; system sleep defers to the SPI core. */
static const struct dev_pm_ops img_spfi_pm_ops = {
	SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
};
723 | ||
/* Device tree match table. */
static const struct of_device_id img_spfi_of_match[] = {
	{ .compatible = "img,spfi", },
	{ },
};
MODULE_DEVICE_TABLE(of, img_spfi_of_match);
729 | ||
/* Platform driver glue and module metadata. */
static struct platform_driver img_spfi_driver = {
	.driver = {
		.name = "img-spfi",
		.pm = &img_spfi_pm_ops,
		.of_match_table = of_match_ptr(img_spfi_of_match),
	},
	.probe = img_spfi_probe,
	.remove = img_spfi_remove,
};
module_platform_driver(img_spfi_driver);

MODULE_DESCRIPTION("IMG SPFI controller driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");