arm/pxa2xx: reorganize SSP and SPI header files
drivers/spi/pxa2xx_spi.c
/*
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>

MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");

#define MAX_BUSES 3

#define RX_THRESH_DFLT	8
#define TX_THRESH_DFLT	8
#define TIMOUT_DFLT	1000

#define DMA_INT_MASK	(DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0)
#define MAX_DMA_LEN	8191
#define DMA_ALIGNMENT	8

/*
 * SSCR1 changes that require an SSP restart: basically everything
 * except the service and interrupt enables.  The PXA270 developer
 * manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need to be in
 * this list, but the PXA255 developer manual says all bits, without
 * really meaning the service and interrupt enables.
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define DEFINE_SSP_REG(reg, off) \
static inline u32 read_##reg(void const __iomem *p) \
{ return __raw_readl(p + (off)); } \
\
static inline void write_##reg(u32 v, void __iomem *p) \
{ __raw_writel(v, p + (off)); }

DEFINE_SSP_REG(SSCR0, 0x00)
DEFINE_SSP_REG(SSCR1, 0x04)
DEFINE_SSP_REG(SSSR, 0x08)
DEFINE_SSP_REG(SSITR, 0x0c)
DEFINE_SSP_REG(SSDR, 0x10)
DEFINE_SSP_REG(SSTO, 0x28)
DEFINE_SSP_REG(SSPSP, 0x2c)
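/*
 * For reference, DEFINE_SSP_REG(SSCR0, 0x00) above expands to:
 *
 *	static inline u32 read_SSCR0(void const __iomem *p)
 *	{ return __raw_readl(p + (0x00)); }
 *
 *	static inline void write_SSCR0(u32 v, void __iomem *p)
 *	{ __raw_writel(v, p + (0x00)); }
 */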

#define START_STATE ((void *)0)
#define RUNNING_STATE ((void *)1)
#define DONE_STATE ((void *)2)
#define ERROR_STATE ((void *)-1)

#define QUEUE_RUNNING 0
#define QUEUE_STOPPED 1
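/*
 * Message progress is tracked in spi_message->state using the sentinel
 * pointer values above; QUEUE_RUNNING and QUEUE_STOPPED describe the
 * state of the message queue as a whole (drv_data->run below).
 */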

struct driver_data {
	/* Driver model hookup */
	struct platform_device *pdev;

	/* SSP Info */
	struct ssp_device *ssp;

	/* SPI framework hookup */
	enum pxa_ssp_type ssp_type;
	struct spi_master *master;

	/* PXA hookup */
	struct pxa2xx_spi_master *master_info;

	/* DMA setup stuff */
	int rx_channel;
	int tx_channel;
	u32 *null_dma_buf;

	/* SSP register addresses */
	void __iomem *ioaddr;
	u32 ssdr_physical;

	/* SSP masks */
	u32 dma_cr1;
	u32 int_cr1;
	u32 clear_sr;
	u32 mask_sr;

	/* Driver message queue */
	struct workqueue_struct *workqueue;
	struct work_struct pump_messages;
	spinlock_t lock;
	struct list_head queue;
	int busy;
	int run;

	/* Message Transfer pump */
	struct tasklet_struct pump_transfers;

	/* Current message transfer state info */
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;
	size_t len;
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;
	int dma_mapped;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	size_t rx_map_len;
	size_t tx_map_len;
	u8 n_bytes;
	u32 dma_width;
	int (*write)(struct driver_data *drv_data);
	int (*read)(struct driver_data *drv_data);
	irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
	void (*cs_control)(u32 command);
};

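/*
 * chip_data caches one spi_device's configuration so pump_transfers()
 * can program SSCR0/SSCR1 for each transfer without recomputing it;
 * driver_data above holds the per-port state.
 */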
struct chip_data {
	u32 cr0;
	u32 cr1;
	u32 psp;
	u32 timeout;
	u8 n_bytes;
	u32 dma_width;
	u32 dma_burst_size;
	u32 threshold;
	u32 dma_threshold;
	u8 enable_dma;
	u8 bits_per_word;
	u32 speed_hz;
	int gpio_cs;
	int gpio_cs_inverted;
	int (*write)(struct driver_data *drv_data);
	int (*read)(struct driver_data *drv_data);
	void (*cs_control)(u32 command);
};

static void pump_messages(struct work_struct *work);

static void cs_assert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_ASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs))
		gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
}

static void cs_deassert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_DEASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs))
		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
}

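/*
 * Drain the rx fifo and wait for the port to go idle.  Returns 0 if
 * the port was still busy after the polling budget (2 * loops_per_jiffy
 * iterations) expired, which callers treat as a flush failure.
 */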
static int flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	void __iomem *reg = drv_data->ioaddr;

	do {
		while (read_SSSR(reg) & SSSR_RNE) {
			read_SSDR(reg);
		}
	} while ((read_SSSR(reg) & SSSR_BSY) && --limit);
	write_SSSR(SSSR_ROR, reg);

	return limit;
}

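/*
 * PIO reader/writer family.  A writer returns 1 when it queued a word
 * and 0 when the tx fifo is full or the tx buffer is exhausted; a
 * reader drains the rx fifo and returns nonzero once the rx buffer is
 * complete.  The null variants service transfers that have no buffer
 * on that side.
 */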
static int null_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	u8 n_bytes = drv_data->n_bytes;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(0, reg);
	drv_data->tx += n_bytes;

	return 1;
}

static int null_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	u8 n_bytes = drv_data->n_bytes;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		read_SSDR(reg);
		drv_data->rx += n_bytes;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u8_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u8 *)(drv_data->tx), reg);
	++drv_data->tx;

	return 1;
}

static int u8_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u8 *)(drv_data->rx) = read_SSDR(reg);
		++drv_data->rx;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u16_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u16 *)(drv_data->tx), reg);
	drv_data->tx += 2;

	return 1;
}

static int u16_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u16 *)(drv_data->rx) = read_SSDR(reg);
		drv_data->rx += 2;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u32_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u32 *)(drv_data->tx), reg);
	drv_data->tx += 4;

	return 1;
}

static int u32_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u32 *)(drv_data->rx) = read_SSDR(reg);
		drv_data->rx += 4;
	}

	return drv_data->rx == drv_data->rx_end;
}

static void *next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}

static int map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (!drv_data->cur_chip->enable_dma)
		return 0;

	if (msg->is_dma_mapped)
		return drv_data->rx_dma && drv_data->tx_dma;

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return 0;

	/* Modify setup if rx buffer is null */
	if (drv_data->rx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->rx = drv_data->null_dma_buf;
		drv_data->rx_map_len = 4;
	} else
		drv_data->rx_map_len = drv_data->len;

	/* Modify setup if tx buffer is null */
	if (drv_data->tx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->tx = drv_data->null_dma_buf;
		drv_data->tx_map_len = 4;
	} else
		drv_data->tx_map_len = drv_data->len;

	/* Stream map the tx buffer. Always do DMA_TO_DEVICE first
	 * so we flush the cache *before* invalidating it, in case
	 * the tx and rx buffers overlap.
	 */
	drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
					drv_data->tx_map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma))
		return 0;

	/* Stream map the rx buffer */
	drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
					drv_data->rx_map_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, drv_data->rx_dma)) {
		dma_unmap_single(dev, drv_data->tx_dma,
					drv_data->tx_map_len, DMA_TO_DEVICE);
		return 0;
	}

	return 1;
}

static void unmap_dma_buffers(struct driver_data *drv_data)
{
	struct device *dev;

	if (!drv_data->dma_mapped)
		return;

	if (!drv_data->cur_msg->is_dma_mapped) {
		dev = &drv_data->cur_msg->spi->dev;
		dma_unmap_single(dev, drv_data->rx_dma,
					drv_data->rx_map_len, DMA_FROM_DEVICE);
		dma_unmap_single(dev, drv_data->tx_dma,
					drv_data->tx_map_len, DMA_TO_DEVICE);
	}

	drv_data->dma_mapped = 0;
}

/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
	struct spi_transfer *last_transfer;
	unsigned long flags;
	struct spi_message *msg;

	spin_lock_irqsave(&drv_data->lock, flags);
	msg = drv_data->cur_msg;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	queue_work(drv_data->workqueue, &drv_data->pump_messages);
	spin_unlock_irqrestore(&drv_data->lock, flags);

	last_transfer = list_entry(msg->transfers.prev,
					struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		udelay(last_transfer->delay_usecs);

	/* Drop chip select UNLESS cs_change is true or we are returning
	 * a message with an error, or next message is for another chip
	 */
	if (!last_transfer->cs_change)
		cs_deassert(drv_data);
	else {
		struct spi_message *next_msg;

		/* Holding of cs was hinted, but we need to make sure
		 * the next message is for the same chip.  Don't waste
		 * time with the following tests unless this was hinted.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		spin_lock_irqsave(&drv_data->lock, flags);
		if (list_empty(&drv_data->queue))
			next_msg = NULL;
		else
			next_msg = list_entry(drv_data->queue.next,
					struct spi_message, queue);
		spin_unlock_irqrestore(&drv_data->lock, flags);

		/* see if the next and current messages point
		 * to the same chip
		 */
		if (next_msg && next_msg->spi != msg->spi)
			next_msg = NULL;
		if (!next_msg || msg->state == ERROR_STATE)
			cs_deassert(drv_data);
	}

	msg->state = NULL;
	if (msg->complete)
		msg->complete(msg->context);

	drv_data->cur_chip = NULL;
}

static int wait_ssp_rx_stall(void const __iomem *ioaddr)
{
	unsigned long limit = loops_per_jiffy << 1;

	while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
		cpu_relax();

	return limit;
}

static int wait_dma_channel_stop(int channel)
{
	unsigned long limit = loops_per_jiffy << 1;

	while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
		cpu_relax();

	return limit;
}

static void dma_error_stop(struct driver_data *drv_data, const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	write_SSSR(drv_data->clear_sr, reg);
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	if (drv_data->ssp_type != PXA25x_SSP)
		write_SSTO(0, reg);
	flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	unmap_dma_buffers(drv_data);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void dma_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	struct spi_message *msg = drv_data->cur_msg;

	/* Clear and disable interrupts on SSP and DMA channels */
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	write_SSSR(drv_data->clear_sr, reg);
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;

	if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_handler: dma rx channel stop failed\n");

	if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_transfer: ssp rx stall failed\n");

	unmap_dma_buffers(drv_data);

	/* update the buffer pointer for the amount completed in dma */
	drv_data->rx += drv_data->len -
			(DCMD(drv_data->rx_channel) & DCMD_LENGTH);

	/* read trailing data from the fifo; it does not matter how many
	 * bytes are in the fifo, just read until the buffer is full or
	 * the fifo is empty, whichever occurs first */
	drv_data->read(drv_data);

	/* return count of what was actually read */
	msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	msg->state = next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

static void dma_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;
	u32 irq_status = DCSR(channel) & DMA_INT_MASK;

	if (irq_status & DCSR_BUSERR) {

		if (channel == drv_data->tx_channel)
			dma_error_stop(drv_data,
					"dma_handler: "
					"bad bus address on tx channel");
		else
			dma_error_stop(drv_data,
					"dma_handler: "
					"bad bus address on rx channel");
		return;
	}

	/* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
	if ((channel == drv_data->tx_channel)
		&& (irq_status & DCSR_ENDINTR)
		&& (drv_data->ssp_type == PXA25x_SSP)) {

		/* Wait for rx to stall */
		if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
			dev_err(&drv_data->pdev->dev,
				"dma_handler: ssp rx stall failed\n");

		/* finish this transfer, start the next */
		dma_transfer_complete(drv_data);
	}
}

static irqreturn_t dma_transfer(struct driver_data *drv_data)
{
	u32 irq_status;
	void __iomem *reg = drv_data->ioaddr;

	irq_status = read_SSSR(reg) & drv_data->mask_sr;
	if (irq_status & SSSR_ROR) {
		dma_error_stop(drv_data, "dma_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	/* Check for false positive timeout */
	if ((irq_status & SSSR_TINT)
		&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
		write_SSSR(SSSR_TINT, reg);
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {

		/* Clear and disable timeout interrupt, do the rest in
		 * dma_transfer_complete */
		if (drv_data->ssp_type != PXA25x_SSP)
			write_SSTO(0, reg);

		/* finish this transfer, start the next */
		dma_transfer_complete(drv_data);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}

static void int_error_stop(struct driver_data *drv_data, const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset SSP */
	write_SSSR(drv_data->clear_sr, reg);
	write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
	if (drv_data->ssp_type != PXA25x_SSP)
		write_SSTO(0, reg);
	flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void int_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop SSP */
	write_SSSR(drv_data->clear_sr, reg);
	write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
	if (drv_data->ssp_type != PXA25x_SSP)
		write_SSTO(0, reg);

	/* Update the total bytes transferred; the count added is the
	 * number of bytes actually read */
	drv_data->cur_msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	drv_data->cur_msg->state = next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
			drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

	u32 irq_status = read_SSSR(reg) & irq_mask;

	if (irq_status & SSSR_ROR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT) {
		write_SSSR(SSSR_TINT, reg);
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	}

	/* Drain rx fifo, fill tx fifo and prevent overruns */
	do {
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	} while (drv_data->write(drv_data));

	if (drv_data->read(drv_data)) {
		int_transfer_complete(drv_data);
		return IRQ_HANDLED;
	}

	if (drv_data->tx == drv_data->tx_end) {
		write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg);
		/* PXA25x_SSP has no timeout, read trailing bytes */
		if (drv_data->ssp_type == PXA25x_SSP) {
			if (!wait_ssp_rx_stall(reg)) {
				int_error_stop(drv_data, "interrupt_transfer: "
						"rx stall failed");
				return IRQ_HANDLED;
			}
			if (!drv_data->read(drv_data)) {
				int_error_stop(drv_data,
						"interrupt_transfer: "
						"trailing byte read failed");
				return IRQ_HANDLED;
			}
			int_transfer_complete(drv_data);
		}
	}

	/* We did something */
	return IRQ_HANDLED;
}

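/*
 * ssp_int() below only checks that the interrupt is ours and that a
 * message is actually in flight; the real work happens in whichever
 * handler pump_transfers() installed (dma_transfer or
 * interrupt_transfer).
 */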
static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = dev_id;
	void __iomem *reg = drv_data->ioaddr;
	u32 sccr1_reg = read_SSCR1(reg);
	u32 mask = drv_data->mask_sr;
	u32 status;

	status = read_SSSR(reg);

	/* Ignore possible writes if we don't need to write */
	if (!(sccr1_reg & SSCR1_TIE))
		mask &= ~SSSR_TFS;

	if (!(status & mask))
		return IRQ_NONE;

	if (!drv_data->cur_msg) {

		write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
		write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
		if (drv_data->ssp_type != PXA25x_SSP)
			write_SSTO(0, reg);
		write_SSSR(drv_data->clear_sr, reg);

		dev_err(&drv_data->pdev->dev, "bad message state "
			"in interrupt handler\n");

		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

static int set_dma_burst_and_threshold(struct chip_data *chip,
				struct spi_device *spi,
				u8 bits_per_word, u32 *burst_code,
				u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info =
			(struct pxa2xx_spi_chip *)spi->controller_data;
	int bytes_per_word;
	int burst_bytes;
	int thresh_words;
	int req_burst_size;
	int retval = 0;

	/* Set the threshold (in registers) to equal the same amount of data
	 * as represented by burst size (in bytes).  The computation below
	 * is (burst_size rounded up to the nearest 8 bytes, word or long
	 * word) divided by (bytes/register); the tx threshold is the
	 * inverse of the rx, so that there will always be enough data in
	 * the rx fifo to satisfy a burst, and there will always be enough
	 * space in the tx fifo to accept a burst (a tx burst will overwrite
	 * the fifo if there is not enough space), and there must always
	 * remain enough empty space in the rx fifo for any data loaded into
	 * the tx fifo.
	 * Whenever burst_size (in bytes) equals bits/word, the fifo
	 * threshold will be 8, or half the fifo.
	 * The threshold can only be set to 2, 4 or 8, but not 16, because
	 * to burst 16 to the tx fifo, the fifo would have to be empty;
	 * however, the minimum fifo trigger level is 1, and the tx will
	 * request service when the fifo is at this level, with only 15
	 * spaces left.
	 */
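	/*
	 * Worked example (illustrative): with 8-bit words
	 * (bytes_per_word = 1) and the default DCMD_BURST8
	 * (burst_bytes = 8), thresh_words is 8 / 1 = 8, so the rx
	 * threshold is 8 entries and the tx threshold is 16 - 8 = 8
	 * entries: both fifos trip at the half-full mark.
	 */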

	/* find bytes/word */
	if (bits_per_word <= 8)
		bytes_per_word = 1;
	else if (bits_per_word <= 16)
		bytes_per_word = 2;
	else
		bytes_per_word = 4;

	/* use struct pxa2xx_spi_chip->dma_burst_size if available */
	if (chip_info)
		req_burst_size = chip_info->dma_burst_size;
	else {
		switch (chip->dma_burst_size) {
		default:
			/* if the default burst size is not set,
			 * do it now */
			chip->dma_burst_size = DCMD_BURST8;
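			/* fall through */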
		case DCMD_BURST8:
			req_burst_size = 8;
			break;
		case DCMD_BURST16:
			req_burst_size = 16;
			break;
		case DCMD_BURST32:
			req_burst_size = 32;
			break;
		}
	}
	if (req_burst_size <= 8) {
		*burst_code = DCMD_BURST8;
		burst_bytes = 8;
	} else if (req_burst_size <= 16) {
		if (bytes_per_word == 1) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
		}
	} else {
		if (bytes_per_word == 1) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else if (bytes_per_word == 2) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST32;
			burst_bytes = 32;
		}
	}

	thresh_words = burst_bytes / bytes_per_word;

	/* thresh_words will be between 2 and 8 */
	*threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
			| (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);

	return retval;
}

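/*
 * SSCR0 clock divisor selection.  The returned value is already shifted
 * into the SSCR0 clock-rate field (bits 8 and up).  Illustrative
 * example, not taken from any board file: with a 13 MHz SSP clock on a
 * PXA25x and a requested rate of 3250000 Hz, 13000000 / (2 * 3250000)
 * - 1 = 1, giving an actual bit clock of 13 MHz / (2 * (1 + 1)) =
 * 3.25 MHz.  Because the divisor is truncated, the actual rate can come
 * out slightly above the requested one.
 */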
static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate)
{
	unsigned long ssp_clk = clk_get_rate(ssp->clk);

	if (ssp->type == PXA25x_SSP)
		return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
	else
		return ((ssp_clk / rate - 1) & 0xfff) << 8;
}

static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip = NULL;
	struct ssp_device *ssp = drv_data->ssp;
	void __iomem *reg = drv_data->ioaddr;
	u32 clk_div = 0;
	u8 bits = 0;
	u32 speed = 0;
	u32 cr0;
	u32 cr1;
	u32 dma_thresh = drv_data->cur_chip->dma_threshold;
	u32 dma_burst = drv_data->cur_chip->dma_burst_size;

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);

		/* Drop chip select only if cs_change is requested */
		if (previous->cs_change)
			cs_deassert(drv_data);
	}

	/* Check for transfers that need multiple DMA segments */
	if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {

		/* reject already-mapped transfers; PIO won't always work */
		if (message->is_dma_mapped
				|| transfer->rx_dma || transfer->tx_dma) {
			dev_err(&drv_data->pdev->dev,
				"pump_transfers: mapped transfer length "
				"of %u is greater than %d\n",
				transfer->len, MAX_DMA_LEN);
			message->status = -EINVAL;
			giveback(drv_data);
			return;
		}

		/* warn ... we force this to PIO mode */
		if (printk_ratelimit())
			dev_warn(&message->spi->dev, "pump_transfers: "
				"DMA disabled for transfer length %ld "
				"greater than %d\n",
				(long)drv_data->len, MAX_DMA_LEN);
	}

	/* Setup the transfer state based on the type of transfer */
	if (flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;
		giveback(drv_data);
		return;
	}
	drv_data->n_bytes = chip->n_bytes;
	drv_data->dma_width = chip->dma_width;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len & DCMD_LENGTH;
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;

	/* Change the speed and bits per word on a per-transfer basis */
	cr0 = chip->cr0;
	if (transfer->speed_hz || transfer->bits_per_word) {

		bits = chip->bits_per_word;
		speed = chip->speed_hz;

		if (transfer->speed_hz)
			speed = transfer->speed_hz;

		if (transfer->bits_per_word)
			bits = transfer->bits_per_word;

		clk_div = ssp_get_clk_div(ssp, speed);

		if (bits <= 8) {
			drv_data->n_bytes = 1;
			drv_data->dma_width = DCMD_WIDTH1;
			drv_data->read = drv_data->read != null_reader ?
						u8_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u8_writer : null_writer;
		} else if (bits <= 16) {
			drv_data->n_bytes = 2;
			drv_data->dma_width = DCMD_WIDTH2;
			drv_data->read = drv_data->read != null_reader ?
						u16_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u16_writer : null_writer;
		} else if (bits <= 32) {
			drv_data->n_bytes = 4;
			drv_data->dma_width = DCMD_WIDTH4;
			drv_data->read = drv_data->read != null_reader ?
						u32_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u32_writer : null_writer;
		}
		/* if bits/word is changed in dma mode, then the thresholds
		 * and burst must be checked as well */
		if (chip->enable_dma) {
			if (set_dma_burst_and_threshold(chip, message->spi,
							bits, &dma_burst,
							&dma_thresh))
				if (printk_ratelimit())
					dev_warn(&message->spi->dev,
						"pump_transfers: "
						"DMA burst size reduced to "
						"match bits_per_word\n");
		}

		cr0 = clk_div
			| SSCR0_Motorola
			| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
			| SSCR0_SSE
			| (bits > 16 ? SSCR0_EDSS : 0);
	}

	message->state = RUNNING_STATE;

	/* Try to map the dma buffers and do a dma transfer if successful,
	 * but only if the length is non-zero and no larger than
	 * MAX_DMA_LEN.
	 *
	 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
	 * of PIO instead.  Care is needed above because the transfer may
	 * have been passed with buffers that are already dma mapped.
	 * A zero-length transfer in PIO mode will not try to write/read
	 * to/from the buffers.
	 *
	 * REVISIT large transfers are exactly where we most want to be
	 * using DMA.  If this happens much, split those transfers into
	 * multiple DMA segments rather than forcing PIO.
	 */
	drv_data->dma_mapped = 0;
	if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN)
		drv_data->dma_mapped = map_dma_buffers(drv_data);
	if (drv_data->dma_mapped) {

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = dma_transfer;

		/* Setup rx DMA Channel */
		DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
		DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
		DTADR(drv_data->rx_channel) = drv_data->rx_dma;
		if (drv_data->rx == drv_data->null_dma_buf)
			/* No target address increment */
			DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
							| drv_data->dma_width
							| dma_burst
							| drv_data->len;
		else
			DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
							| DCMD_FLOWSRC
							| drv_data->dma_width
							| dma_burst
							| drv_data->len;

		/* Setup tx DMA Channel */
		DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
		DSADR(drv_data->tx_channel) = drv_data->tx_dma;
		DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
		if (drv_data->tx == drv_data->null_dma_buf)
			/* No source address increment */
			DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
							| drv_data->dma_width
							| dma_burst
							| drv_data->len;
		else
			DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
							| DCMD_FLOWTRG
							| drv_data->dma_width
							| dma_burst
							| drv_data->len;

		/* Enable dma end irqs on SSP to detect end of transfer */
		if (drv_data->ssp_type == PXA25x_SSP)
			DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;

		/* Clear status and start DMA engine */
		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
		write_SSSR(drv_data->clear_sr, reg);
		DCSR(drv_data->rx_channel) |= DCSR_RUN;
		DCSR(drv_data->tx_channel) |= DCSR_RUN;
	} else {
		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = interrupt_transfer;

		/* Clear status */
		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
		write_SSSR(drv_data->clear_sr, reg);
	}

	/* see if we need to reload the config registers */
	if ((read_SSCR0(reg) != cr0)
		|| (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
			(cr1 & SSCR1_CHANGE_MASK)) {

		/* stop the SSP, and update the other bits */
		write_SSCR0(cr0 & ~SSCR0_SSE, reg);
		if (drv_data->ssp_type != PXA25x_SSP)
			write_SSTO(chip->timeout, reg);
		/* first set CR1 without interrupt and service enables */
		write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
		/* restart the SSP */
		write_SSCR0(cr0, reg);

	} else {
		if (drv_data->ssp_type != PXA25x_SSP)
			write_SSTO(chip->timeout, reg);
	}

	cs_assert(drv_data);

	/* after chip select, release the data by enabling service
	 * requests and interrupts, without changing any mode bits */
	write_SSCR1(cr1, reg);
}

static void pump_messages(struct work_struct *work)
{
	struct driver_data *drv_data =
		container_of(work, struct driver_data, pump_messages);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&drv_data->lock, flags);
	if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
		drv_data->busy = 0;
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (drv_data->cur_msg) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Extract head of queue */
	drv_data->cur_msg = list_entry(drv_data->queue.next,
					struct spi_message, queue);
	list_del_init(&drv_data->cur_msg->queue);

	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* prepare to setup the SSP, in pump_transfers, using the per-chip
	 * configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);

	drv_data->busy = 1;
	spin_unlock_irqrestore(&drv_data->lock, flags);
}

static int transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -ESHUTDOWN;
	}

	msg->actual_length = 0;
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	list_add_tail(&msg->queue, &drv_data->queue);

	if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
		queue_work(drv_data->workqueue, &drv_data->pump_messages);

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return 0;
}

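/*
 * With a GPIO chip select, chip->gpio_cs_inverted mirrors SPI_CS_HIGH,
 * so cs_assert()/cs_deassert() above drive the line correctly for
 * either polarity; a board-supplied cs_control() callback overrides
 * the GPIO handling entirely.
 */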
static int setup_cs(struct spi_device *spi, struct chip_data *chip,
		struct pxa2xx_spi_chip *chip_info)
{
	int err = 0;

	if (chip == NULL || chip_info == NULL)
		return 0;

	/* NOTE: setup() can be called multiple times, possibly with
	 * different chip_info, release previously requested GPIO
	 */
	if (gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	/* If (*cs_control) is provided, ignore GPIO chip select */
	if (chip_info->cs_control) {
		chip->cs_control = chip_info->cs_control;
		return 0;
	}

	if (gpio_is_valid(chip_info->gpio_cs)) {
		err = gpio_request(chip_info->gpio_cs, "SPI_CS");
		if (err) {
			dev_err(&spi->dev, "failed to request chip select "
				"GPIO%d\n", chip_info->gpio_cs);
			return err;
		}

		chip->gpio_cs = chip_info->gpio_cs;
		chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;

		err = gpio_direction_output(chip->gpio_cs,
					!chip->gpio_cs_inverted);
	}

	return err;
}

static int setup(struct spi_device *spi)
{
	struct pxa2xx_spi_chip *chip_info = NULL;
	struct chip_data *chip;
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	struct ssp_device *ssp = drv_data->ssp;
	unsigned int clk_div;
	uint tx_thres = TX_THRESH_DFLT;
	uint rx_thres = RX_THRESH_DFLT;

	if (drv_data->ssp_type != PXA25x_SSP
		&& (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
		dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
			"b/w not 4-32 for type non-PXA25x_SSP\n",
			drv_data->ssp_type, spi->bits_per_word);
		return -EINVAL;
	} else if (drv_data->ssp_type == PXA25x_SSP
			&& (spi->bits_per_word < 4
			|| spi->bits_per_word > 16)) {
		dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
			"b/w not 4-16 for type PXA25x_SSP\n",
			drv_data->ssp_type, spi->bits_per_word);
		return -EINVAL;
	}

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev,
				"failed setup: can't allocate chip data\n");
			return -ENOMEM;
		}

		chip->gpio_cs = -1;
		chip->enable_dma = 0;
		chip->timeout = TIMOUT_DFLT;
		chip->dma_burst_size = drv_data->master_info->enable_dma ?
					DCMD_BURST8 : 0;
	}

	/* protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it */
	chip_info = spi->controller_data;

	/* chip_info isn't always needed */
	chip->cr1 = 0;
	if (chip_info) {
		if (chip_info->timeout)
			chip->timeout = chip_info->timeout;
		if (chip_info->tx_threshold)
			tx_thres = chip_info->tx_threshold;
		if (chip_info->rx_threshold)
			rx_thres = chip_info->rx_threshold;
		chip->enable_dma = drv_data->master_info->enable_dma;
		chip->dma_threshold = 0;
		if (chip_info->enable_loopback)
			chip->cr1 = SSCR1_LBM;
	}

	chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
			(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);

	/* set dma burst and threshold outside of chip_info path so that if
	 * chip_info goes away after setting chip->enable_dma, the burst and
	 * threshold can still respond to changes in bits_per_word */
	if (chip->enable_dma) {
		/* set up legal burst and threshold for dma */
		if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word,
						&chip->dma_burst_size,
						&chip->dma_threshold)) {
			dev_warn(&spi->dev, "in setup: DMA burst size reduced "
				"to match bits_per_word\n");
		}
	}

	clk_div = ssp_get_clk_div(ssp, spi->max_speed_hz);
	chip->speed_hz = spi->max_speed_hz;

	chip->cr0 = clk_div
		| SSCR0_Motorola
		| SSCR0_DataSize(spi->bits_per_word > 16 ?
			spi->bits_per_word - 16 : spi->bits_per_word)
		| SSCR0_SSE
		| (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
	chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
	chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
			| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);

	/* NOTE:  PXA25x_SSP _could_ use external clocking ... */
	if (drv_data->ssp_type != PXA25x_SSP)
		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
			clk_get_rate(ssp->clk)
				/ (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
			chip->enable_dma ? "DMA" : "PIO");
	else
		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
			clk_get_rate(ssp->clk) / 2
				/ (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
			chip->enable_dma ? "DMA" : "PIO");

	if (spi->bits_per_word <= 8) {
		chip->n_bytes = 1;
		chip->dma_width = DCMD_WIDTH1;
		chip->read = u8_reader;
		chip->write = u8_writer;
	} else if (spi->bits_per_word <= 16) {
		chip->n_bytes = 2;
		chip->dma_width = DCMD_WIDTH2;
		chip->read = u16_reader;
		chip->write = u16_writer;
	} else if (spi->bits_per_word <= 32) {
		chip->cr0 |= SSCR0_EDSS;
		chip->n_bytes = 4;
		chip->dma_width = DCMD_WIDTH4;
		chip->read = u32_reader;
		chip->write = u32_writer;
	} else {
		dev_err(&spi->dev, "invalid wordsize\n");
		return -ENODEV;
	}
	chip->bits_per_word = spi->bits_per_word;

	spi_set_ctldata(spi, chip);

	return setup_cs(spi, chip, chip_info);
}

static void cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	if (!chip)
		return;

	if (gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	kfree(chip);
}

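/*
 * Queue life cycle: pxa2xx_spi_probe() runs init_queue() and
 * start_queue(), suspend stops the queue, resume restarts it, and
 * pxa2xx_spi_remove() tears it down via destroy_queue().
 */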
static int __devinit init_queue(struct driver_data *drv_data)
{
	INIT_LIST_HEAD(&drv_data->queue);
	spin_lock_init(&drv_data->lock);

	drv_data->run = QUEUE_STOPPED;
	drv_data->busy = 0;

	tasklet_init(&drv_data->pump_transfers,
			pump_transfers, (unsigned long)drv_data);

	INIT_WORK(&drv_data->pump_messages, pump_messages);
	drv_data->workqueue = create_singlethread_workqueue(
				dev_name(drv_data->master->dev.parent));
	if (drv_data->workqueue == NULL)
		return -EBUSY;

	return 0;
}

static int start_queue(struct driver_data *drv_data)
{
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -EBUSY;
	}

	drv_data->run = QUEUE_RUNNING;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	queue_work(drv_data->workqueue, &drv_data->pump_messages);

	return 0;
}

static int stop_queue(struct driver_data *drv_data)
{
	unsigned long flags;
	unsigned limit = 500;
	int status = 0;

	spin_lock_irqsave(&drv_data->lock, flags);

	/* This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the drv_data->busy could be used, but then the
	 * common execution path (pump_messages) would be required to call
	 * wake_up or friends on every SPI message.  Do this instead */
	drv_data->run = QUEUE_STOPPED;
	while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		msleep(10);
		spin_lock_irqsave(&drv_data->lock, flags);
	}

	if (!list_empty(&drv_data->queue) || drv_data->busy)
		status = -EBUSY;

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return status;
}

static int destroy_queue(struct driver_data *drv_data)
{
	int status;

	status = stop_queue(drv_data);
	/* we are unloading the module or failing to load (only two calls
	 * to this routine), and neither call can handle a return value.
	 * However, destroy_workqueue calls flush_workqueue, and that will
	 * block until all work is done.  If the reason that stop_queue
	 * timed out is that the work will never finish, then it does no
	 * good to call destroy_workqueue, so return anyway. */
	if (status != 0)
		return status;

	destroy_workqueue(drv_data->workqueue);

	return 0;
}

static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pxa2xx_spi_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data;
	struct ssp_device *ssp;
	int status;

	platform_info = dev->platform_data;

	ssp = pxa_ssp_request(pdev->id, pdev->name);
	if (ssp == NULL) {
		dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id);
		return -ENODEV;
	}

	/* Allocate master with space for drv_data and null dma buffer */
	master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
	if (!master) {
		dev_err(&pdev->dev, "cannot alloc spi_master\n");
		pxa_ssp_free(ssp);
		return -ENOMEM;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;
	drv_data->ssp = ssp;

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	master->bus_num = pdev->id;
	master->num_chipselect = platform_info->num_chipselect;
	master->dma_alignment = DMA_ALIGNMENT;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer = transfer;

	drv_data->ssp_type = ssp->type;
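	/*
	 * The master was allocated with 16 bytes of slack (above) so the
	 * null dma buffer can be carved out of the same allocation on an
	 * 8-byte (DMA_ALIGNMENT) boundary.
	 */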
	drv_data->null_dma_buf = (u32 *)ALIGN((u32)drv_data +
					sizeof(struct driver_data), 8);

	drv_data->ioaddr = ssp->mmio_base;
	drv_data->ssdr_physical = ssp->phys_base + SSDR;
	if (ssp->type == PXA25x_SSP) {
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
		drv_data->dma_cr1 = 0;
		drv_data->clear_sr = SSSR_ROR;
		drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
	} else {
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
		drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE;
		drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
		drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
	}

	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
			drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
		goto out_error_master_alloc;
	}

	/* Setup DMA if requested */
	drv_data->tx_channel = -1;
	drv_data->rx_channel = -1;
	if (platform_info->enable_dma) {

		/* Get two DMA channels (rx and tx) */
		drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
							DMA_PRIO_HIGH,
							dma_handler,
							drv_data);
		if (drv_data->rx_channel < 0) {
			dev_err(dev, "problem (%d) requesting rx channel\n",
				drv_data->rx_channel);
			status = -ENODEV;
			goto out_error_irq_alloc;
		}
		drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
							DMA_PRIO_MEDIUM,
							dma_handler,
							drv_data);
		if (drv_data->tx_channel < 0) {
			dev_err(dev, "problem (%d) requesting tx channel\n",
				drv_data->tx_channel);
			status = -ENODEV;
			goto out_error_dma_alloc;
		}

		DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
		DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
	}

	/* Enable SOC clock */
	clk_enable(ssp->clk);

	/* Load default SSP configuration */
	write_SSCR0(0, drv_data->ioaddr);
	write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
			SSCR1_TxTresh(TX_THRESH_DFLT),
			drv_data->ioaddr);
	write_SSCR0(SSCR0_SCR(2)
			| SSCR0_Motorola
			| SSCR0_DataSize(8),
			drv_data->ioaddr);
	if (drv_data->ssp_type != PXA25x_SSP)
		write_SSTO(0, drv_data->ioaddr);
	write_SSPSP(0, drv_data->ioaddr);

	/* Initialize and start the queue */
	status = init_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "problem initializing queue\n");
		goto out_error_clock_enabled;
	}
	status = start_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "problem starting queue\n");
		goto out_error_clock_enabled;
	}

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev, "problem registering spi master\n");
		goto out_error_queue_alloc;
	}

	return status;

out_error_queue_alloc:
	destroy_queue(drv_data);

out_error_clock_enabled:
	clk_disable(ssp->clk);

out_error_dma_alloc:
	if (drv_data->tx_channel != -1)
		pxa_free_dma(drv_data->tx_channel);
	if (drv_data->rx_channel != -1)
		pxa_free_dma(drv_data->rx_channel);

out_error_irq_alloc:
	free_irq(ssp->irq, drv_data);

out_error_master_alloc:
	spi_master_put(master);
	pxa_ssp_free(ssp);
	return status;
}

static int pxa2xx_spi_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	struct ssp_device *ssp;
	int status = 0;

	if (!drv_data)
		return 0;
	ssp = drv_data->ssp;

	/* Remove the queue */
	status = destroy_queue(drv_data);
	if (status != 0)
		/* the kernel does not check the return status of this
		 * routine (mod->exit, within the kernel).  Therefore
		 * nothing is gained by returning from here, the module is
		 * going away regardless, and we should not leave any more
		 * resources allocated than necessary.  We cannot free the
		 * message memory in drv_data->queue, but we can release the
		 * resources below.  I think the kernel should honor -EBUSY
		 * returns but... */
		dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not "
			"complete, message memory not freed\n");

	/* Disable the SSP at the peripheral and SOC level */
	write_SSCR0(0, drv_data->ioaddr);
	clk_disable(ssp->clk);

	/* Release DMA */
	if (drv_data->master_info->enable_dma) {
		DRCMR(ssp->drcmr_rx) = 0;
		DRCMR(ssp->drcmr_tx) = 0;
		pxa_free_dma(drv_data->tx_channel);
		pxa_free_dma(drv_data->rx_channel);
	}

	/* Release IRQ */
	free_irq(ssp->irq, drv_data);

	/* Release SSP */
	pxa_ssp_free(ssp);

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);

	/* Prevent double remove */
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void pxa2xx_spi_shutdown(struct platform_device *pdev)
{
	int status = 0;

	status = pxa2xx_spi_remove(pdev);
	if (status != 0)
		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}

#ifdef CONFIG_PM
static int pxa2xx_spi_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	status = stop_queue(drv_data);
	if (status != 0)
		return status;
	write_SSCR0(0, drv_data->ioaddr);
	clk_disable(ssp->clk);

	return 0;
}

static int pxa2xx_spi_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	if (drv_data->rx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_rx) =
			DRCMR_MAPVLD | drv_data->rx_channel;
	if (drv_data->tx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_tx) =
			DRCMR_MAPVLD | drv_data->tx_channel;

	/* Enable the SSP clock */
	clk_enable(ssp->clk);

	/* Start the queue running */
	status = start_queue(drv_data);
	if (status != 0) {
		dev_err(dev, "problem starting queue (%d)\n", status);
		return status;
	}

	return 0;
}

static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
	.suspend = pxa2xx_spi_suspend,
	.resume = pxa2xx_spi_resume,
};
#endif

static struct platform_driver driver = {
	.driver = {
		.name = "pxa2xx-spi",
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &pxa2xx_spi_pm_ops,
#endif
	},
	.probe = pxa2xx_spi_probe,
	.remove = pxa2xx_spi_remove,
	.shutdown = pxa2xx_spi_shutdown,
};

static int __init pxa2xx_spi_init(void)
{
	return platform_driver_register(&driver);
}
subsys_initcall(pxa2xx_spi_init);

static void __exit pxa2xx_spi_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);