/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA transfer size is 16 MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

#include "../dmaengine.h"
#include "shdma.h"

/* DMA registers */
#define SAR	0x00
#define DAR	0x04
#define TCR	0x08
#define CHCR	0x0C
#define DMAOR	0x40

#define TEND	0x18 /* USB-DMAC */

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER	256
#define SH_DMA_TCR_MAX		(16 * 1024 * 1024 - 1)
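
/*
 * Note: struct sh_dmae_regs.tcr holds a transfer length in bytes; the
 * hardware TCR register counts transfer units, so the byte length is
 * shifted right by the channel's xmit_shift before it is written (see
 * dmae_set_reg()). SH_DMA_TCR_MAX + 1 is thus the maximum transfer
 * length in bytes that a single descriptor may carry.
 */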

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR per several channels - 1 has to be written to the bit,
 *     corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
        const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
                sh_dc->shdma_chan.id;
        u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

        __raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
        __raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
        return __raw_readl(sh_dc->base + reg);
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
        void __iomem *addr = shdev->chan_reg + DMAOR;

        if (shdev->pdata->dmaor_is_32bit)
                return __raw_readl(addr);
        else
                return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
        void __iomem *addr = shdev->chan_reg + DMAOR;

        if (shdev->pdata->dmaor_is_32bit)
                __raw_writel(data, addr);
        else
                __raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

        __raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

        return __raw_readl(sh_dc->base + shdev->chcr_offset);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
        unsigned short dmaor;
        unsigned long flags;

        spin_lock_irqsave(&sh_dmae_lock, flags);

        dmaor = dmaor_read(shdev);
        dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

        spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
        unsigned short dmaor;
        unsigned long flags;

        spin_lock_irqsave(&sh_dmae_lock, flags);

        dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

        if (shdev->pdata->chclr_present) {
                int i;
                for (i = 0; i < shdev->pdata->channel_num; i++) {
                        struct sh_dmae_chan *sh_chan = shdev->chan[i];
                        if (sh_chan)
                                channel_clear(sh_chan);
                }
        }

        dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

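        /*
         * Read DMAOR back to verify that the requested init bits actually
         * latched; some bits may be read-only or unsupported on a given SoC.
         */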
        dmaor = dmaor_read(shdev);

        spin_unlock_irqrestore(&sh_dmae_lock, flags);

        if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
                dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
                return -EIO;
        }
        if (shdev->pdata->dmaor_init & ~dmaor)
                dev_warn(shdev->shdma_dev.dma_dev.dev,
                         "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
                         dmaor, shdev->pdata->dmaor_init);
        return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = chcr_read(sh_chan);

        if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
                return true; /* working */

        return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        const struct sh_dmae_pdata *pdata = shdev->pdata;
        int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
                ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

        if (cnt >= pdata->ts_shift_num)
                cnt = 0;

        return pdata->ts_shift[cnt];
}
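
/*
 * The transfer size (TS) field can be split between low and high bits of
 * CHCR on some SoCs, hence the two mask/shift pairs used above to assemble
 * the index into pdata->ts_shift[]. For illustration only (these masks are
 * hypothetical): with ts_low_mask = 0x18 and ts_high_mask = 0x00300000,
 * two 2-bit CHCR fragments combine into one 4-bit TS index.
 * log2size_to_chcr() below performs the inverse mapping.
 */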

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        const struct sh_dmae_pdata *pdata = shdev->pdata;
        int i;

        for (i = 0; i < pdata->ts_shift_num; i++)
                if (pdata->ts_shift[i] == l2size)
                        break;

        if (i == pdata->ts_shift_num)
                i = 0;

        return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
                ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
        sh_dmae_writel(sh_chan, hw->sar, SAR);
        sh_dmae_writel(sh_chan, hw->dar, DAR);
        sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        u32 chcr = chcr_read(sh_chan);

        if (shdev->pdata->needs_tend_set)
                sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

        chcr |= CHCR_DE | shdev->chcr_ie_bit;
        chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
        /*
         * Default configuration for dual address memory-memory transfer.
         * 0x400 represents auto-request.
         */
        u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
                                                   LOG2_DEFAULT_XFER_SIZE);
        sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
        chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
        /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
        if (dmae_is_busy(sh_chan))
                return -EBUSY;

        sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
        chcr_write(sh_chan, val);

        return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        const struct sh_dmae_pdata *pdata = shdev->pdata;
        const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
        void __iomem *addr = shdev->dmars;
        unsigned int shift = chan_pdata->dmars_bit;

        if (dmae_is_busy(sh_chan))
                return -EBUSY;

        if (pdata->no_dmars)
                return 0;

        /* in the case of a missing DMARS resource use first memory window */
        if (!addr)
                addr = shdev->chan_reg;
        addr += chan_pdata->dmars;

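        /*
         * Each 16-bit DMARS register holds the MID/RID values for two
         * channels: shift selects the low (0) or high (8) byte, and the
         * (0xff00 >> shift) mask preserves the other channel's byte while
         * this channel's request source is updated.
         */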
        __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
                     addr);

        return 0;
}

static void sh_dmae_start_xfer(struct shdma_chan *schan,
                               struct shdma_desc *sdesc)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        struct sh_dmae_desc *sh_desc = container_of(sdesc,
                                        struct sh_dmae_desc, shdma_desc);
        dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
                sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
                sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
        /* Get the ld start address from ld_queue */
        dmae_set_reg(sh_chan, &sh_desc->hw);
        dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
                               int slave_id)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);

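        /*
         * Slave transfers take the request source (DMARS) and CHCR settings
         * from the per-slave configuration; memcpy (slave_id < 0) falls back
         * to the default auto-request setup in dmae_init().
         */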
        if (slave_id >= 0) {
                const struct sh_dmae_slave_config *cfg =
                        sh_chan->config;

                dmae_set_dmars(sh_chan, cfg->mid_rid);
                dmae_set_chcr(sh_chan, cfg->chcr);
        } else {
                dmae_init(sh_chan);
        }
}

/*
 * Find a slave channel configuration from the controller list by either a slave
 * ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
        struct sh_dmae_chan *sh_chan, int match)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        const struct sh_dmae_pdata *pdata = shdev->pdata;
        const struct sh_dmae_slave_config *cfg;
        int i;

        if (!sh_chan->shdma_chan.dev->of_node) {
                if (match >= SH_DMA_SLAVE_NUMBER)
                        return NULL;

                for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
                        if (cfg->slave_id == match)
                                return cfg;
        } else {
                for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
                        if (cfg->mid_rid == match) {
                                sh_chan->shdma_chan.slave_id = i;
                                return cfg;
                        }
        }

        return NULL;
}

static int sh_dmae_set_slave(struct shdma_chan *schan,
                             int slave_id, dma_addr_t slave_addr, bool try)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
        if (!cfg)
                return -ENXIO;

        if (!try) {
                sh_chan->config = cfg;
                sh_chan->slave_addr = slave_addr ? : cfg->addr;
        }

        return 0;
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        u32 chcr = chcr_read(sh_chan);

        chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
        chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
                              struct shdma_desc *sdesc,
                              dma_addr_t src, dma_addr_t dst, size_t *len)
{
        struct sh_dmae_desc *sh_desc = container_of(sdesc,
                                        struct sh_dmae_desc, shdma_desc);

        if (*len > schan->max_xfer_len)
                *len = schan->max_xfer_len;

        sh_desc->hw.sar = src;
        sh_desc->hw.dar = dst;
        sh_desc->hw.tcr = *len;

        return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);

        if (!(chcr_read(sh_chan) & CHCR_TE))
                return false;

        /* DMA stop */
        dmae_halt(sh_chan);

        return true;
}

static size_t sh_dmae_get_partial(struct shdma_chan *schan,
                                  struct shdma_desc *sdesc)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        struct sh_dmae_desc *sh_desc = container_of(sdesc,
                                        struct sh_dmae_desc, shdma_desc);
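        /*
         * TCR counts down in transfer units; convert back to bytes and
         * subtract from the requested length to get the bytes transferred.
         */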
        return sh_desc->hw.tcr -
                (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
        bool ret;

        /* halt the dma controller */
        sh_dmae_ctl_stop(shdev);

        /* We cannot detect which channel caused the error, so reset them all */
        ret = shdma_reset(&shdev->shdma_dev);

        sh_dmae_rst(shdev);

        return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
        struct sh_dmae_device *shdev = data;

        if (!(dmaor_read(shdev) & DMAOR_AE))
                return IRQ_NONE;

        sh_dmae_reset(shdev);
        return IRQ_HANDLED;
}
#endif

static bool sh_dmae_desc_completed(struct shdma_chan *schan,
                                   struct shdma_desc *sdesc)
{
        struct sh_dmae_chan *sh_chan = container_of(schan,
                                        struct sh_dmae_chan, shdma_chan);
        struct sh_dmae_desc *sh_desc = container_of(sdesc,
                                        struct sh_dmae_desc, shdma_desc);
        u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
        u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

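        /*
         * A descriptor is complete once the incrementing address pointer
         * (DAR for DEV_TO_MEM, SAR otherwise) has advanced to the end of
         * the descriptor's range.
         */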
        return (sdesc->direction == DMA_DEV_TO_MEM &&
                (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
               (sdesc->direction != DMA_DEV_TO_MEM &&
                (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
        /* Fast path out if NMIF is not asserted for this controller */
        if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
                return false;

        return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
                               unsigned long cmd, void *data)
{
        struct sh_dmae_device *shdev;
        int ret = NOTIFY_DONE;
        bool triggered;

        /*
         * Only concern ourselves with NMI events.
         *
         * Normally we would check the die chain value, but as this needs
         * to be architecture independent, check for NMI context instead.
         */
        if (!in_nmi())
                return NOTIFY_DONE;

        rcu_read_lock();
        list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
                /*
                 * Only stop if one of the controllers has NMIF asserted,
                 * we do not want to interfere with regular address error
                 * handling or NMI events that don't concern the DMACs.
                 */
                triggered = sh_dmae_nmi_notify(shdev);
                if (triggered)
                        ret = NOTIFY_OK;
        }
        rcu_read_unlock();

        return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
        .notifier_call	= sh_dmae_nmi_handler,

        /* Run before NMI debug handler and KGDB */
        .priority	= 1,
};

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
                              int irq, unsigned long flags)
{
        const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
        struct shdma_dev *sdev = &shdev->shdma_dev;
        struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
        struct sh_dmae_chan *sh_chan;
        struct shdma_chan *schan;
        int err;

        sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
                               GFP_KERNEL);
        if (!sh_chan) {
                dev_err(sdev->dma_dev.dev,
                        "No free memory for allocating dma channels!\n");
                return -ENOMEM;
        }

        schan = &sh_chan->shdma_chan;
        schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

        shdma_chan_probe(sdev, schan, id);

        sh_chan->base = shdev->chan_reg + chan_pdata->offset;

        /* set up channel irq */
        if (pdev->id >= 0)
                snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
                         "sh-dmae%d.%d", pdev->id, id);
        else
                snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
                         "sh-dma%d", id);

        err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
        if (err) {
                dev_err(sdev->dma_dev.dev,
                        "DMA channel %d request_irq error %d\n",
                        id, err);
                goto err_no_irq;
        }

        shdev->chan[id] = sh_chan;
        return 0;

err_no_irq:
        /* remove from dmaengine device node */
        shdma_chan_remove(schan);
        return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
        struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
        struct shdma_chan *schan;
        int i;

        shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
                BUG_ON(!schan);

                shdma_chan_remove(schan);
        }
        dma_dev->chancnt = 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
        sh_dmae_ctl_stop(shdev);
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
        return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
        struct sh_dmae_device *shdev = dev_get_drvdata(dev);

        return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
        return 0;
}

static int sh_dmae_resume(struct device *dev)
{
        struct sh_dmae_device *shdev = dev_get_drvdata(dev);
        int i, ret;

        ret = sh_dmae_rst(shdev);
        if (ret < 0)
                dev_err(dev, "Failed to reset!\n");

        for (i = 0; i < shdev->pdata->channel_num; i++) {
                struct sh_dmae_chan *sh_chan = shdev->chan[i];

                if (!sh_chan->shdma_chan.desc_num)
                        continue;

                if (sh_chan->shdma_chan.slave_id >= 0) {
                        const struct sh_dmae_slave_config *cfg = sh_chan->config;
                        dmae_set_dmars(sh_chan, cfg->mid_rid);
                        dmae_set_chcr(sh_chan, cfg->chcr);
                } else {
                        dmae_init(sh_chan);
                }
        }

        return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

static const struct dev_pm_ops sh_dmae_pm = {
        .suspend		= sh_dmae_suspend,
        .resume			= sh_dmae_resume,
        .runtime_suspend	= sh_dmae_runtime_suspend,
        .runtime_resume		= sh_dmae_runtime_resume,
};

static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
        struct sh_dmae_chan *sh_chan = container_of(schan,
                                        struct sh_dmae_chan, shdma_chan);

        /*
         * Implicit BUG_ON(!sh_chan->config): this is an exclusive slave DMA
         * operation and may only be called after a successful slave
         * configuration.
         */
        return sh_chan->slave_addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
        return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops sh_dmae_shdma_ops = {
        .desc_completed = sh_dmae_desc_completed,
        .halt_channel = sh_dmae_halt,
        .channel_busy = sh_dmae_channel_busy,
        .slave_addr = sh_dmae_slave_addr,
        .desc_setup = sh_dmae_desc_setup,
        .set_slave = sh_dmae_set_slave,
        .setup_xfer = sh_dmae_setup_xfer,
        .start_xfer = sh_dmae_start_xfer,
        .embedded_desc = sh_dmae_embedded_desc,
        .chan_irq = sh_dmae_chan_irq,
        .get_partial = sh_dmae_get_partial,
};
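
/*
 * Consumer-side sketch (not part of this driver): a peripheral driver would
 * reach this DMAC through the generic dmaengine API roughly as below. The
 * channel name "rx" and the callback done_fn are hypothetical.
 *
 *	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = done_fn;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */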

static const struct of_device_id sh_dmae_of_match[] = {
        {.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
        {}
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);

static int sh_dmae_probe(struct platform_device *pdev)
{
        const struct sh_dmae_pdata *pdata;
        unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
        int chan_irq[SH_DMAE_MAX_CHANNELS];
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
        unsigned long irqflags = 0;
        int errirq;
#endif
        int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
        struct sh_dmae_device *shdev;
        struct dma_device *dma_dev;
        struct resource *chan, *dmars, *errirq_res, *chanirq_res;

        if (pdev->dev.of_node)
                pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
        else
                pdata = dev_get_platdata(&pdev->dev);

        /* get platform data */
        if (!pdata || !pdata->channel_num)
                return -ENODEV;

        chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        /* DMARS area is optional */
        dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        /*
         * IRQ resources:
         * 1. there must always be at least one IRQ IO-resource. On SH4 it is
         *    the error IRQ, in which case it is the only IRQ in this resource:
         *    start == end. If it is the only IRQ resource, all channels also
         *    use the same IRQ.
         * 2. DMA channel IRQ resources can be specified one per resource or in
         *    ranges (start != end)
         * 3. iff all events (channels and, optionally, error) on this
         *    controller use the same IRQ, only one IRQ resource can be
         *    specified, otherwise there must be one IRQ per channel, even if
         *    some of them are equal
         * 4. if all IRQs on this controller are equal or if some specific IRQs
         *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
         *    requested with the IRQF_SHARED flag
         */
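        /*
         * Hypothetical board-code illustration of the conventions above: one
         * error IRQ plus a single IRQ range covering six channels (all
         * addresses and IRQ numbers are made up):
         *
         *	static struct resource sh_dmae_resources[] = {
         *		DEFINE_RES_MEM(0xfe008020, 0x270),
         *		DEFINE_RES_MEM(0xfe009000, 0x18),
         *		DEFINE_RES_IRQ(34),
         *		{ .start = 35, .end = 40, .flags = IORESOURCE_IRQ },
         *	};
         */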
        errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!chan || !errirq_res)
                return -ENODEV;

        shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
                             GFP_KERNEL);
        if (!shdev) {
                dev_err(&pdev->dev, "Not enough memory\n");
                return -ENOMEM;
        }

        dma_dev = &shdev->shdma_dev.dma_dev;

        shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
        if (IS_ERR(shdev->chan_reg))
                return PTR_ERR(shdev->chan_reg);
        if (dmars) {
                shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
                if (IS_ERR(shdev->dmars))
                        return PTR_ERR(shdev->dmars);
        }

        if (!pdata->slave_only)
                dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        if (pdata->slave && pdata->slave_num)
                dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

        /* Default transfer size of 4 bytes requires 4-byte alignment */
        dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

        shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
        shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
        err = shdma_init(&pdev->dev, &shdev->shdma_dev,
                         pdata->channel_num);
        if (err < 0)
                goto eshdma;

        /* platform data */
        shdev->pdata = pdata;

        if (pdata->chcr_offset)
                shdev->chcr_offset = pdata->chcr_offset;
        else
                shdev->chcr_offset = CHCR;

        if (pdata->chcr_ie_bit)
                shdev->chcr_ie_bit = pdata->chcr_ie_bit;
        else
                shdev->chcr_ie_bit = CHCR_IE;

        platform_set_drvdata(pdev, shdev);

        pm_runtime_enable(&pdev->dev);
        err = pm_runtime_get_sync(&pdev->dev);
        if (err < 0)
                dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

        spin_lock_irq(&sh_dmae_lock);
        list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
        spin_unlock_irq(&sh_dmae_lock);

        /* reset dma controller - only needed as a test */
        err = sh_dmae_rst(shdev);
        if (err)
                goto rst_err;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
        chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

        if (!chanirq_res)
                chanirq_res = errirq_res;
        else
                irqres++;

        if (chanirq_res == errirq_res ||
            (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
                irqflags = IRQF_SHARED;

        errirq = errirq_res->start;

        err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
                               "DMAC Address Error", shdev);
        if (err) {
                dev_err(&pdev->dev,
                        "DMA failed requesting irq #%d, error %d\n",
                        errirq, err);
                goto eirq_err;
        }

#else
        chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

        if (chanirq_res->start == chanirq_res->end &&
            !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
                /* Special case - all multiplexed */
                for (; irq_cnt < pdata->channel_num; irq_cnt++) {
                        if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
                                chan_irq[irq_cnt] = chanirq_res->start;
                                chan_flag[irq_cnt] = IRQF_SHARED;
                        } else {
                                irq_cap = 1;
                                break;
                        }
                }
        } else {
                do {
                        for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
                                if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
                                        irq_cap = 1;
                                        break;
                                }

                                if ((errirq_res->flags & IORESOURCE_BITS) ==
                                    IORESOURCE_IRQ_SHAREABLE)
                                        chan_flag[irq_cnt] = IRQF_SHARED;
                                else
                                        chan_flag[irq_cnt] = 0;
                                dev_dbg(&pdev->dev,
                                        "Found IRQ %d for channel %d\n",
                                        i, irq_cnt);
                                chan_irq[irq_cnt++] = i;
                        }

                        if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
                                break;

                        chanirq_res = platform_get_resource(pdev,
                                                IORESOURCE_IRQ, ++irqres);
                } while (irq_cnt < pdata->channel_num && chanirq_res);
        }

        /* Create DMA Channel */
        for (i = 0; i < irq_cnt; i++) {
                err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
                if (err)
                        goto chan_probe_err;
        }

        if (irq_cap)
                dev_notice(&pdev->dev, "Attempting to register %d DMA "
                           "channels when a maximum of %d are supported.\n",
                           pdata->channel_num, SH_DMAE_MAX_CHANNELS);

        pm_runtime_put(&pdev->dev);

        err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
        if (err < 0)
                goto edmadevreg;

        return err;

edmadevreg:
        pm_runtime_get(&pdev->dev);

chan_probe_err:
        sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
eirq_err:
#endif
rst_err:
        spin_lock_irq(&sh_dmae_lock);
        list_del_rcu(&shdev->node);
        spin_unlock_irq(&sh_dmae_lock);

        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        shdma_cleanup(&shdev->shdma_dev);
eshdma:
        synchronize_rcu();

        return err;
}

static int sh_dmae_remove(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
        struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

        dma_async_device_unregister(dma_dev);

        spin_lock_irq(&sh_dmae_lock);
        list_del_rcu(&shdev->node);
        spin_unlock_irq(&sh_dmae_lock);

        pm_runtime_disable(&pdev->dev);

        sh_dmae_chan_remove(shdev);
        shdma_cleanup(&shdev->shdma_dev);

        synchronize_rcu();

        return 0;
}

static struct platform_driver sh_dmae_driver = {
        .driver		= {
                .owner	= THIS_MODULE,
                .pm	= &sh_dmae_pm,
                .name	= SH_DMAE_DRV_NAME,
                .of_match_table = sh_dmae_of_match,
        },
        .remove		= sh_dmae_remove,
        .shutdown	= sh_dmae_shutdown,
};

static int __init sh_dmae_init(void)
{
        /* Wire up NMI handling */
        int err = register_die_notifier(&sh_dmae_nmi_notifier);
        if (err)
                return err;

        return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
        platform_driver_unregister(&sh_dmae_driver);

        unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);