Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux...
[deliverable/linux.git] / drivers / serial / sh-sci.c
1 /*
2 * drivers/serial/sh-sci.c
3 *
4 * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
5 *
6 * Copyright (C) 2002 - 2011 Paul Mundt
7 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
8 *
9 * based off of the old drivers/char/sh-sci.c by:
10 *
11 * Copyright (C) 1999, 2000 Niibe Yutaka
12 * Copyright (C) 2000 Sugioka Toshinobu
13 * Modified to support multiple serial ports. Stuart Menefy (May 2000).
14 * Modified to support SecureEdge. David McCullough (2002)
15 * Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
16 * Removed SH7300 support (Jul 2007).
17 *
18 * This file is subject to the terms and conditions of the GNU General Public
19 * License. See the file "COPYING" in the main directory of this archive
20 * for more details.
21 */
22 #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
23 #define SUPPORT_SYSRQ
24 #endif
25
26 #undef DEBUG
27
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/timer.h>
31 #include <linux/interrupt.h>
32 #include <linux/tty.h>
33 #include <linux/tty_flip.h>
34 #include <linux/serial.h>
35 #include <linux/major.h>
36 #include <linux/string.h>
37 #include <linux/sysrq.h>
38 #include <linux/ioport.h>
39 #include <linux/mm.h>
40 #include <linux/init.h>
41 #include <linux/delay.h>
42 #include <linux/console.h>
43 #include <linux/platform_device.h>
44 #include <linux/serial_sci.h>
45 #include <linux/notifier.h>
46 #include <linux/cpufreq.h>
47 #include <linux/clk.h>
48 #include <linux/ctype.h>
49 #include <linux/err.h>
50 #include <linux/list.h>
51 #include <linux/dmaengine.h>
52 #include <linux/scatterlist.h>
53 #include <linux/slab.h>
54
55 #ifdef CONFIG_SUPERH
56 #include <asm/sh_bios.h>
57 #endif
58
59 #ifdef CONFIG_H8300
60 #include <asm/gpio.h>
61 #endif
62
63 #include "sh-sci.h"
64
/*
 * Per-port driver state, embedding the generic serial core uart_port.
 * One instance per UART, held in the static sci_ports[] array.
 */
struct sci_port {
	struct uart_port	port;

	/* Port type */
	unsigned int		type;

	/* Port IRQs: ERI, RXI, TXI, BRI (optional) */
	unsigned int		irqs[SCIx_NR_IRQS];

	/* Port enable callback */
	void			(*enable)(struct uart_port *port);

	/* Port disable callback */
	void			(*disable)(struct uart_port *port);

	/* Break timer */
	struct timer_list	break_timer;
	/* Break state: 0 = idle, 1 = break in progress, 2 = break ending
	 * (see sci_break_timer) */
	int			break_flag;

	/* SCSCR initialization */
	unsigned int		scscr;

	/* SCBRR calculation algo */
	unsigned int		scbrr_algo_id;

	/* Interface clock */
	struct clk		*iclk;
	/* Function clock */
	struct clk		*fclk;

	/* Linkage on the driver-wide port list (sh_sci_priv.ports) */
	struct list_head	node;

	/* DMA channels; NULL when running in PIO mode */
	struct dma_chan		*chan_tx;
	struct dma_chan		*chan_rx;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	struct device			*dma_dev;
	unsigned int			slave_tx;
	unsigned int			slave_rx;
	struct dma_async_tx_descriptor	*desc_tx;
	/* Two Rx descriptors are kept in flight, ping-pong style */
	struct dma_async_tx_descriptor	*desc_rx[2];
	dma_cookie_t			cookie_tx;
	dma_cookie_t			cookie_rx[2];
	dma_cookie_t			active_rx;
	struct scatterlist		sg_tx;
	unsigned int			sg_len_tx;
	struct scatterlist		sg_rx[2];
	size_t				buf_len_rx;
	struct sh_dmae_slave		param_tx;
	struct sh_dmae_slave		param_rx;
	struct work_struct		work_tx;
	struct work_struct		work_rx;
	/* Fires when Rx DMA has been idle long enough to flush partial data */
	struct timer_list		rx_timer;
	unsigned int			rx_timeout;
#endif
};
121
/*
 * Driver-global state: the list of registered ports, protected by
 * 'lock', plus the cpufreq notifier block used by sci_notifier().
 */
struct sh_sci_priv {
	spinlock_t lock;		/* protects 'ports' */
	struct list_head ports;		/* list of sci_port.node entries */
	struct notifier_block clk_nb;	/* cpufreq transition notifier */
};
127
128 /* Function prototypes */
129 static void sci_stop_tx(struct uart_port *port);
130
131 #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
132
133 static struct sci_port sci_ports[SCI_NPORTS];
134 static struct uart_driver sci_uart_driver;
135
136 static inline struct sci_port *
137 to_sci_port(struct uart_port *uart)
138 {
139 return container_of(uart, struct sci_port, port);
140 }
141
142 #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
143
144 #ifdef CONFIG_CONSOLE_POLL
/* Acknowledge any pending error condition so polled reads can proceed. */
static inline void handle_error(struct uart_port *port)
{
	/* Clear error flags */
	sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
}
150
/*
 * Polled (non-interrupt) character read, used by the console-poll
 * machinery (e.g. KGDB).  Returns the received character, or
 * NO_POLL_CHAR when no data is pending.
 */
static int sci_poll_get_char(struct uart_port *port)
{
	unsigned short status;
	int c;

	/* Keep clearing error conditions until the status is clean */
	do {
		status = sci_in(port, SCxSR);
		if (status & SCxSR_ERRORS(port)) {
			handle_error(port);
			continue;
		}
		break;
	} while (1);

	if (!(status & SCxSR_RDxF(port)))
		return NO_POLL_CHAR;

	c = sci_in(port, SCxRDR);

	/* Dummy read */
	sci_in(port, SCxSR);
	/* Acknowledge the receive-data-full condition */
	sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));

	return c;
}
176 #endif
177
/*
 * Polled character write: busy-wait for transmit-register-empty, then
 * emit one character.  Used by the console and poll paths.
 */
static void sci_poll_put_char(struct uart_port *port, unsigned char c)
{
	unsigned short status;

	do {
		status = sci_in(port, SCxSR);
	} while (!(status & SCxSR_TDxE(port)));

	sci_out(port, SCxTDR, c);
	/* Ack TDxE; the TEND bit is masked out of the written value */
	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
}
189 #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
190
/*
 * sci_init_pins() - per-SoC pin-function setup for the serial lines.
 * Each CPU subtype routes the SCI/SCIF signals through different pin
 * function controller registers, hence the conditional variants below.
 * 'cflag' is consulted only where hardware RTS/CTS routing is optional.
 */
#if defined(__H8300H__) || defined(__H8300S__)
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	/* Channel index derived from the register base address */
	int ch = (port->mapbase - SMR0) >> 3;

	/* set DDR regs */
	H8300_GPIO_DDR(h8300_sci_pins[ch].port,
		       h8300_sci_pins[ch].rx,
		       H8300_GPIO_INPUT);
	H8300_GPIO_DDR(h8300_sci_pins[ch].port,
		       h8300_sci_pins[ch].tx,
		       H8300_GPIO_OUTPUT);

	/* tx mark output*/
	H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	/* Select pin functions keyed on the port's register base address */
	if (port->mapbase == 0xA4400000) {
		__raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
		__raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
	} else if (port->mapbase == 0xA4410000)
		__raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	unsigned short data;

	if (cflag & CRTSCTS) {
		/* enable RTS/CTS */
		if (port->mapbase == 0xa4430000) { /* SCIF0 */
			/* Clear PTCR bit 9-2; enable all scif pins but sck */
			data = __raw_readw(PORT_PTCR);
			__raw_writew((data & 0xfc03), PORT_PTCR);
		} else if (port->mapbase == 0xa4438000) { /* SCIF1 */
			/* Clear PVCR bit 9-2 */
			data = __raw_readw(PORT_PVCR);
			__raw_writew((data & 0xfc03), PORT_PVCR);
		}
	} else {
		if (port->mapbase == 0xa4430000) { /* SCIF0 */
			/* Clear PTCR bit 5-2; enable only tx and rx  */
			data = __raw_readw(PORT_PTCR);
			__raw_writew((data & 0xffc3), PORT_PTCR);
		} else if (port->mapbase == 0xa4438000) { /* SCIF1 */
			/* Clear PVCR bit 5-2 */
			data = __raw_readw(PORT_PVCR);
			__raw_writew((data & 0xffc3), PORT_PVCR);
		}
	}
}
#elif defined(CONFIG_CPU_SH3)
/* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	unsigned short data;

	/* We need to set SCPCR to enable RTS/CTS */
	data = __raw_readw(SCPCR);
	/* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/
	__raw_writew(data & 0x0fcf, SCPCR);

	if (!(cflag & CRTSCTS)) {
		/* We need to set SCPCR to enable RTS/CTS */
		data = __raw_readw(SCPCR);
		/* Clear out SCP7MD1,0, SCP4MD1,0,
		   Set SCP6MD1,0 = {01} (output)  */
		__raw_writew((data & 0x0fcf) | 0x1000, SCPCR);

		data = __raw_readb(SCPDR);
		/* Set /RTS2 (bit6) = 0 */
		__raw_writeb(data & 0xbf, SCPDR);
	}
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	unsigned short data;

	if (port->mapbase == 0xffe00000) {
		data = __raw_readw(PSCR);
		data &= ~0x03cf;
		/* Without hardware flow control, extra pin bits are set;
		 * presumably this parks RTS/CTS — confirm against datasheet */
		if (!(cflag & CRTSCTS))
			data |= 0x0340;

		__raw_writew(data, PSCR);
	}
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
      defined(CONFIG_CPU_SUBTYPE_SH7763) || \
      defined(CONFIG_CPU_SUBTYPE_SH7780) || \
      defined(CONFIG_CPU_SUBTYPE_SH7785) || \
      defined(CONFIG_CPU_SUBTYPE_SH7786) || \
      defined(CONFIG_CPU_SUBTYPE_SHX3)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	if (!(cflag & CRTSCTS))
		__raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */
}
#elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	if (!(cflag & CRTSCTS))
		__raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */
}
#else
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	/* Nothing to do */
}
#endif
304
/*
 * FIFO accounting helpers for SCIF-class ports:
 *   scif_txfill() - characters currently in the transmit FIFO
 *   scif_txroom() - free space remaining in the transmit FIFO
 *   scif_rxfill() - characters waiting in the receive FIFO
 * The relevant registers and their layout vary per CPU subtype, hence
 * the conditional variants below.
 */
#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
    defined(CONFIG_CPU_SUBTYPE_SH7780) || \
    defined(CONFIG_CPU_SUBTYPE_SH7785) || \
    defined(CONFIG_CPU_SUBTYPE_SH7786)
static int scif_txfill(struct uart_port *port)
{
	return sci_in(port, SCTFDR) & 0xff;
}

static int scif_txroom(struct uart_port *port)
{
	return SCIF_TXROOM_MAX - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	return sci_in(port, SCRFDR) & 0xff;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
/* SH7763: SCIF0/1 have dedicated count registers, SCIF2 packs both
 * counts into SCFDR (Tx in the high byte, Rx in the low bits). */
static int scif_txfill(struct uart_port *port)
{
	if (port->mapbase == 0xffe00000 ||
	    port->mapbase == 0xffe08000)
		/* SCIF0/1*/
		return sci_in(port, SCTFDR) & 0xff;
	else
		/* SCIF2 */
		return sci_in(port, SCFDR) >> 8;
}

static int scif_txroom(struct uart_port *port)
{
	if (port->mapbase == 0xffe00000 ||
	    port->mapbase == 0xffe08000)
		/* SCIF0/1*/
		return SCIF_TXROOM_MAX - scif_txfill(port);
	else
		/* SCIF2 */
		return SCIF2_TXROOM_MAX - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	if ((port->mapbase == 0xffe00000) ||
	    (port->mapbase == 0xffe08000)) {
		/* SCIF0/1*/
		return sci_in(port, SCRFDR) & 0xff;
	} else {
		/* SCIF2 */
		return sci_in(port, SCFDR) & SCIF2_RFDC_MASK;
	}
}
#elif defined(CONFIG_ARCH_SH7372)
/* SH7372: SCIFA uses SCFDR, SCIFB has dedicated count registers */
static int scif_txfill(struct uart_port *port)
{
	if (port->type == PORT_SCIFA)
		return sci_in(port, SCFDR) >> 8;
	else
		return sci_in(port, SCTFDR);
}

static int scif_txroom(struct uart_port *port)
{
	return port->fifosize - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	if (port->type == PORT_SCIFA)
		return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
	else
		return sci_in(port, SCRFDR);
}
#else
/* Generic SCIF: both counts live in SCFDR */
static int scif_txfill(struct uart_port *port)
{
	return sci_in(port, SCFDR) >> 8;
}

static int scif_txroom(struct uart_port *port)
{
	return SCIF_TXROOM_MAX - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
}
#endif
394
/*
 * FIFO-less SCI: 1 if the single transmit data register still holds a
 * character (TDRE clear), 0 if it is empty.
 */
static int sci_txfill(struct uart_port *port)
{
	return !(sci_in(port, SCxSR) & SCI_TDRE);
}
399
/*
 * FIFO-less SCI: there is room for exactly one character, available
 * precisely when sci_txfill() reports the data register as empty.
 */
static int sci_txroom(struct uart_port *port)
{
	return sci_txfill(port) == 0;
}
404
/* FIFO-less SCI: 1 if a received character is pending (RDxF set), else 0. */
static int sci_rxfill(struct uart_port *port)
{
	return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}
409
410 /* ********************************************************************** *
411 * the interrupt related routines *
412 * ********************************************************************** */
413
/*
 * Drain the xmit circular buffer (and any pending x_char) into the
 * transmit register/FIFO.  Runs from the TXI interrupt path with the
 * port lock held (see sci_tx_interrupt).
 */
static void sci_transmit_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int stopped = uart_tx_stopped(port);
	unsigned short status;
	unsigned short ctrl;
	int count;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_TDxE(port))) {
		/* No room yet: keep TIE enabled only if data remains */
		ctrl = sci_in(port, SCSCR);
		if (uart_circ_empty(xmit))
			ctrl &= ~SCSCR_TIE;
		else
			ctrl |= SCSCR_TIE;
		sci_out(port, SCSCR, ctrl);
		return;
	}

	if (port->type == PORT_SCI)
		count = sci_txroom(port);
	else
		count = scif_txroom(port);

	do {
		unsigned char c;

		if (port->x_char) {
			/* High-priority flow-control char goes out first */
			c = port->x_char;
			port->x_char = 0;
		} else if (!uart_circ_empty(xmit) && !stopped) {
			c = xmit->buf[xmit->tail];
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		} else {
			break;
		}

		sci_out(port, SCxTDR, c);

		port->icount.tx++;
	} while (--count > 0);

	/* Acknowledge the transmit-empty condition we just serviced */
	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (uart_circ_empty(xmit)) {
		sci_stop_tx(port);
	} else {
		ctrl = sci_in(port, SCSCR);

		if (port->type != PORT_SCI) {
			sci_in(port, SCxSR); /* Dummy read */
			sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
		}

		/* More data pending: keep the Tx interrupt armed */
		ctrl |= SCSCR_TIE;
		sci_out(port, SCSCR, ctrl);
	}
}
474
/* On SH3, SCIF may read end-of-break as a space->mark char */
/* True when ((c-1)|c) has all bits set — i.e. the sampled value looks
 * like break residue rather than real data (e.g. c == 0). */
#define STEPFN(c)  ({int __c = (c); (((__c-1)|(__c)) == -1); })

/*
 * Pull received characters out of the Rx register/FIFO and push them
 * into the tty flip buffer, translating hardware error status (frame /
 * parity) into per-character tty flags along the way.
 */
static inline void sci_receive_chars(struct uart_port *port)
{
	struct sci_port *sci_port = to_sci_port(port);
	struct tty_struct *tty = port->state->port.tty;
	int i, count, copied = 0;
	unsigned short status;
	unsigned char flag;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_RDxF(port)))
		return;

	while (1) {
		/* How many characters are waiting? */
		if (port->type == PORT_SCI)
			count = sci_rxfill(port);
		else
			count = scif_rxfill(port);

		/* Don't copy more bytes than there is room for in the buffer */
		count = tty_buffer_request_room(tty, count);

		/* If for any reason we can't copy more data, we're done! */
		if (count == 0)
			break;

		if (port->type == PORT_SCI) {
			char c = sci_in(port, SCxRDR);
			if (uart_handle_sysrq_char(port, c) ||
			    sci_port->break_flag)
				count = 0;
			else
				tty_insert_flip_char(tty, c, TTY_NORMAL);
		} else {
			for (i = 0; i < count; i++) {
				char c = sci_in(port, SCxRDR);
				status = sci_in(port, SCxSR);
#if defined(CONFIG_CPU_SH3)
				/* Skip "chars" during break */
				if (sci_port->break_flag) {
					if ((c == 0) &&
					    (status & SCxSR_FER(port))) {
						count--; i--;
						continue;
					}

					/* Nonzero => end-of-break */
					dev_dbg(port->dev, "debounce<%02x>\n", c);
					sci_port->break_flag = 0;

					if (STEPFN(c)) {
						count--; i--;
						continue;
					}
				}
#endif /* CONFIG_CPU_SH3 */
				/* Sysrq chars are consumed, not forwarded */
				if (uart_handle_sysrq_char(port, c)) {
					count--; i--;
					continue;
				}

				/* Store data and status */
				if (status & SCxSR_FER(port)) {
					flag = TTY_FRAME;
					dev_notice(port->dev, "frame error\n");
				} else if (status & SCxSR_PER(port)) {
					flag = TTY_PARITY;
					dev_notice(port->dev, "parity error\n");
				} else
					flag = TTY_NORMAL;

				tty_insert_flip_char(tty, c, flag);
			}
		}

		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));

		copied += count;
		port->icount.rx += count;
	}

	if (copied) {
		/* Tell the rest of the system the news. New characters! */
		tty_flip_buffer_push(tty);
	} else {
		/* Nothing copied: still ack RDxF so the IRQ deasserts */
		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
	}
}
567
#define SCI_BREAK_JIFFIES (HZ/20)
/* The sci generates interrupts during the break,
 * 1 per millisecond or so during the break period, for 9600 baud.
 * So don't bother disabling interrupts.
 * But don't want more than 1 break event.
 * Use a kernel timer to periodically poll the rx line until
 * the break is finished.
 */

/* Re-arm the break-debounce timer one poll interval from now. */
static void sci_schedule_break_timer(struct sci_port *port)
{
	port->break_timer.expires = jiffies + SCI_BREAK_JIFFIES;
	add_timer(&port->break_timer);
}

/* Ensure that two consecutive samples find the break over. */
static void sci_break_timer(unsigned long data)
{
	struct sci_port *port = (struct sci_port *)data;

	if (sci_rxd_in(&port->port) == 0) {
		/* Rx line still low: break ongoing, keep polling */
		port->break_flag = 1;
		sci_schedule_break_timer(port);
	} else if (port->break_flag == 1) {
		/* break is over. */
		port->break_flag = 2;
		sci_schedule_break_timer(port);
	} else
		port->break_flag = 0;
}
596
/*
 * Translate SCI error status (overrun / framing / parity) into tty
 * flag characters, handling BREAK detection (a frame error with the
 * Rx line held low) via the break-debounce timer.  Returns the number
 * of flag characters queued to the tty.
 */
static inline int sci_handle_errors(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = sci_in(port, SCxSR);
	struct tty_struct *tty = port->state->port.tty;

	if (status & SCxSR_ORER(port)) {
		/* overrun error */
		if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
			copied++;

		dev_notice(port->dev, "overrun error");
	}

	if (status & SCxSR_FER(port)) {
		if (sci_rxd_in(port) == 0) {
			/* Notify of BREAK */
			struct sci_port *sci_port = to_sci_port(port);

			/* Only report the first sample of a break; the
			 * timer suppresses repeats until it ends */
			if (!sci_port->break_flag) {
				sci_port->break_flag = 1;
				sci_schedule_break_timer(sci_port);

				/* Do sysrq handling. */
				if (uart_handle_break(port))
					return 0;

				dev_dbg(port->dev, "BREAK detected\n");

				if (tty_insert_flip_char(tty, 0, TTY_BREAK))
					copied++;
			}

		} else {
			/* frame error */
			if (tty_insert_flip_char(tty, 0, TTY_FRAME))
				copied++;

			dev_notice(port->dev, "frame error\n");
		}
	}

	if (status & SCxSR_PER(port)) {
		/* parity error */
		if (tty_insert_flip_char(tty, 0, TTY_PARITY))
			copied++;

		dev_notice(port->dev, "parity error");
	}

	if (copied)
		tty_flip_buffer_push(tty);

	return copied;
}
652
/*
 * Check for (and clear) a SCIF FIFO overrun, reporting it to the tty
 * layer.  Only applies to plain SCIF ports; returns 1 if an overrun
 * was seen, 0 otherwise.
 */
static inline int sci_handle_fifo_overrun(struct uart_port *port)
{
	struct tty_struct *tty = port->state->port.tty;
	int copied = 0;

	if (port->type != PORT_SCIF)
		return 0;

	if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) {
		/* Writing 0 clears the overrun flag in SCLSR */
		sci_out(port, SCLSR, 0);

		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		tty_flip_buffer_push(tty);

		dev_notice(port->dev, "overrun error\n");
		copied++;
	}

	return copied;
}
673
/*
 * Handle a BREAK condition flagged in SCxSR, queueing a TTY_BREAK flag
 * character and checking for FIFO overrun as well.  Returns the number
 * of flag characters pushed to the tty.
 */
static inline int sci_handle_breaks(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = sci_in(port, SCxSR);
	struct tty_struct *tty = port->state->port.tty;
	struct sci_port *s = to_sci_port(port);

	/* Give sysrq a crack at the break first */
	if (uart_handle_break(port))
		return 0;

	if (!s->break_flag && status & SCxSR_BRK(port)) {
#if defined(CONFIG_CPU_SH3)
		/* Debounce break */
		s->break_flag = 1;
#endif
		/* Notify of BREAK */
		if (tty_insert_flip_char(tty, 0, TTY_BREAK))
			copied++;

		dev_dbg(port->dev, "BREAK detected\n");
	}

	if (copied)
		tty_flip_buffer_push(tty);

	copied += sci_handle_fifo_overrun(port);

	return copied;
}
703
/*
 * RXI interrupt handler.  When Rx DMA is active, the interrupt only
 * signals that data has started arriving: Rx interrupts are masked and
 * the DMA-flush timer is (re)armed, leaving the actual data transfer
 * to the DMA engine.  Otherwise characters are drained in PIO mode via
 * sci_receive_chars().
 */
static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
{
#ifdef CONFIG_SERIAL_SH_SCI_DMA
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

	if (s->chan_rx) {
		u16 scr = sci_in(port, SCSCR);
		u16 ssr = sci_in(port, SCxSR);

		/* Disable future Rx interrupts */
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			disable_irq_nosync(irq);
			/* 0x4000: SCIFA/SCIFB-specific SCSCR bit —
			 * presumably the Rx DMA request enable; confirm
			 * against the datasheet */
			scr |= 0x4000;
		} else {
			scr &= ~SCSCR_RIE;
		}
		sci_out(port, SCSCR, scr);
		/* Clear current interrupt */
		sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
			jiffies, s->rx_timeout);
		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

		return IRQ_HANDLED;
	}
#endif

	/* I think sci_receive_chars has to be called irrespective
	 * of whether the I_IXOFF is set, otherwise, how is the interrupt
	 * to be disabled?
	 */
	sci_receive_chars(ptr);

	return IRQ_HANDLED;
}
740
/*
 * TXI interrupt handler: refill the transmitter from the xmit buffer
 * under the port lock.
 */
static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	sci_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}
752
/*
 * ERI interrupt handler: report the error to the tty layer, drop the
 * offending character (SCI) or drain the FIFO (SCIF), clear the error
 * flags, and kick the transmitter in case it stalled.
 */
static irqreturn_t sci_er_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle errors */
	if (port->type == PORT_SCI) {
		if (sci_handle_errors(port)) {
			/* discard character in rx buffer */
			sci_in(port, SCxSR);
			sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
		}
	} else {
		sci_handle_fifo_overrun(port);
		/* Any data that arrived with the error still needs draining */
		sci_rx_interrupt(irq, ptr);
	}

	sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));

	/* Kick the transmission */
	sci_tx_interrupt(irq, ptr);

	return IRQ_HANDLED;
}
776
/* BRI interrupt handler: report the BREAK and clear its status flag. */
static irqreturn_t sci_br_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle BREAKs */
	sci_handle_breaks(port);
	sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));

	return IRQ_HANDLED;
}
787
788 static inline unsigned long port_rx_irq_mask(struct uart_port *port)
789 {
790 /*
791 * Not all ports (such as SCIFA) will support REIE. Rather than
792 * special-casing the port type, we check the port initialization
793 * IRQ enable mask to see whether the IRQ is desired at all. If
794 * it's unset, it's logically inferred that there's no point in
795 * testing for it.
796 */
797 return SCSCR_RIE | (to_sci_port(port)->scscr & SCSR_REIE);
798 }
799
/*
 * Handler for ports whose ERI/RXI/TXI/BRI events share a single
 * (multiplexed) IRQ line: snapshot the status registers and dispatch
 * to the individual handlers for each asserted condition.
 */
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
	unsigned short ssr_status, scr_status, err_enabled;
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);
	irqreturn_t ret = IRQ_NONE;

	ssr_status = sci_in(port, SCxSR);
	scr_status = sci_in(port, SCSCR);
	err_enabled = scr_status & port_rx_irq_mask(port);

	/* Tx Interrupt (skipped when Tx is driven by DMA) */
	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
	    !s->chan_tx)
		ret = sci_tx_interrupt(irq, ptr);

	/*
	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
	 * DR flags
	 */
	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
	    (scr_status & SCSCR_RIE))
		ret = sci_rx_interrupt(irq, ptr);

	/* Error Interrupt */
	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
		ret = sci_er_interrupt(irq, ptr);

	/* Break Interrupt */
	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
		ret = sci_br_interrupt(irq, ptr);

	return ret;
}
834
/*
 * Here we define a transition notifier so that we can update all of our
 * ports' baud rate when the peripheral clock changes.
 */
static int sci_notifier(struct notifier_block *self,
			unsigned long phase, void *p)
{
	struct sh_sci_priv *priv = container_of(self,
						struct sh_sci_priv, clk_nb);
	struct sci_port *sci_port;
	unsigned long flags;

	/* Only act once the new clock rate is in effect */
	if ((phase == CPUFREQ_POSTCHANGE) ||
	    (phase == CPUFREQ_RESUMECHANGE)) {
		spin_lock_irqsave(&priv->lock, flags);
		list_for_each_entry(sci_port, &priv->ports, node)
			sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return NOTIFY_OK;
}
857
/*
 * Port enable callback: turn on the interface and function clocks and
 * refresh uartclk from the (possibly changed) interface clock rate.
 */
static void sci_clk_enable(struct uart_port *port)
{
	struct sci_port *sci_port = to_sci_port(port);

	clk_enable(sci_port->iclk);
	sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
	clk_enable(sci_port->fclk);
}
866
/*
 * Port disable callback: release the clocks in the reverse order of
 * sci_clk_enable().
 */
static void sci_clk_disable(struct uart_port *port)
{
	struct sci_port *sci_port = to_sci_port(port);

	clk_disable(sci_port->fclk);
	clk_disable(sci_port->iclk);
}
874
875 static int sci_request_irq(struct sci_port *port)
876 {
877 int i;
878 irqreturn_t (*handlers[4])(int irq, void *ptr) = {
879 sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt,
880 sci_br_interrupt,
881 };
882 const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full",
883 "SCI Transmit Data Empty", "SCI Break" };
884
885 if (port->irqs[0] == port->irqs[1]) {
886 if (unlikely(!port->irqs[0]))
887 return -ENODEV;
888
889 if (request_irq(port->irqs[0], sci_mpxed_interrupt,
890 IRQF_DISABLED, "sci", port)) {
891 dev_err(port->port.dev, "Can't allocate IRQ\n");
892 return -ENODEV;
893 }
894 } else {
895 for (i = 0; i < ARRAY_SIZE(handlers); i++) {
896 if (unlikely(!port->irqs[i]))
897 continue;
898
899 if (request_irq(port->irqs[i], handlers[i],
900 IRQF_DISABLED, desc[i], port)) {
901 dev_err(port->port.dev, "Can't allocate IRQ\n");
902 return -ENODEV;
903 }
904 }
905 }
906
907 return 0;
908 }
909
/*
 * Release the port's IRQs, mirroring the muxed-vs-individual split
 * used by sci_request_irq().
 */
static void sci_free_irq(struct sci_port *port)
{
	int i;

	if (port->irqs[0] == port->irqs[1])
		/* Multiplexed: a single line was requested */
		free_irq(port->irqs[0], port);
	else {
		for (i = 0; i < ARRAY_SIZE(port->irqs); i++) {
			if (!port->irqs[i])
				continue;

			free_irq(port->irqs[i], port);
		}
	}
}
925
/*
 * uart_ops .tx_empty: report TIOCSER_TEMT once transmission has ended
 * (TEND set) and the transmit FIFO is drained.
 *
 * NOTE(review): scif_txfill() is used regardless of port type — confirm
 * it is safe on FIFO-less SCI ports as well.
 */
static unsigned int sci_tx_empty(struct uart_port *port)
{
	unsigned short status = sci_in(port, SCxSR);
	unsigned short in_tx_fifo = scif_txfill(port);

	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}
933
/* uart_ops .set_mctrl: intentionally a no-op on this hardware. */
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* This routine is used for setting signals of: DTR, DCD, CTS/RTS */
	/* We use SCIF's hardware for CTS/RTS, so don't need any for that. */
	/* If you have signals for DTR and DCD, please implement here. */
}
940
/*
 * uart_ops .get_mctrl: the modem-control inputs are not wired up, so
 * report a fixed always-asserted set of signals.
 */
static unsigned int sci_get_mctrl(struct uart_port *port)
{
	/* This routine is used for getting signals of: DTR, DCD, DSR, RI,
	   and CTS/RTS */

	return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR;
}
948
949 #ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
 * Tx DMA completion callback: advance the xmit tail past the bytes the
 * DMA engine consumed, retire the descriptor, and either queue the next
 * transfer or (on SCIFA/SCIFB) drop TIE when the buffer is empty.
 */
static void sci_dma_tx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	spin_lock_irqsave(&port->lock, flags);

	xmit->tail += sg_dma_len(&s->sg_tx);
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += sg_dma_len(&s->sg_tx);

	/* Descriptor is done: ack it and invalidate our bookkeeping */
	async_tx_ack(s->desc_tx);
	s->cookie_tx = -EINVAL;
	s->desc_tx = NULL;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (!uart_circ_empty(xmit)) {
		/* More data: hand off to the Tx worker */
		schedule_work(&s->work_tx);
	} else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 ctrl = sci_in(port, SCSCR);
		sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
	}

	spin_unlock_irqrestore(&port->lock, flags);
}
982
/* Locking: called with port lock held */
/*
 * Copy up to 'count' bytes from the active Rx DMA buffer into the tty
 * flip buffer.  Returns the number of bytes actually pushed (0 if the
 * active cookie could not be identified or the tty has no room).
 */
static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
			   size_t count)
{
	struct uart_port *port = &s->port;
	int i, active, room;

	room = tty_buffer_request_room(tty, count);

	/* Figure out which of the two ping-pong buffers is active */
	if (s->active_rx == s->cookie_rx[0]) {
		active = 0;
	} else if (s->active_rx == s->cookie_rx[1]) {
		active = 1;
	} else {
		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
		return 0;
	}

	if (room < count)
		dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
			 count - room);
	if (!room)
		return room;

	for (i = 0; i < room; i++)
		tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
				     TTY_NORMAL);

	port->icount.rx += room;

	return room;
}
1015
/*
 * Rx DMA completion callback: push the filled buffer to the tty,
 * re-arm the idle-flush timer, and schedule the Rx worker to resubmit
 * the descriptor.
 */
static void sci_dma_rx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct tty_struct *tty = port->state->port.tty;
	unsigned long flags;
	int count;

	dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);

	spin_lock_irqsave(&port->lock, flags);

	count = sci_dma_rx_push(s, tty, s->buf_len_rx);

	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

	spin_unlock_irqrestore(&port->lock, flags);

	/* Flip-buffer push must happen outside the port lock */
	if (count)
		tty_flip_buffer_push(tty);

	schedule_work(&s->work_rx);
}
1039
1040 static void sci_start_rx(struct uart_port *port);
1041 static void sci_start_tx(struct uart_port *port);
1042
/*
 * Tear down Rx DMA: drop the channel, free the coherent ping-pong
 * buffer, and optionally fall back to PIO reception.
 */
static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;

	/* Invalidate state first so concurrent paths see DMA as gone */
	s->chan_rx = NULL;
	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
	dma_release_channel(chan);
	/* Both halves share one allocation anchored at sg_rx[0] */
	if (sg_dma_address(&s->sg_rx[0]))
		dma_free_coherent(port->dev, s->buf_len_rx * 2,
				  sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
	if (enable_pio)
		sci_start_rx(port);
}
1057
/*
 * Tear down Tx DMA: drop the channel and optionally fall back to PIO
 * transmission.
 */
static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;

	s->chan_tx = NULL;
	s->cookie_tx = -EINVAL;
	dma_release_channel(chan);
	if (enable_pio)
		sci_start_tx(port);
}
1069
/*
 * Prepare and submit both Rx ping-pong DMA descriptors.  On any
 * preparation/submission failure, unwind whatever was submitted and
 * fall back to PIO via sci_rx_dma_release().
 */
static void sci_submit_rx(struct sci_port *s)
{
	struct dma_chan *chan = s->chan_rx;
	int i;

	for (i = 0; i < 2; i++) {
		struct scatterlist *sg = &s->sg_rx[i];
		struct dma_async_tx_descriptor *desc;

		desc = chan->device->device_prep_slave_sg(chan,
			sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);

		if (desc) {
			s->desc_rx[i] = desc;
			desc->callback = sci_dma_rx_complete;
			desc->callback_param = s;
			s->cookie_rx[i] = desc->tx_submit(desc);
		}

		if (!desc || s->cookie_rx[i] < 0) {
			/* Failure: ack anything already submitted, then bail */
			if (i) {
				async_tx_ack(s->desc_rx[0]);
				s->cookie_rx[0] = -EINVAL;
			}
			if (desc) {
				async_tx_ack(desc);
				s->cookie_rx[i] = -EINVAL;
			}
			dev_warn(s->port.dev,
				 "failed to re-start DMA, using PIO\n");
			sci_rx_dma_release(s, true);
			return;
		}
		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
			s->cookie_rx[i], i);
	}

	/* Buffer 0 fills first */
	s->active_rx = s->cookie_rx[0];

	dma_async_issue_pending(chan);
}
1111
/*
 * Rx worker: resubmit the just-completed ping-pong descriptor and make
 * the other one active.  If the active transfer has not completed
 * (timer-driven flush of a partial buffer), terminate it, push the
 * partial data to the tty, and restart both descriptors from scratch.
 */
static void work_fn_rx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_rx);
	struct uart_port *port = &s->port;
	struct dma_async_tx_descriptor *desc;
	int new;

	/* Identify which ping-pong buffer is currently active */
	if (s->active_rx == s->cookie_rx[0]) {
		new = 0;
	} else if (s->active_rx == s->cookie_rx[1]) {
		new = 1;
	} else {
		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
		return;
	}
	desc = s->desc_rx[new];

	if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
	    DMA_SUCCESS) {
		/* Handle incomplete DMA receive */
		struct tty_struct *tty = port->state->port.tty;
		struct dma_chan *chan = s->chan_rx;
		struct sh_desc *sh_desc = container_of(desc, struct sh_desc,
						       async_tx);
		unsigned long flags;
		int count;

		/* Stop the in-flight transfer; 'partial' says how far it got */
		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
		dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
			sh_desc->partial, sh_desc->cookie);

		spin_lock_irqsave(&port->lock, flags);
		count = sci_dma_rx_push(s, tty, sh_desc->partial);
		spin_unlock_irqrestore(&port->lock, flags);

		if (count)
			tty_flip_buffer_push(tty);

		sci_submit_rx(s);

		return;
	}

	/* Completed normally: recycle the descriptor */
	s->cookie_rx[new] = desc->tx_submit(desc);
	if (s->cookie_rx[new] < 0) {
		dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
		sci_rx_dma_release(s, true);
		return;
	}

	s->active_rx = s->cookie_rx[!new];

	dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
		s->cookie_rx[new], new, s->active_rx);
}
1167
/*
 * Tx worker: carve the next contiguous chunk out of the circular xmit
 * buffer into the pre-mapped scatterlist, build a slave-DMA descriptor
 * for it, and submit.  Falls back to PIO on any preparation or
 * submission failure.
 */
static void work_fn_tx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_tx);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	struct scatterlist *sg = &s->sg_tx;

	/*
	 * DMA is idle now.
	 * Port xmit buffer is already mapped, and it is one page... Just adjust
	 * offsets and lengths. Since it is a circular buffer, we have to
	 * transmit till the end, and then the rest. Take the port lock to get a
	 * consistent xmit buffer state.
	 */
	spin_lock_irq(&port->lock);
	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
		sg->offset;
	/* Send at most up to the buffer wrap point in one go */
	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
	spin_unlock_irq(&port->lock);

	BUG_ON(!sg_dma_len(sg));

	desc = chan->device->device_prep_slave_sg(chan,
			sg, s->sg_len_tx, DMA_TO_DEVICE,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	/* Make the CPU-written xmit data visible to the device */
	dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);

	spin_lock_irq(&port->lock);
	s->desc_tx = desc;
	desc->callback = sci_dma_tx_complete;
	desc->callback_param = s;
	spin_unlock_irq(&port->lock);
	s->cookie_tx = desc->tx_submit(desc);
	if (s->cookie_tx < 0) {
		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
		xmit->buf, xmit->tail, xmit->head, s->cookie_tx);

	dma_async_issue_pending(chan);
}
1223 #endif
1224
1225 static void sci_start_tx(struct uart_port *port)
1226 {
1227 struct sci_port *s = to_sci_port(port);
1228 unsigned short ctrl;
1229
1230 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1231 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1232 u16 new, scr = sci_in(port, SCSCR);
1233 if (s->chan_tx)
1234 new = scr | 0x8000;
1235 else
1236 new = scr & ~0x8000;
1237 if (new != scr)
1238 sci_out(port, SCSCR, new);
1239 }
1240
1241 if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
1242 s->cookie_tx < 0)
1243 schedule_work(&s->work_tx);
1244 #endif
1245
1246 if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1247 /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
1248 ctrl = sci_in(port, SCSCR);
1249 sci_out(port, SCSCR, ctrl | SCSCR_TIE);
1250 }
1251 }
1252
1253 static void sci_stop_tx(struct uart_port *port)
1254 {
1255 unsigned short ctrl;
1256
1257 /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
1258 ctrl = sci_in(port, SCSCR);
1259
1260 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1261 ctrl &= ~0x8000;
1262
1263 ctrl &= ~SCSCR_TIE;
1264
1265 sci_out(port, SCSCR, ctrl);
1266 }
1267
1268 static void sci_start_rx(struct uart_port *port)
1269 {
1270 unsigned short ctrl;
1271
1272 ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port);
1273
1274 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1275 ctrl &= ~0x4000;
1276
1277 sci_out(port, SCSCR, ctrl);
1278 }
1279
1280 static void sci_stop_rx(struct uart_port *port)
1281 {
1282 unsigned short ctrl;
1283
1284 ctrl = sci_in(port, SCSCR);
1285
1286 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1287 ctrl &= ~0x4000;
1288
1289 ctrl &= ~port_rx_irq_mask(port);
1290
1291 sci_out(port, SCSCR, ctrl);
1292 }
1293
/* Modem-status change notification: not implemented for this hardware. */
static void sci_enable_ms(struct uart_port *port)
{
	/* Nothing here yet .. */
}
1298
/* Break signal control: not implemented for this hardware. */
static void sci_break_ctl(struct uart_port *port, int break_state)
{
	/* Nothing here yet .. */
}
1303
1304 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1305 static bool filter(struct dma_chan *chan, void *slave)
1306 {
1307 struct sh_dmae_slave *param = slave;
1308
1309 dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
1310 param->slave_id);
1311
1312 if (param->dma_dev == chan->device->dev) {
1313 chan->private = param;
1314 return true;
1315 } else {
1316 return false;
1317 }
1318 }
1319
/*
 * Rx DMA timeout: no DMA completion arrived within s->rx_timeout, so
 * re-enable the Rx interrupt and hand control to the Rx work function
 * to recover whatever data has been received so far.
 */
static void rx_timer_fn(unsigned long arg)
{
	struct sci_port *s = (struct sci_port *)arg;
	struct uart_port *port = &s->port;
	u16 scr = sci_in(port, SCSCR);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/*
		 * Clear the SCIFA/SCIFB-only 0x4000 control bit
		 * (presumably the Rx DMA request enable - compare
		 * sci_start_rx()/sci_stop_rx(); TODO confirm against the
		 * datasheet) and re-enable the second IRQ line.
		 */
		scr &= ~0x4000;
		enable_irq(s->irqs[1]);
	}
	/* Turn the Rx interrupt back on so PIO can take over. */
	sci_out(port, SCSCR, scr | SCSCR_RIE);
	dev_dbg(port->dev, "DMA Rx timed out\n");
	schedule_work(&s->work_rx);
}
1334
/*
 * Acquire and configure the Tx and Rx DMA channels for a port, when
 * the platform data supplies a DMA device.  Each direction is
 * independent: failure to set up one direction falls back to PIO for
 * that direction only.
 */
static void sci_request_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	struct sh_dmae_slave *param;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int nent;

	dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
		port->line, s->dma_dev);

	/* No DMA device configured for this port: stay on PIO. */
	if (!s->dma_dev)
		return;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	param = &s->param_tx;

	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
	param->slave_id = s->slave_tx;
	param->dma_dev = s->dma_dev;

	/* Mark "no Tx transfer in flight" before the channel goes live. */
	s->cookie_tx = -EINVAL;
	chan = dma_request_channel(mask, filter, param);
	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
	if (chan) {
		s->chan_tx = chan;
		sg_init_table(&s->sg_tx, 1);
		/* UART circular tx buffer is an aligned page. */
		BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
		sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
			    UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
		nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
		if (!nent)
			sci_tx_dma_release(s, false);
		else
			dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
				sg_dma_len(&s->sg_tx),
				port->state->xmit.buf, sg_dma_address(&s->sg_tx));

		s->sg_len_tx = nent;

		INIT_WORK(&s->work_tx, work_fn_tx);
	}

	param = &s->param_rx;

	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
	param->slave_id = s->slave_rx;
	param->dma_dev = s->dma_dev;

	chan = dma_request_channel(mask, filter, param);
	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
	if (chan) {
		dma_addr_t dma[2];
		void *buf[2];
		int i;

		s->chan_rx = chan;

		/*
		 * Rx uses two ping-pong buffers of twice the FIFO size
		 * each (minimum 32 bytes), carved from one coherent
		 * allocation.
		 */
		s->buf_len_rx = 2 * max(16, (int)port->fifosize);
		buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
					    &dma[0], GFP_KERNEL);

		if (!buf[0]) {
			dev_warn(port->dev,
				 "failed to allocate dma buffer, using PIO\n");
			sci_rx_dma_release(s, true);
			return;
		}

		buf[1] = buf[0] + s->buf_len_rx;
		dma[1] = dma[0] + s->buf_len_rx;

		for (i = 0; i < 2; i++) {
			struct scatterlist *sg = &s->sg_rx[i];

			sg_init_table(sg, 1);
			sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
				    (int)buf[i] & ~PAGE_MASK);
			sg_dma_address(sg) = dma[i];
		}

		INIT_WORK(&s->work_rx, work_fn_rx);
		/* Timeout fallback to PIO, armed elsewhere with rx_timeout. */
		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);

		sci_submit_rx(s);
	}
}
1425
/*
 * Undo sci_request_dma(): release whichever of the Tx/Rx channels
 * were actually acquired.  No-op when no DMA device is configured.
 */
static void sci_free_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	if (!s->dma_dev)
		return;

	if (s->chan_tx)
		sci_tx_dma_release(s, false);
	if (s->chan_rx)
		sci_rx_dma_release(s, false);
}
1438 #endif
1439
/*
 * Bring a port up: run the optional clock-enable hook (set up in
 * sci_init_single()), request IRQs, optionally attach DMA, then start
 * both directions.  Always succeeds.
 */
static int sci_startup(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	if (s->enable)
		s->enable(port);

	sci_request_irq(s);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
	sci_request_dma(port);
#endif
	sci_start_tx(port);
	sci_start_rx(port);

	return 0;
}
1458
/*
 * Take a port down - the reverse of sci_startup(): stop both
 * directions, tear down DMA and IRQs, then run the optional
 * clock-disable hook.
 */
static void sci_shutdown(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	sci_stop_rx(port);
	sci_stop_tx(port);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
	sci_free_dma(port);
#endif
	sci_free_irq(s);

	if (s->disable)
		s->disable(port);
}
1475
1476 static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
1477 unsigned long freq)
1478 {
1479 switch (algo_id) {
1480 case SCBRR_ALGO_1:
1481 return ((freq + 16 * bps) / (16 * bps) - 1);
1482 case SCBRR_ALGO_2:
1483 return ((freq + 16 * bps) / (32 * bps) - 1);
1484 case SCBRR_ALGO_3:
1485 return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
1486 case SCBRR_ALGO_4:
1487 return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
1488 case SCBRR_ALGO_5:
1489 return (((freq * 1000 / 32) / bps) - 1);
1490 }
1491
1492 /* Warn, but use a safe default */
1493 WARN_ON(1);
1494 return ((freq + 16 * bps) / (32 * bps) - 1);
1495 }
1496
/*
 * Apply termios settings to the hardware: wait for the transmitter to
 * drain, reset the port, program the character format into SCSMR and
 * the baud rate into SCBRR, then restore FIFO/flow control and the
 * saved SCSCR value.
 */
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
			    struct ktermios *old)
{
	struct sci_port *s = to_sci_port(port);
	unsigned int status, baud, smr_val, max_baud;
	int t = -1;		/* SCBRR value; -1 means "don't program it" */
	u16 scfcr = 0;

	/*
	 * earlyprintk comes here early on with port->uartclk set to zero.
	 * the clock framework is not up and running at this point so here
	 * we assume that 115200 is the maximum baud rate. please note that
	 * the baud rate is not programmed during earlyprintk - it is assumed
	 * that the previous boot loader has enabled required clocks and
	 * setup the baud rate generator hardware for us already.
	 */
	max_baud = port->uartclk ? port->uartclk / 16 : 115200;

	baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
	if (likely(baud && port->uartclk))
		t = sci_scbrr_calc(s->scbrr_algo_id, baud, port->uartclk);

	/* Busy-wait until the transmitter is completely drained. */
	do {
		status = sci_in(port, SCxSR);
	} while (!(status & SCxSR_TEND(port)));

	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */

	/* Reset both FIFOs on FIFO-equipped variants. */
	if (port->type != PORT_SCI)
		sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);

	/* Keep SCSMR bits 1:0 (manipulated below when t >= 256). */
	smr_val = sci_in(port, SCSMR) & 3;
	if ((termios->c_cflag & CSIZE) == CS7)
		smr_val |= 0x40;
	if (termios->c_cflag & PARENB)
		smr_val |= 0x20;
	if (termios->c_cflag & PARODD)
		smr_val |= 0x30;
	if (termios->c_cflag & CSTOPB)
		smr_val |= 0x08;

	uart_update_timeout(port, termios->c_cflag, baud);

	sci_out(port, SCSMR, smr_val);

	dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
		SCSCR_INIT(port));

	if (t > 0) {
		if (t >= 256) {
			/* Divisor too large for SCBRR: divide via SCSMR 1:0. */
			sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1);
			t >>= 2;
		} else
			sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3);

		sci_out(port, SCBRR, t);
		udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
	}

	sci_init_pins(port, termios->c_cflag);
	sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));

	sci_out(port, SCSCR, s->scscr);

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	/*
	 * Calculate delay for 1.5 DMA buffers: see
	 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
	 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
	 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
	 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
	 * sizes), but it has been found out experimentally, that this is not
	 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
	 * as a minimum seem to work perfectly.
	 */
	if (s->chan_rx) {
		s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
			port->fifosize / 2;
		dev_dbg(port->dev,
			"DMA Rx t-out %ums, tty t-out %u jiffies\n",
			s->rx_timeout * 1000 / HZ, port->timeout);
		if (s->rx_timeout < msecs_to_jiffies(20))
			s->rx_timeout = msecs_to_jiffies(20);
	}
#endif

	/* Only start reception when the caller actually wants to read. */
	if ((termios->c_cflag & CREAD) != 0)
		sci_start_rx(port);
}
1586
1587 static const char *sci_type(struct uart_port *port)
1588 {
1589 switch (port->type) {
1590 case PORT_IRDA:
1591 return "irda";
1592 case PORT_SCI:
1593 return "sci";
1594 case PORT_SCIF:
1595 return "scif";
1596 case PORT_SCIFA:
1597 return "scifa";
1598 case PORT_SCIFB:
1599 return "scifb";
1600 }
1601
1602 return NULL;
1603 }
1604
/* Release I/O resources claimed in sci_request_port() - none yet. */
static void sci_release_port(struct uart_port *port)
{
	/* Nothing here yet .. */
}
1609
/* Claim I/O resources for the port - nothing to claim yet. */
static int sci_request_port(struct uart_port *port)
{
	/* Nothing here yet .. */
	return 0;
}
1615
1616 static void sci_config_port(struct uart_port *port, int flags)
1617 {
1618 struct sci_port *s = to_sci_port(port);
1619
1620 port->type = s->type;
1621
1622 if (port->membase)
1623 return;
1624
1625 if (port->flags & UPF_IOREMAP) {
1626 port->membase = ioremap_nocache(port->mapbase, 0x40);
1627
1628 if (IS_ERR(port->membase))
1629 dev_err(port->dev, "can't remap port#%d\n", port->line);
1630 } else {
1631 /*
1632 * For the simple (and majority of) cases where we don't
1633 * need to do any remapping, just cast the cookie
1634 * directly.
1635 */
1636 port->membase = (void __iomem *)port->mapbase;
1637 }
1638 }
1639
1640 static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
1641 {
1642 struct sci_port *s = to_sci_port(port);
1643
1644 if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
1645 return -EINVAL;
1646 if (ser->baud_base < 2400)
1647 /* No paper tape reader for Mitch.. */
1648 return -EINVAL;
1649
1650 return 0;
1651 }
1652
/* uart_ops vtable wiring the serial core into this driver's routines. */
static struct uart_ops sci_uart_ops = {
	.tx_empty	= sci_tx_empty,
	.set_mctrl	= sci_set_mctrl,
	.get_mctrl	= sci_get_mctrl,
	.start_tx	= sci_start_tx,
	.stop_tx	= sci_stop_tx,
	.stop_rx	= sci_stop_rx,
	.enable_ms	= sci_enable_ms,
	.break_ctl	= sci_break_ctl,
	.startup	= sci_startup,
	.shutdown	= sci_shutdown,
	.set_termios	= sci_set_termios,
	.type		= sci_type,
	.release_port	= sci_release_port,
	.request_port	= sci_request_port,
	.config_port	= sci_config_port,
	.verify_port	= sci_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	/* kgdb/polled-console hooks, only when console polling is built. */
	.poll_get_char	= sci_poll_get_char,
	.poll_put_char	= sci_poll_put_char,
#endif
};
1675
/*
 * Initialize one sci_port/uart_port pair from platform data.  Called
 * with dev == NULL for the early console (see sci_probe()), in which
 * case clock lookup and the enable/disable hooks are skipped.
 * Returns 0 or a negative errno from the clock lookup.
 */
static int __devinit sci_init_single(struct platform_device *dev,
				     struct sci_port *sci_port,
				     unsigned int index,
				     struct plat_sci_port *p)
{
	struct uart_port *port = &sci_port->port;

	port->ops	= &sci_uart_ops;
	port->iotype	= UPIO_MEM;
	port->line	= index;

	/* FIFO depth depends on the hardware variant. */
	switch (p->type) {
	case PORT_SCIFB:
		port->fifosize = 256;
		break;
	case PORT_SCIFA:
		port->fifosize = 64;
		break;
	case PORT_SCIF:
		port->fifosize = 16;
		break;
	default:
		port->fifosize = 1;
		break;
	}

	if (dev) {
		/* Interface clock: try "sci_ick", fall back to the legacy name. */
		sci_port->iclk = clk_get(&dev->dev, "sci_ick");
		if (IS_ERR(sci_port->iclk)) {
			sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
			if (IS_ERR(sci_port->iclk)) {
				dev_err(&dev->dev, "can't get iclk\n");
				return PTR_ERR(sci_port->iclk);
			}
		}

		/*
		 * The function clock is optional, ignore it if we can't
		 * find it.
		 */
		sci_port->fclk = clk_get(&dev->dev, "sci_fck");
		if (IS_ERR(sci_port->fclk))
			sci_port->fclk = NULL;

		/* Used by sci_startup()/sci_shutdown() and the console. */
		sci_port->enable = sci_clk_enable;
		sci_port->disable = sci_clk_disable;
		port->dev = &dev->dev;
	}

	sci_port->break_timer.data = (unsigned long)sci_port;
	sci_port->break_timer.function = sci_break_timer;
	init_timer(&sci_port->break_timer);

	port->mapbase	= p->mapbase;
	port->membase	= p->membase;

	port->irq	= p->irqs[SCIx_TXI_IRQ];
	port->flags	= p->flags;
	sci_port->type	= port->type = p->type;
	sci_port->scscr = p->scscr;
	sci_port->scbrr_algo_id	= p->scbrr_algo_id;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	sci_port->dma_dev	= p->dma_dev;
	sci_port->slave_tx	= p->dma_slave_tx;
	sci_port->slave_rx	= p->dma_slave_rx;

	dev_dbg(port->dev, "%s: DMA device %p, tx %d, rx %d\n", __func__,
		p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
#endif

	memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
	return 0;
}
1750
1751 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
1752 static struct tty_driver *serial_console_device(struct console *co, int *index)
1753 {
1754 struct uart_driver *p = &sci_uart_driver;
1755 *index = co->index;
1756 return p->tty_driver;
1757 }
1758
/* Adapter for uart_console_write(): emit one character via the polled helper. */
static void serial_console_putchar(struct uart_port *port, int ch)
{
	sci_poll_put_char(port, ch);
}
1763
1764 /*
1765 * Print a string to the serial port trying not to disturb
1766 * any possible real use of the port...
1767 */
1768 static void serial_console_write(struct console *co, const char *s,
1769 unsigned count)
1770 {
1771 struct uart_port *port = co->data;
1772 struct sci_port *sci_port = to_sci_port(port);
1773 unsigned short bits;
1774
1775 if (sci_port->enable)
1776 sci_port->enable(port);
1777
1778 uart_console_write(port, s, count, serial_console_putchar);
1779
1780 /* wait until fifo is empty and last bit has been transmitted */
1781 bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
1782 while ((sci_in(port, SCxSR) & bits) != bits)
1783 cpu_relax();
1784
1785 if (sci_port->disable)
1786 sci_port->disable(port);
1787 }
1788
/*
 * Console setup: bind the console to its port, configure the mapping
 * and clocks, and apply the requested (or default 115200 8n1) line
 * settings via the serial core.
 */
static int __devinit serial_console_setup(struct console *co, char *options)
{
	struct sci_port *sci_port;
	struct uart_port *port;
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= SCI_NPORTS)
		co->index = 0;

	/* co->data is pre-set for the early console (see sci_probe()). */
	if (co->data) {
		port = co->data;
		sci_port = to_sci_port(port);
	} else {
		sci_port = &sci_ports[co->index];
		port = &sci_port->port;
		co->data = port;
	}

	/*
	 * Also need to check port->type, we don't actually have any
	 * UPIO_PORT ports, but uart_report_port() handily misreports
	 * it anyways if we don't have a port available by the time this is
	 * called.
	 */
	if (!port->type)
		return -ENODEV;

	sci_config_port(port, 0);

	if (sci_port->enable)
		sci_port->enable(port);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	ret = uart_set_options(port, co, baud, parity, bits, flow);
#if defined(__H8300H__) || defined(__H8300S__)
	/* disable rx interrupt */
	if (ret == 0)
		sci_stop_rx(port);
#endif
	/* TODO: disable clock */
	return ret;
}
1842
/* The regular "ttySC" console, registered from sci_console_init(). */
static struct console serial_console = {
	.name		= "ttySC",
	.device		= serial_console_device,
	.write		= serial_console_write,
	.setup		= serial_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,	/* bound to a port at setup time */
};
1851
/* Register the regular console at console_initcall time. */
static int __init sci_console_init(void)
{
	register_console(&serial_console);
	return 0;
}
console_initcall(sci_console_init);
1858
/* Port and console used for earlyprintk, before the real device probes. */
static struct sci_port early_serial_port;
static struct console early_serial_console = {
	.name		= "early_ttySC",
	.write		= serial_console_write,
	.flags		= CON_PRINTBUFFER,
};
/* Command-line options for the early console, parsed in sci_probe(). */
static char early_serial_buf[32];
1866
1867 #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
1868
1869 #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
1870 #define SCI_CONSOLE (&serial_console)
1871 #else
1872 #define SCI_CONSOLE 0
1873 #endif
1874
/* One-shot boot banner, printed from sci_init(). */
static char banner[] __initdata =
	KERN_INFO "SuperH SCI(F) driver initialized\n";

/* The single uart_driver instance covering all "ttySC" ports. */
static struct uart_driver sci_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "sci",
	.dev_name	= "ttySC",
	.major		= SCI_MAJOR,
	.minor		= SCI_MINOR_START,
	.nr		= SCI_NPORTS,
	.cons		= SCI_CONSOLE,
};
1887
1888
/*
 * Platform device removal: unhook the cpufreq notifier, tear down each
 * registered port and drop its clock references, then free the
 * per-device private data.
 */
static int sci_remove(struct platform_device *dev)
{
	struct sh_sci_priv *priv = platform_get_drvdata(dev);
	struct sci_port *p;
	unsigned long flags;

	cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);

	/*
	 * NOTE(review): uart_remove_one_port() and clk_put() are called
	 * here with priv->lock held and interrupts disabled; verify
	 * neither can sleep on this path.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry(p, &priv->ports, node) {
		uart_remove_one_port(&sci_uart_driver, &p->port);
		clk_put(p->iclk);
		clk_put(p->fclk);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	kfree(priv);
	return 0;
}
1908
/*
 * Probe one port: initialize it from platform data, register it with
 * the serial core, and add it to this device's port list.  An
 * out-of-range index is reported but deliberately not treated as an
 * error.
 */
static int __devinit sci_probe_single(struct platform_device *dev,
				      unsigned int index,
				      struct plat_sci_port *p,
				      struct sci_port *sciport)
{
	struct sh_sci_priv *priv = platform_get_drvdata(dev);
	unsigned long flags;
	int ret;

	/* Sanity check */
	if (unlikely(index >= SCI_NPORTS)) {
		dev_notice(&dev->dev, "Attempting to register port "
			   "%d when only %d are available.\n",
			   index+1, SCI_NPORTS);
		dev_notice(&dev->dev, "Consider bumping "
			   "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
		return 0;
	}

	ret = sci_init_single(dev, sciport, index, p);
	if (ret)
		return ret;

	/*
	 * NOTE(review): if uart_add_one_port() fails here, the clock
	 * references taken in sci_init_single() are never released
	 * (sci_remove() only walks priv->ports) - confirm.
	 */
	ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&sciport->node);

	spin_lock_irqsave(&priv->lock, flags);
	list_add(&sciport->node, &priv->ports);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
1944
/*
 * Register a set of serial devices attached to a platform device. The
 * list is terminated with a zero flags entry, which means we expect
 * all entries to have at least UPF_BOOT_AUTOCONF set. Platforms that need
 * remapping (such as sh64) should also set UPF_IOREMAP.
 */
static int __devinit sci_probe(struct platform_device *dev)
{
	struct plat_sci_port *p = dev->dev.platform_data;
	struct sh_sci_priv *priv;
	int i, ret = -EINVAL;

#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
	/*
	 * Early (earlyprintk) probe: register a minimal boot console and
	 * return without any of the normal driver-model setup.
	 */
	if (is_early_platform_device(dev)) {
		if (dev->id == -1)
			return -ENOTSUPP;
		early_serial_console.index = dev->id;
		early_serial_console.data = &early_serial_port.port;
		sci_init_single(NULL, &early_serial_port, dev->id, p);
		serial_console_setup(&early_serial_console, early_serial_buf);
		/* "keep" on the command line retains the console after boot. */
		if (!strstr(early_serial_buf, "keep"))
			early_serial_console.flags |= CON_BOOT;
		register_console(&early_serial_console);
		return 0;
	}
#endif

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->ports);
	spin_lock_init(&priv->lock);
	platform_set_drvdata(dev, priv);

	/*
	 * Watch cpufreq transitions - presumably to track the resulting
	 * input clock change; see sci_notifier() (defined elsewhere).
	 */
	priv->clk_nb.notifier_call = sci_notifier;
	cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);

	if (dev->id != -1) {
		/* Single port bound to this platform device. */
		ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]);
		if (ret)
			goto err_unreg;
	} else {
		/* dev->id == -1: platform data is a zero-flags-terminated array. */
		for (i = 0; p && p->flags != 0; p++, i++) {
			ret = sci_probe_single(dev, i, p, &sci_ports[i]);
			if (ret)
				goto err_unreg;
		}
	}

#ifdef CONFIG_SH_STANDARD_BIOS
	sh_bios_gdb_detach();
#endif

	return 0;

err_unreg:
	sci_remove(dev);
	return ret;
}
2005
2006 static int sci_suspend(struct device *dev)
2007 {
2008 struct sh_sci_priv *priv = dev_get_drvdata(dev);
2009 struct sci_port *p;
2010 unsigned long flags;
2011
2012 spin_lock_irqsave(&priv->lock, flags);
2013 list_for_each_entry(p, &priv->ports, node)
2014 uart_suspend_port(&sci_uart_driver, &p->port);
2015 spin_unlock_irqrestore(&priv->lock, flags);
2016
2017 return 0;
2018 }
2019
2020 static int sci_resume(struct device *dev)
2021 {
2022 struct sh_sci_priv *priv = dev_get_drvdata(dev);
2023 struct sci_port *p;
2024 unsigned long flags;
2025
2026 spin_lock_irqsave(&priv->lock, flags);
2027 list_for_each_entry(p, &priv->ports, node)
2028 uart_resume_port(&sci_uart_driver, &p->port);
2029 spin_unlock_irqrestore(&priv->lock, flags);
2030
2031 return 0;
2032 }
2033
/* System sleep hooks for the platform driver. */
static const struct dev_pm_ops sci_dev_pm_ops = {
	.suspend = sci_suspend,
	.resume = sci_resume,
};
2038
/* Platform driver binding against "sh-sci" devices. */
static struct platform_driver sci_driver = {
	.probe		= sci_probe,
	.remove		= sci_remove,
	.driver		= {
		.name	= "sh-sci",
		.owner	= THIS_MODULE,
		.pm	= &sci_dev_pm_ops,
	},
};
2048
2049 static int __init sci_init(void)
2050 {
2051 int ret;
2052
2053 printk(banner);
2054
2055 ret = uart_register_driver(&sci_uart_driver);
2056 if (likely(ret == 0)) {
2057 ret = platform_driver_register(&sci_driver);
2058 if (unlikely(ret))
2059 uart_unregister_driver(&sci_uart_driver);
2060 }
2061
2062 return ret;
2063 }
2064
/* Module exit: unregister in the reverse order of sci_init(). */
static void __exit sci_exit(void)
{
	platform_driver_unregister(&sci_driver);
	uart_unregister_driver(&sci_uart_driver);
}
2070
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
/* Let "earlyprintk" probe this driver before regular initcalls run. */
early_platform_init_buffer("earlyprintk", &sci_driver,
			   early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
module_init(sci_init);
module_exit(sci_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-sci");
This page took 0.090508 seconds and 5 git commands to generate.