serial: sh-sci: Fix up SH-2A SCIF support.
[deliverable/linux.git] / drivers / tty / serial / sh-sci.c
1 /*
2 * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
3 *
4 * Copyright (C) 2002 - 2011 Paul Mundt
5 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
6 *
7 * based off of the old drivers/char/sh-sci.c by:
8 *
9 * Copyright (C) 1999, 2000 Niibe Yutaka
10 * Copyright (C) 2000 Sugioka Toshinobu
11 * Modified to support multiple serial ports. Stuart Menefy (May 2000).
12 * Modified to support SecureEdge. David McCullough (2002)
13 * Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
14 * Removed SH7300 support (Jul 2007).
15 *
16 * This file is subject to the terms and conditions of the GNU General Public
17 * License. See the file "COPYING" in the main directory of this archive
18 * for more details.
19 */
20 #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
21 #define SUPPORT_SYSRQ
22 #endif
23
24 #undef DEBUG
25
26 #include <linux/module.h>
27 #include <linux/errno.h>
28 #include <linux/timer.h>
29 #include <linux/interrupt.h>
30 #include <linux/tty.h>
31 #include <linux/tty_flip.h>
32 #include <linux/serial.h>
33 #include <linux/major.h>
34 #include <linux/string.h>
35 #include <linux/sysrq.h>
36 #include <linux/ioport.h>
37 #include <linux/mm.h>
38 #include <linux/init.h>
39 #include <linux/delay.h>
40 #include <linux/console.h>
41 #include <linux/platform_device.h>
42 #include <linux/serial_sci.h>
43 #include <linux/notifier.h>
44 #include <linux/pm_runtime.h>
45 #include <linux/cpufreq.h>
46 #include <linux/clk.h>
47 #include <linux/ctype.h>
48 #include <linux/err.h>
49 #include <linux/dmaengine.h>
50 #include <linux/dma-mapping.h>
51 #include <linux/scatterlist.h>
52 #include <linux/slab.h>
53
54 #ifdef CONFIG_SUPERH
55 #include <asm/sh_bios.h>
56 #endif
57
58 #include "sh-sci.h"
59
/*
 * Per-port driver state, embedding the generic uart_port.
 */
struct sci_port {
	struct uart_port port;

	/* Platform configuration (register map type, IRQ list, SCSCR bits) */
	struct plat_sci_port *cfg;

	/*
	 * Break timer: periodically re-samples the RX line so that a single
	 * break condition is reported only once (see sci_break_timer()).
	 * break_flag is the debounce state machine (0/1/2).
	 */
	struct timer_list break_timer;
	int break_flag;

	/* Interface clock */
	struct clk *iclk;
	/* Function clock */
	struct clk *fclk;

	/* Name strings handed to request_irq(), one per requested IRQ */
	char *irqstr[SCIx_NR_IRQS];

	struct dma_chan *chan_tx;
	struct dma_chan *chan_rx;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	struct dma_async_tx_descriptor *desc_tx;
	struct dma_async_tx_descriptor *desc_rx[2];	/* double-buffered RX */
	dma_cookie_t cookie_tx;
	dma_cookie_t cookie_rx[2];
	dma_cookie_t active_rx;		/* cookie of the in-flight RX buffer */
	struct scatterlist sg_tx;
	unsigned int sg_len_tx;
	struct scatterlist sg_rx[2];
	size_t buf_len_rx;		/* size of one RX DMA buffer */
	struct sh_dmae_slave param_tx;
	struct sh_dmae_slave param_rx;
	struct work_struct work_tx;
	struct work_struct work_rx;
	struct timer_list rx_timer;	/* re-armed on each RX IRQ/completion */
	unsigned int rx_timeout;	/* rx_timer delay, in jiffies */
#endif

	struct notifier_block freq_transition;

#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
	/* Register state saved/restored around console suspend */
	unsigned short saved_smr;
	unsigned short saved_fcr;
	unsigned char saved_brr;
#endif
};
106
107 /* Function prototypes */
108 static void sci_start_tx(struct uart_port *port);
109 static void sci_stop_tx(struct uart_port *port);
110 static void sci_start_rx(struct uart_port *port);
111
112 #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
113
114 static struct sci_port sci_ports[SCI_NPORTS];
115 static struct uart_driver sci_uart_driver;
116
/* Retrieve the driver-private sci_port embedding the given uart_port. */
static inline struct sci_port *
to_sci_port(struct uart_port *uart)
{
	return container_of(uart, struct sci_port, port);
}
122
/*
 * One entry of a port register map: byte offset from membase (scaled by
 * the port's regshift on access) and access width in bits.  size == 0
 * marks a register the port does not implement.
 */
struct plat_sci_reg {
	u8 offset, size;
};

/* Helper for invalidating specific entries of an inherited map. */
#define sci_reg_invalid	{ .offset = 0, .size = 0 }

/*
 * Per-regtype register maps, indexed first by SCIx_*_REGTYPE and then by
 * the symbolic register enum (SCSMR, SCBRR, ...).
 */
static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
	[SCIx_PROBE_REGTYPE] = {
		[0 ... SCIx_NR_REGS - 1] = sci_reg_invalid,
	},

	/*
	 * Common SCI definitions, dependent on the port's regshift
	 * value.
	 */
	[SCIx_SCI_REGTYPE] = {
		[SCSMR]		= { 0x00,  8 },
		[SCBRR]		= { 0x01,  8 },
		[SCSCR]		= { 0x02,  8 },
		[SCxTDR]	= { 0x03,  8 },
		[SCxSR]		= { 0x04,  8 },
		[SCxRDR]	= { 0x05,  8 },
		[SCFCR]		= sci_reg_invalid,
		[SCFDR]		= sci_reg_invalid,
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
	},

	/*
	 * Common definitions for legacy IrDA ports, dependent on
	 * regshift value.
	 */
	[SCIx_IRDA_REGTYPE] = {
		[SCSMR]		= { 0x00,  8 },
		[SCBRR]		= { 0x01,  8 },
		[SCSCR]		= { 0x02,  8 },
		[SCxTDR]	= { 0x03,  8 },
		[SCxSR]		= { 0x04,  8 },
		[SCxRDR]	= { 0x05,  8 },
		[SCFCR]		= { 0x06,  8 },
		[SCFDR]		= { 0x07, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
	},

	/*
	 * Common SCIFA definitions.
	 */
	[SCIx_SCIFA_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x20,  8 },
		[SCxSR]		= { 0x14, 16 },
		[SCxRDR]	= { 0x24,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
	},

	/*
	 * Common SCIFB definitions.
	 */
	[SCIx_SCIFB_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x40,  8 },
		[SCxSR]		= { 0x14, 16 },
		[SCxRDR]	= { 0x60,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
	},

	/*
	 * Common SH-2(A) SCIF definitions for ports with FIFO data
	 * count registers.
	 */
	[SCIx_SH2_SCIF_FIFODATA_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x0c,  8 },
		[SCxSR]		= { 0x10, 16 },
		[SCxRDR]	= { 0x14,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= { 0x20, 16 },
		[SCLSR]		= { 0x24, 16 },
	},

	/*
	 * Common SH-3 SCIF definitions.
	 */
	[SCIx_SH3_SCIF_REGTYPE] = {
		[SCSMR]		= { 0x00,  8 },
		[SCBRR]		= { 0x02,  8 },
		[SCSCR]		= { 0x04,  8 },
		[SCxTDR]	= { 0x06,  8 },
		[SCxSR]		= { 0x08, 16 },
		[SCxRDR]	= { 0x0a,  8 },
		[SCFCR]		= { 0x0c,  8 },
		[SCFDR]		= { 0x0e, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions.
	 */
	[SCIx_SH4_SCIF_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x0c,  8 },
		[SCxSR]		= { 0x10, 16 },
		[SCxRDR]	= { 0x14,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= { 0x20, 16 },
		[SCLSR]		= { 0x24, 16 },
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
	 * register.
	 */
	[SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x0c,  8 },
		[SCxSR]		= { 0x10, 16 },
		[SCxRDR]	= { 0x14,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= { 0x24, 16 },
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
	 * count registers.
	 */
	[SCIx_SH4_SCIF_FIFODATA_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x0c,  8 },
		[SCxSR]		= { 0x10, 16 },
		[SCxRDR]	= { 0x14,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= { 0x1c, 16 },	/* aliased to SCFDR */
		[SCRFDR]	= { 0x20, 16 },
		[SCSPTR]	= { 0x24, 16 },
		[SCLSR]		= { 0x28, 16 },
	},

	/*
	 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
	 * registers.
	 */
	[SCIx_SH7705_SCIF_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x20,  8 },
		[SCxSR]		= { 0x14, 16 },
		[SCxRDR]	= { 0x24,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
	},
};

/* Look up a register map entry for a port by symbolic register enum. */
#define sci_getreg(up, offset)	(sci_regmap[to_sci_port(up)->cfg->regtype] + offset)
323
324 /*
325 * The "offset" here is rather misleading, in that it refers to an enum
326 * value relative to the port mapping rather than the fixed offset
327 * itself, which needs to be manually retrieved from the platform's
328 * register map for the given port.
329 */
330 static unsigned int sci_serial_in(struct uart_port *p, int offset)
331 {
332 struct plat_sci_reg *reg = sci_getreg(p, offset);
333
334 if (reg->size == 8)
335 return ioread8(p->membase + (reg->offset << p->regshift));
336 else if (reg->size == 16)
337 return ioread16(p->membase + (reg->offset << p->regshift));
338 else
339 WARN(1, "Invalid register access\n");
340
341 return 0;
342 }
343
344 static void sci_serial_out(struct uart_port *p, int offset, int value)
345 {
346 struct plat_sci_reg *reg = sci_getreg(p, offset);
347
348 if (reg->size == 8)
349 iowrite8(value, p->membase + (reg->offset << p->regshift));
350 else if (reg->size == 16)
351 iowrite16(value, p->membase + (reg->offset << p->regshift));
352 else
353 WARN(1, "Invalid register access\n");
354 }
355
356 #define sci_in(up, offset) (up->serial_in(up, offset))
357 #define sci_out(up, offset, value) (up->serial_out(up, offset, value))
358
/*
 * Derive a register map type from the port type, for platforms that did
 * not specify cfg->regtype explicitly.
 *
 * Returns 0 on success, -EINVAL for an unrecognized port type.
 */
static int sci_probe_regmap(struct plat_sci_port *cfg)
{
	switch (cfg->type) {
	case PORT_SCI:
		cfg->regtype = SCIx_SCI_REGTYPE;
		break;
	case PORT_IRDA:
		cfg->regtype = SCIx_IRDA_REGTYPE;
		break;
	case PORT_SCIFA:
		cfg->regtype = SCIx_SCIFA_REGTYPE;
		break;
	case PORT_SCIFB:
		cfg->regtype = SCIx_SCIFB_REGTYPE;
		break;
	case PORT_SCIF:
		/*
		 * The SH-4 is a bit of a misnomer here, although that's
		 * where this particular port layout originated. This
		 * configuration (or some slight variation thereof)
		 * remains the dominant model for all SCIFs.
		 */
		cfg->regtype = SCIx_SH4_SCIF_REGTYPE;
		break;
	default:
		printk(KERN_ERR "Can't probe register map for given port\n");
		return -EINVAL;
	}

	return 0;
}
390
/*
 * Power the port up: take a runtime PM reference, then enable the
 * interface and function clocks.  uartclk tracks the interface clock
 * rate.  Ports with no device bound (early console use) are left alone.
 */
static void sci_port_enable(struct sci_port *sci_port)
{
	if (!sci_port->port.dev)
		return;

	pm_runtime_get_sync(sci_port->port.dev);

	clk_enable(sci_port->iclk);
	sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
	clk_enable(sci_port->fclk);
}
402
/*
 * Power the port down, undoing sci_port_enable() in reverse order:
 * clocks off first, then the runtime PM reference is dropped.
 */
static void sci_port_disable(struct sci_port *sci_port)
{
	if (!sci_port->port.dev)
		return;

	clk_disable(sci_port->fclk);
	clk_disable(sci_port->iclk);

	pm_runtime_put_sync(sci_port->port.dev);
}
413
414 #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
415
#ifdef CONFIG_CONSOLE_POLL
/*
 * Polling (kgdb/console) read: clear any pending receive errors first,
 * then return one character, or NO_POLL_CHAR when the receiver is empty.
 */
static int sci_poll_get_char(struct uart_port *port)
{
	unsigned short status;
	int c;

	do {
		status = sci_in(port, SCxSR);
		if (status & SCxSR_ERRORS(port)) {
			/* Toss errored state and re-sample */
			sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
			continue;
		}
		break;
	} while (1);

	if (!(status & SCxSR_RDxF(port)))
		return NO_POLL_CHAR;

	c = sci_in(port, SCxRDR);

	/* Dummy read, then ack the receive-full condition */
	sci_in(port, SCxSR);
	sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));

	return c;
}
#endif
443
444 static void sci_poll_put_char(struct uart_port *port, unsigned char c)
445 {
446 unsigned short status;
447
448 do {
449 status = sci_in(port, SCxSR);
450 } while (!(status & SCxSR_TDxE(port)));
451
452 sci_out(port, SCxTDR, c);
453 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
454 }
455 #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
456
/*
 * Configure the port's pin state for the given termios cflag, via a
 * platform-supplied hook when available, otherwise by driving RTS
 * through SCSPTR when hardware flow control is not in use.
 */
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	struct sci_port *s = to_sci_port(port);
	struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;

	/*
	 * Use port-specific handler if provided.
	 */
	if (s->cfg->ops && s->cfg->ops->init_pins) {
		s->cfg->ops->init_pins(port, cflag);
		return;
	}

	/*
	 * For the generic path SCSPTR is necessary. Bail out if that's
	 * unavailable, too.
	 */
	if (!reg->size)
		return;

	if (!(cflag & CRTSCTS))
		sci_out(port, SCSPTR, 0x0080); /* Set RTS = 1 */
}
480
/*
 * Number of characters currently queued in the transmitter, read from
 * the best source the port provides: SCTFDR if present, else the upper
 * byte of SCFDR, else the TDRE status bit (0 or 1 pending) on plain SCI.
 */
static int sci_txfill(struct uart_port *port)
{
	struct plat_sci_reg *reg;

	reg = sci_getreg(port, SCTFDR);
	if (reg->size)
		return sci_in(port, SCTFDR) & 0xff;

	reg = sci_getreg(port, SCFDR);
	if (reg->size)
		return sci_in(port, SCFDR) >> 8;

	return !(sci_in(port, SCxSR) & SCI_TDRE);
}
495
/* Free space remaining in the transmit FIFO, in characters. */
static int sci_txroom(struct uart_port *port)
{
	return port->fifosize - sci_txfill(port);
}
500
/*
 * Number of characters waiting in the receiver: SCRFDR if present, else
 * the low bits of SCFDR (masked to the FIFO depth), else the RDxF status
 * bit (0 or 1) on plain SCI.
 */
static int sci_rxfill(struct uart_port *port)
{
	struct plat_sci_reg *reg;

	reg = sci_getreg(port, SCRFDR);
	if (reg->size)
		return sci_in(port, SCRFDR) & 0xff;

	reg = sci_getreg(port, SCFDR);
	if (reg->size)
		return sci_in(port, SCFDR) & ((port->fifosize << 1) - 1);

	return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}
515
516 /*
517 * SCI helper for checking the state of the muxed port/RXD pins.
518 */
519 static inline int sci_rxd_in(struct uart_port *port)
520 {
521 struct sci_port *s = to_sci_port(port);
522
523 if (s->cfg->port_reg <= 0)
524 return 1;
525
526 return !!__raw_readb(s->cfg->port_reg);
527 }
528
529 /* ********************************************************************** *
530 * the interrupt related routines *
531 * ********************************************************************** */
532
/*
 * Drain the transmit ring buffer into the FIFO, from interrupt context
 * (called with the port lock held by sci_tx_interrupt()).
 *
 * If the transmitter is not ready yet, only TIE is adjusted so the
 * interrupt stays armed iff data remains.  Otherwise up to sci_txroom()
 * characters are written -- x_char takes priority -- and TX is stopped
 * once the ring runs dry.
 */
static void sci_transmit_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int stopped = uart_tx_stopped(port);
	unsigned short status;
	unsigned short ctrl;
	int count;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_TDxE(port))) {
		ctrl = sci_in(port, SCSCR);
		if (uart_circ_empty(xmit))
			ctrl &= ~SCSCR_TIE;
		else
			ctrl |= SCSCR_TIE;
		sci_out(port, SCSCR, ctrl);
		return;
	}

	count = sci_txroom(port);

	do {
		unsigned char c;

		if (port->x_char) {
			c = port->x_char;
			port->x_char = 0;
		} else if (!uart_circ_empty(xmit) && !stopped) {
			c = xmit->buf[xmit->tail];
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		} else {
			break;
		}

		sci_out(port, SCxTDR, c);

		port->icount.tx++;
	} while (--count > 0);

	/* Ack the transmit-empty condition we just serviced */
	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (uart_circ_empty(xmit)) {
		sci_stop_tx(port);
	} else {
		ctrl = sci_in(port, SCSCR);

		if (port->type != PORT_SCI) {
			sci_in(port, SCxSR); /* Dummy read */
			sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
		}

		/* More data pending: keep the TX interrupt enabled */
		ctrl |= SCSCR_TIE;
		sci_out(port, SCSCR, ctrl);
	}
}
590
/* On SH3, SCIF may read end-of-break as a space->mark char */
/*
 * True for "step"-shaped byte patterns (c together with c-1 covers all
 * bits: 0x00, 0x80, 0xc0, ... 0xff when sign-extended) -- the shapes a
 * break release can produce in the receiver.
 */
#define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); })

/*
 * Pull received characters out of the FIFO and hand them to the TTY
 * layer, filtering sysrq characters and (on SH3) break-debounce noise,
 * and tagging frame/parity errors per character.
 */
static void sci_receive_chars(struct uart_port *port)
{
	struct sci_port *sci_port = to_sci_port(port);
	struct tty_struct *tty = port->state->port.tty;
	int i, count, copied = 0;
	unsigned short status;
	unsigned char flag;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_RDxF(port)))
		return;

	while (1) {
		/* Don't copy more bytes than there is room for in the buffer */
		count = tty_buffer_request_room(tty, sci_rxfill(port));

		/* If for any reason we can't copy more data, we're done! */
		if (count == 0)
			break;

		if (port->type == PORT_SCI) {
			char c = sci_in(port, SCxRDR);
			if (uart_handle_sysrq_char(port, c) ||
			    sci_port->break_flag)
				count = 0;
			else
				tty_insert_flip_char(tty, c, TTY_NORMAL);
		} else {
			for (i = 0; i < count; i++) {
				char c = sci_in(port, SCxRDR);
				status = sci_in(port, SCxSR);
#if defined(CONFIG_CPU_SH3)
				/* Skip "chars" during break */
				if (sci_port->break_flag) {
					if ((c == 0) &&
					    (status & SCxSR_FER(port))) {
						count--; i--;
						continue;
					}

					/* Nonzero => end-of-break */
					dev_dbg(port->dev, "debounce<%02x>\n", c);
					sci_port->break_flag = 0;

					if (STEPFN(c)) {
						count--; i--;
						continue;
					}
				}
#endif /* CONFIG_CPU_SH3 */
				if (uart_handle_sysrq_char(port, c)) {
					count--; i--;
					continue;
				}

				/* Store data and status */
				if (status & SCxSR_FER(port)) {
					flag = TTY_FRAME;
					dev_notice(port->dev, "frame error\n");
				} else if (status & SCxSR_PER(port)) {
					flag = TTY_PARITY;
					dev_notice(port->dev, "parity error\n");
				} else
					flag = TTY_NORMAL;

				tty_insert_flip_char(tty, c, flag);
			}
		}

		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));

		copied += count;
		port->icount.rx += count;
	}

	if (copied) {
		/* Tell the rest of the system the news. New characters! */
		tty_flip_buffer_push(tty);
	} else {
		/* Nothing delivered: still ack the receive condition */
		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
	}
}
678
/* Break re-sample interval: 50ms */
#define SCI_BREAK_JIFFIES (HZ/20)

/*
 * The sci generates interrupts during the break,
 * 1 per millisecond or so during the break period, for 9600 baud.
 * So dont bother disabling interrupts.
 * But dont want more than 1 break event.
 * Use a kernel timer to periodically poll the rx line until
 * the break is finished.
 */
static inline void sci_schedule_break_timer(struct sci_port *port)
{
	mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
}
693
/* Ensure that two consecutive samples find the break over. */
/*
 * Timer callback: re-samples the RX line.  break_flag advances
 * 1 (break active) -> 2 (one mark sample seen) -> 0 (break over), and
 * the timer re-arms itself while the debounce is in progress.
 */
static void sci_break_timer(unsigned long data)
{
	struct sci_port *port = (struct sci_port *)data;

	/* Need the port clocked to sample the pin */
	sci_port_enable(port);

	if (sci_rxd_in(&port->port) == 0) {
		port->break_flag = 1;
		sci_schedule_break_timer(port);
	} else if (port->break_flag == 1) {
		/* break is over. */
		port->break_flag = 2;
		sci_schedule_break_timer(port);
	} else
		port->break_flag = 0;

	sci_port_disable(port);
}
713
714 static int sci_handle_errors(struct uart_port *port)
715 {
716 int copied = 0;
717 unsigned short status = sci_in(port, SCxSR);
718 struct tty_struct *tty = port->state->port.tty;
719 struct sci_port *s = to_sci_port(port);
720
721 /*
722 * Handle overruns, if supported.
723 */
724 if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) {
725 if (status & (1 << s->cfg->overrun_bit)) {
726 /* overrun error */
727 if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
728 copied++;
729
730 dev_notice(port->dev, "overrun error");
731 }
732 }
733
734 if (status & SCxSR_FER(port)) {
735 if (sci_rxd_in(port) == 0) {
736 /* Notify of BREAK */
737 struct sci_port *sci_port = to_sci_port(port);
738
739 if (!sci_port->break_flag) {
740 sci_port->break_flag = 1;
741 sci_schedule_break_timer(sci_port);
742
743 /* Do sysrq handling. */
744 if (uart_handle_break(port))
745 return 0;
746
747 dev_dbg(port->dev, "BREAK detected\n");
748
749 if (tty_insert_flip_char(tty, 0, TTY_BREAK))
750 copied++;
751 }
752
753 } else {
754 /* frame error */
755 if (tty_insert_flip_char(tty, 0, TTY_FRAME))
756 copied++;
757
758 dev_notice(port->dev, "frame error\n");
759 }
760 }
761
762 if (status & SCxSR_PER(port)) {
763 /* parity error */
764 if (tty_insert_flip_char(tty, 0, TTY_PARITY))
765 copied++;
766
767 dev_notice(port->dev, "parity error");
768 }
769
770 if (copied)
771 tty_flip_buffer_push(tty);
772
773 return copied;
774 }
775
/*
 * Check and clear a FIFO overrun via SCLSR, on ports that have that
 * register.  Returns the number of TTY characters inserted (0 or 1).
 */
static int sci_handle_fifo_overrun(struct uart_port *port)
{
	struct tty_struct *tty = port->state->port.tty;
	struct sci_port *s = to_sci_port(port);
	struct plat_sci_reg *reg;
	int copied = 0;

	/* No SCLSR register, nothing to do */
	reg = sci_getreg(port, SCLSR);
	if (!reg->size)
		return 0;

	if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
		sci_out(port, SCLSR, 0);

		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		tty_flip_buffer_push(tty);

		dev_notice(port->dev, "overrun error\n");
		copied++;
	}

	return copied;
}
799
/*
 * Handle a break condition reported via the status register, with SH3
 * debounce via break_flag, and fold in any FIFO overrun.  Returns the
 * number of characters inserted into the TTY.
 */
static int sci_handle_breaks(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = sci_in(port, SCxSR);
	struct tty_struct *tty = port->state->port.tty;
	struct sci_port *s = to_sci_port(port);

	/* Sysrq may consume the break entirely */
	if (uart_handle_break(port))
		return 0;

	if (!s->break_flag && status & SCxSR_BRK(port)) {
#if defined(CONFIG_CPU_SH3)
		/* Debounce break */
		s->break_flag = 1;
#endif
		/* Notify of BREAK */
		if (tty_insert_flip_char(tty, 0, TTY_BREAK))
			copied++;

		dev_dbg(port->dev, "BREAK detected\n");
	}

	if (copied)
		tty_flip_buffer_push(tty);

	copied += sci_handle_fifo_overrun(port);

	return copied;
}
829
/*
 * RX interrupt: with DMA active, mask further RX interrupts and let the
 * DMA path (plus rx_timer as a timeout fallback) collect the data;
 * otherwise receive in PIO via sci_receive_chars().
 */
static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
{
#ifdef CONFIG_SERIAL_SH_SCI_DMA
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

	if (s->chan_rx) {
		u16 scr = sci_in(port, SCSCR);
		u16 ssr = sci_in(port, SCxSR);

		/* Disable future Rx interrupts */
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			disable_irq_nosync(irq);
			/*
			 * NOTE(review): 0x4000 appears to be the SCIFA/B
			 * RX-DMA request enable bit -- confirm against the
			 * datasheet before touching.
			 */
			scr |= 0x4000;
		} else {
			scr &= ~SCSCR_RIE;
		}
		sci_out(port, SCSCR, scr);
		/* Clear current interrupt (bit 0 presumably being DR) */
		sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
			jiffies, s->rx_timeout);
		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

		return IRQ_HANDLED;
	}
#endif

	/* I think sci_receive_chars has to be called irrespective
	 * of whether the I_IXOFF is set, otherwise, how is the interrupt
	 * to be disabled?
	 */
	sci_receive_chars(ptr);

	return IRQ_HANDLED;
}
866
/* TX interrupt: drain the transmit ring under the port lock. */
static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	sci_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}
878
/*
 * Error interrupt: process error status (per-character on SCI, FIFO
 * overrun + RX drain on SCIF), clear the error condition, then kick TX
 * in case the error stalled a transmission.
 */
static irqreturn_t sci_er_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle errors */
	if (port->type == PORT_SCI) {
		if (sci_handle_errors(port)) {
			/* discard character in rx buffer */
			sci_in(port, SCxSR);
			sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
		}
	} else {
		sci_handle_fifo_overrun(port);
		sci_rx_interrupt(irq, ptr);
	}

	sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));

	/* Kick the transmission */
	sci_tx_interrupt(irq, ptr);

	return IRQ_HANDLED;
}
902
/* Break interrupt: report the break to the TTY and ack the condition. */
static irqreturn_t sci_br_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle BREAKs */
	sci_handle_breaks(port);
	sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));

	return IRQ_HANDLED;
}
913
/*
 * SCSCR mask of the RX-related interrupt-enable bits relevant to this
 * port (RIE always, REIE only when the platform asked for it).
 */
static inline unsigned long port_rx_irq_mask(struct uart_port *port)
{
	/*
	 * Not all ports (such as SCIFA) will support REIE. Rather than
	 * special-casing the port type, we check the port initialization
	 * IRQ enable mask to see whether the IRQ is desired at all. If
	 * it's unset, it's logically inferred that there's no point in
	 * testing for it.
	 */
	return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
}
925
/*
 * Demultiplexer for ports that route all events through a single IRQ:
 * inspect the status/control registers and dispatch to the TX, RX,
 * error and break handlers as indicated.
 */
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
	unsigned short ssr_status, scr_status, err_enabled;
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);
	irqreturn_t ret = IRQ_NONE;

	ssr_status = sci_in(port, SCxSR);
	scr_status = sci_in(port, SCSCR);
	err_enabled = scr_status & port_rx_irq_mask(port);

	/* Tx Interrupt (skipped when TX runs over DMA) */
	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
	    !s->chan_tx)
		ret = sci_tx_interrupt(irq, ptr);

	/*
	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
	 * DR flags
	 */
	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
	    (scr_status & SCSCR_RIE))
		ret = sci_rx_interrupt(irq, ptr);

	/* Error Interrupt */
	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
		ret = sci_er_interrupt(irq, ptr);

	/* Break Interrupt */
	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
		ret = sci_br_interrupt(irq, ptr);

	return ret;
}
960
961 /*
962 * Here we define a transition notifier so that we can update all of our
963 * ports' baud rate when the peripheral clock changes.
964 */
965 static int sci_notifier(struct notifier_block *self,
966 unsigned long phase, void *p)
967 {
968 struct sci_port *sci_port;
969 unsigned long flags;
970
971 sci_port = container_of(self, struct sci_port, freq_transition);
972
973 if ((phase == CPUFREQ_POSTCHANGE) ||
974 (phase == CPUFREQ_RESUMECHANGE)) {
975 struct uart_port *port = &sci_port->port;
976
977 spin_lock_irqsave(&port->lock, flags);
978 port->uartclk = clk_get_rate(sci_port->iclk);
979 spin_unlock_irqrestore(&port->lock, flags);
980 }
981
982 return NOTIFY_OK;
983 }
984
/* Handler/name pairs, indexed by the SCIx_*_IRQ enum. */
static struct sci_irq_desc {
	const char *desc;
	irq_handler_t handler;
} sci_irq_desc[] = {
	/*
	 * Split out handlers, the default case.
	 */
	[SCIx_ERI_IRQ] = {
		.desc = "rx err",
		.handler = sci_er_interrupt,
	},

	[SCIx_RXI_IRQ] = {
		.desc = "rx full",
		.handler = sci_rx_interrupt,
	},

	[SCIx_TXI_IRQ] = {
		.desc = "tx empty",
		.handler = sci_tx_interrupt,
	},

	[SCIx_BRI_IRQ] = {
		.desc = "break",
		.handler = sci_br_interrupt,
	},

	/*
	 * Special muxed handler.
	 */
	[SCIx_MUX_IRQ] = {
		.desc = "mux",
		.handler = sci_mpxed_interrupt,
	},
};
1020
1021 static int sci_request_irq(struct sci_port *port)
1022 {
1023 struct uart_port *up = &port->port;
1024 int i, j, ret = 0;
1025
1026 for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
1027 struct sci_irq_desc *desc;
1028 unsigned int irq;
1029
1030 if (SCIx_IRQ_IS_MUXED(port)) {
1031 i = SCIx_MUX_IRQ;
1032 irq = up->irq;
1033 } else
1034 irq = port->cfg->irqs[i];
1035
1036 desc = sci_irq_desc + i;
1037 port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
1038 dev_name(up->dev), desc->desc);
1039 if (!port->irqstr[j]) {
1040 dev_err(up->dev, "Failed to allocate %s IRQ string\n",
1041 desc->desc);
1042 goto out_nomem;
1043 }
1044
1045 ret = request_irq(irq, desc->handler, up->irqflags,
1046 port->irqstr[j], port);
1047 if (unlikely(ret)) {
1048 dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
1049 goto out_noirq;
1050 }
1051 }
1052
1053 return 0;
1054
1055 out_noirq:
1056 while (--i >= 0)
1057 free_irq(port->cfg->irqs[i], port);
1058
1059 out_nomem:
1060 while (--j >= 0)
1061 kfree(port->irqstr[j]);
1062
1063 return ret;
1064 }
1065
/*
 * Release every requested IRQ together with its name string.  Muxed
 * ports only ever requested a single IRQ/string pair (slot 0; this
 * assumes cfg->irqs[0] matches the shared vector -- TODO confirm), so
 * they bail out after the first iteration.
 */
static void sci_free_irq(struct sci_port *port)
{
	int i;

	for (i = 0; i < SCIx_NR_IRQS; i++) {
		free_irq(port->cfg->irqs[i], port);
		kfree(port->irqstr[i]);

		if (SCIx_IRQ_IS_MUXED(port)) {
			/* If there's only one IRQ, we're done. */
			return;
		}
	}
}
1084
/*
 * uart_ops.tx_empty: TIOCSER_TEMT when the shifter is done (TEND) and
 * no characters remain queued in the FIFO.
 */
static unsigned int sci_tx_empty(struct uart_port *port)
{
	unsigned short status = sci_in(port, SCxSR);
	unsigned short in_tx_fifo = sci_txfill(port);

	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}
1092
/* uart_ops.set_mctrl: intentionally a no-op. */
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* This routine is used for seting signals of: DTR, DCD, CTS/RTS */
	/* We use SCIF's hardware for CTS/RTS, so don't need any for that. */
	/* If you have signals for DTR and DCD, please implement here. */
}
1099
/* uart_ops.get_mctrl: modem signals are not wired up; report all set. */
static unsigned int sci_get_mctrl(struct uart_port *port)
{
	/* This routine is used for getting signals of: DTR, DCD, DSR, RI,
	   and CTS/RTS */

	return TIOCM_DTR | TIOCM_RTS | TIOCM_CTS | TIOCM_DSR;
}
1107
1108 #ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
 * TX DMA completion callback: advance the transmit ring tail past the
 * just-sent scatterlist, ack and drop the descriptor, then either
 * schedule the TX worker for remaining data or (SCIFA/B) clear TIE.
 */
static void sci_dma_tx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	spin_lock_irqsave(&port->lock, flags);

	xmit->tail += sg_dma_len(&s->sg_tx);
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += sg_dma_len(&s->sg_tx);

	async_tx_ack(s->desc_tx);
	s->cookie_tx = -EINVAL;
	s->desc_tx = NULL;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (!uart_circ_empty(xmit)) {
		schedule_work(&s->work_tx);
	} else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 ctrl = sci_in(port, SCSCR);
		sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
	}

	spin_unlock_irqrestore(&port->lock, flags);
}
1141
1142 /* Locking: called with port lock held */
1143 static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
1144 size_t count)
1145 {
1146 struct uart_port *port = &s->port;
1147 int i, active, room;
1148
1149 room = tty_buffer_request_room(tty, count);
1150
1151 if (s->active_rx == s->cookie_rx[0]) {
1152 active = 0;
1153 } else if (s->active_rx == s->cookie_rx[1]) {
1154 active = 1;
1155 } else {
1156 dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
1157 return 0;
1158 }
1159
1160 if (room < count)
1161 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
1162 count - room);
1163 if (!room)
1164 return room;
1165
1166 for (i = 0; i < room; i++)
1167 tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
1168 TTY_NORMAL);
1169
1170 port->icount.rx += room;
1171
1172 return room;
1173 }
1174
/*
 * RX DMA completion callback: push the completed buffer to the TTY,
 * re-arm the timeout timer, and schedule the worker to resubmit the
 * buffer.
 */
static void sci_dma_rx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct tty_struct *tty = port->state->port.tty;
	unsigned long flags;
	int count;

	dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);

	spin_lock_irqsave(&port->lock, flags);

	count = sci_dma_rx_push(s, tty, s->buf_len_rx);

	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

	spin_unlock_irqrestore(&port->lock, flags);

	if (count)
		tty_flip_buffer_push(tty);

	schedule_work(&s->work_rx);
}
1198
/*
 * Tear down the RX DMA channel and its coherent buffer, optionally
 * falling back to interrupt-driven (PIO) reception.
 */
static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;

	s->chan_rx = NULL;
	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
	dma_release_channel(chan);
	/* Both halves live in one allocation anchored at sg_rx[0] */
	if (sg_dma_address(&s->sg_rx[0]))
		dma_free_coherent(port->dev, s->buf_len_rx * 2,
				  sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
	if (enable_pio)
		sci_start_rx(port);
}
1213
/*
 * Tear down the TX DMA channel, optionally falling back to
 * interrupt-driven (PIO) transmission.
 */
static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;

	s->chan_tx = NULL;
	s->cookie_tx = -EINVAL;
	dma_release_channel(chan);
	if (enable_pio)
		sci_start_tx(port);
}
1225
/*
 * Submit both RX DMA buffers (double-buffering) and mark the first as
 * active.  On any preparation/submission failure the already-submitted
 * descriptors are acked and the port drops back to PIO reception.
 */
static void sci_submit_rx(struct sci_port *s)
{
	struct dma_chan *chan = s->chan_rx;
	int i;

	for (i = 0; i < 2; i++) {
		struct scatterlist *sg = &s->sg_rx[i];
		struct dma_async_tx_descriptor *desc;

		desc = chan->device->device_prep_slave_sg(chan,
			sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);

		if (desc) {
			s->desc_rx[i] = desc;
			desc->callback = sci_dma_rx_complete;
			desc->callback_param = s;
			s->cookie_rx[i] = desc->tx_submit(desc);
		}

		if (!desc || s->cookie_rx[i] < 0) {
			/* Unwind the first buffer if the second failed */
			if (i) {
				async_tx_ack(s->desc_rx[0]);
				s->cookie_rx[0] = -EINVAL;
			}
			if (desc) {
				async_tx_ack(desc);
				s->cookie_rx[i] = -EINVAL;
			}
			dev_warn(s->port.dev,
				 "failed to re-start DMA, using PIO\n");
			sci_rx_dma_release(s, true);
			return;
		}
		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
			s->cookie_rx[i], i);
	}

	s->active_rx = s->cookie_rx[0];

	dma_async_issue_pending(chan);
}
1267
/*
 * Rx workqueue handler.  Normal case: the active descriptor completed,
 * so resubmit it and make its sibling the active one, keeping the two
 * Rx buffers ping-ponging.  If the active transfer did NOT complete
 * (we got here via the Rx timeout in rx_timer_fn()), terminate the DMA,
 * push the partially received data and restart both descriptors.
 */
static void work_fn_rx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_rx);
	struct uart_port *port = &s->port;
	struct dma_async_tx_descriptor *desc;
	int new;

	/* Map the active cookie back to its buffer index */
	if (s->active_rx == s->cookie_rx[0]) {
		new = 0;
	} else if (s->active_rx == s->cookie_rx[1]) {
		new = 1;
	} else {
		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
		return;
	}
	desc = s->desc_rx[new];

	if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
	    DMA_SUCCESS) {
		/* Handle incomplete DMA receive */
		struct tty_struct *tty = port->state->port.tty;
		struct dma_chan *chan = s->chan_rx;
		/*
		 * shdma-specific descriptor wrapper; ->partial carries the
		 * byte count actually transferred before termination.
		 */
		struct sh_desc *sh_desc = container_of(desc, struct sh_desc,
						       async_tx);
		unsigned long flags;
		int count;

		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
		dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
			sh_desc->partial, sh_desc->cookie);

		spin_lock_irqsave(&port->lock, flags);
		count = sci_dma_rx_push(s, tty, sh_desc->partial);
		spin_unlock_irqrestore(&port->lock, flags);

		if (count)
			tty_flip_buffer_push(tty);

		sci_submit_rx(s);

		return;
	}

	/* Completed normally: recycle this descriptor ... */
	s->cookie_rx[new] = desc->tx_submit(desc);
	if (s->cookie_rx[new] < 0) {
		dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
		sci_rx_dma_release(s, true);
		return;
	}

	/* ... and wait on the sibling, which is now filling */
	s->active_rx = s->cookie_rx[!new];

	dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
		s->cookie_rx[new], new, s->active_rx);
}
1323
/*
 * Tx workqueue handler: slice the next contiguous chunk out of the
 * circular xmit buffer, wrap it in a slave descriptor and kick the DMA
 * engine.  Falls back to PIO if preparation or submission fails.
 */
static void work_fn_tx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_tx);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	struct scatterlist *sg = &s->sg_tx;

	/*
	 * DMA is idle now.
	 * Port xmit buffer is already mapped, and it is one page... Just adjust
	 * offsets and lengths. Since it is a circular buffer, we have to
	 * transmit till the end, and then the rest. Take the port lock to get a
	 * consistent xmit buffer state.
	 */
	spin_lock_irq(&port->lock);
	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
		sg->offset;
	/* Clamp to end of buffer; the wrapped remainder goes in a later pass */
	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
	spin_unlock_irq(&port->lock);

	/* Only scheduled with data pending (see sci_start_tx()) */
	BUG_ON(!sg_dma_len(sg));

	desc = chan->device->device_prep_slave_sg(chan,
			sg, s->sg_len_tx, DMA_TO_DEVICE,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);

	spin_lock_irq(&port->lock);
	s->desc_tx = desc;
	desc->callback = sci_dma_tx_complete;
	desc->callback_param = s;
	spin_unlock_irq(&port->lock);
	s->cookie_tx = desc->tx_submit(desc);
	if (s->cookie_tx < 0) {
		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
		xmit->buf, xmit->tail, xmit->head, s->cookie_tx);

	dma_async_issue_pending(chan);
}
1379 #endif
1380
/*
 * uart_ops .start_tx: enable transmission.  With DMA available this
 * (re)arms the Tx DMA request and schedules the Tx work; otherwise (or
 * additionally on SCIFA/SCIFB) it enables the Tx interrupt.
 */
static void sci_start_tx(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned short ctrl;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/*
		 * SCSCR bit 15 on SCIFA/SCIFB gates the Tx DMA request
		 * (presumably TDRQE -- confirm against the datasheet):
		 * keep it set only while a Tx DMA channel is held.
		 */
		u16 new, scr = sci_in(port, SCSCR);
		if (s->chan_tx)
			new = scr | 0x8000;
		else
			new = scr & ~0x8000;
		if (new != scr)
			sci_out(port, SCSCR, new);
	}

	/* Kick Tx work only if data is pending and no DMA is in flight */
	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
	    s->cookie_tx < 0)
		schedule_work(&s->work_tx);
#endif

	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
		ctrl = sci_in(port, SCSCR);
		sci_out(port, SCSCR, ctrl | SCSCR_TIE);
	}
}
1408
1409 static void sci_stop_tx(struct uart_port *port)
1410 {
1411 unsigned short ctrl;
1412
1413 /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
1414 ctrl = sci_in(port, SCSCR);
1415
1416 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1417 ctrl &= ~0x8000;
1418
1419 ctrl &= ~SCSCR_TIE;
1420
1421 sci_out(port, SCSCR, ctrl);
1422 }
1423
1424 static void sci_start_rx(struct uart_port *port)
1425 {
1426 unsigned short ctrl;
1427
1428 ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port);
1429
1430 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1431 ctrl &= ~0x4000;
1432
1433 sci_out(port, SCSCR, ctrl);
1434 }
1435
1436 static void sci_stop_rx(struct uart_port *port)
1437 {
1438 unsigned short ctrl;
1439
1440 ctrl = sci_in(port, SCSCR);
1441
1442 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1443 ctrl &= ~0x4000;
1444
1445 ctrl &= ~port_rx_irq_mask(port);
1446
1447 sci_out(port, SCSCR, ctrl);
1448 }
1449
/* uart_ops .enable_ms: modem-status interrupts are not implemented. */
static void sci_enable_ms(struct uart_port *port)
{
	/* Nothing here yet .. */
}
1454
/* uart_ops .break_ctl: break signalling is not implemented. */
static void sci_break_ctl(struct uart_port *port, int break_state)
{
	/* Nothing here yet .. */
}
1459
1460 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1461 static bool filter(struct dma_chan *chan, void *slave)
1462 {
1463 struct sh_dmae_slave *param = slave;
1464
1465 dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
1466 param->slave_id);
1467
1468 if (param->dma_dev == chan->device->dev) {
1469 chan->private = param;
1470 return true;
1471 } else {
1472 return false;
1473 }
1474 }
1475
/*
 * Rx DMA timeout: no Rx buffer completed within rx_timeout jiffies.
 * Re-enable the Rx interrupt so trickling data is picked up promptly,
 * and let work_fn_rx() deal with the partial DMA transfer.
 */
static void rx_timer_fn(unsigned long arg)
{
	struct sci_port *s = (struct sci_port *)arg;
	struct uart_port *port = &s->port;
	u16 scr = sci_in(port, SCSCR);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		scr &= ~0x4000;
		/* irqs[1] is the Rx IRQ line -- presumably; see SCIx_*_IRQ */
		enable_irq(s->cfg->irqs[1]);
	}
	sci_out(port, SCSCR, scr | SCSCR_RIE);
	dev_dbg(port->dev, "DMA Rx timed out\n");
	schedule_work(&s->work_rx);
}
1490
/*
 * Acquire Tx and Rx DMA channels for the port, if the platform data
 * names a DMA device.  Tx maps the serial core's circular xmit buffer;
 * Rx allocates a single coherent region split into two buffers for
 * double-buffered receive.  Each side independently falls back to PIO
 * on failure.
 */
static void sci_request_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	struct sh_dmae_slave *param;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int nent;

	dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
		port->line, s->cfg->dma_dev);

	/* No DMA device configured: stay with PIO */
	if (!s->cfg->dma_dev)
		return;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	param = &s->param_tx;

	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
	param->slave_id = s->cfg->dma_slave_tx;
	param->dma_dev = s->cfg->dma_dev;

	s->cookie_tx = -EINVAL;
	chan = dma_request_channel(mask, filter, param);
	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
	if (chan) {
		s->chan_tx = chan;
		sg_init_table(&s->sg_tx, 1);
		/* UART circular tx buffer is an aligned page. */
		BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
		sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
			    UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
		nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
		if (!nent)
			sci_tx_dma_release(s, false);
		else
			dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
				sg_dma_len(&s->sg_tx),
				port->state->xmit.buf, sg_dma_address(&s->sg_tx));

		s->sg_len_tx = nent;

		INIT_WORK(&s->work_tx, work_fn_tx);
	}

	param = &s->param_rx;

	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
	param->slave_id = s->cfg->dma_slave_rx;
	param->dma_dev = s->cfg->dma_dev;

	chan = dma_request_channel(mask, filter, param);
	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
	if (chan) {
		dma_addr_t dma[2];
		void *buf[2];
		int i;

		s->chan_rx = chan;

		/* Each Rx buffer is twice the FIFO size, at least 32 bytes */
		s->buf_len_rx = 2 * max(16, (int)port->fifosize);
		buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
					    &dma[0], GFP_KERNEL);

		if (!buf[0]) {
			dev_warn(port->dev,
				 "failed to allocate dma buffer, using PIO\n");
			sci_rx_dma_release(s, true);
			return;
		}

		/* Second buffer lives in the second half of the region */
		buf[1] = buf[0] + s->buf_len_rx;
		dma[1] = dma[0] + s->buf_len_rx;

		for (i = 0; i < 2; i++) {
			struct scatterlist *sg = &s->sg_rx[i];

			sg_init_table(sg, 1);
			sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
				    (int)buf[i] & ~PAGE_MASK);
			sg_dma_address(sg) = dma[i];
		}

		INIT_WORK(&s->work_rx, work_fn_rx);
		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);

		sci_submit_rx(s);
	}
}
1581
/*
 * Release any DMA channels acquired by sci_request_dma() without
 * falling back to PIO (the port is going down).
 */
static void sci_free_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	if (!s->cfg->dma_dev)
		return;

	if (s->chan_tx)
		sci_tx_dma_release(s, false);
	if (s->chan_rx)
		sci_rx_dma_release(s, false);
}
1594 #else
/* DMA support compiled out: PIO-only no-op stubs. */
static inline void sci_request_dma(struct uart_port *port)
{
}

static inline void sci_free_dma(struct uart_port *port)
{
}
1602 #endif
1603
/*
 * uart_ops .startup: power the port up, grab its IRQs and (optionally)
 * DMA channels, then enable both directions.
 */
static int sci_startup(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	int ret;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	sci_port_enable(s);

	ret = sci_request_irq(s);
	if (unlikely(ret < 0))
		return ret;

	/* Best-effort: falls back to PIO internally on failure */
	sci_request_dma(port);

	sci_start_tx(port);
	sci_start_rx(port);

	return 0;
}
1624
/*
 * uart_ops .shutdown: reverse of sci_startup() -- quiesce both
 * directions, then release DMA, IRQs and power, in that order.
 */
static void sci_shutdown(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	sci_stop_rx(port);
	sci_stop_tx(port);

	sci_free_dma(port);
	sci_free_irq(s);

	sci_port_disable(s);
}
1639
1640 static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
1641 unsigned long freq)
1642 {
1643 switch (algo_id) {
1644 case SCBRR_ALGO_1:
1645 return ((freq + 16 * bps) / (16 * bps) - 1);
1646 case SCBRR_ALGO_2:
1647 return ((freq + 16 * bps) / (32 * bps) - 1);
1648 case SCBRR_ALGO_3:
1649 return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
1650 case SCBRR_ALGO_4:
1651 return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
1652 case SCBRR_ALGO_5:
1653 return (((freq * 1000 / 32) / bps) - 1);
1654 }
1655
1656 /* Warn, but use a safe default */
1657 WARN_ON(1);
1658
1659 return ((freq + 16 * bps) / (32 * bps) - 1);
1660 }
1661
/*
 * Quiesce and reset the port: wait for the transmitter to drain, then
 * disable Tx/Rx and clock output, and reset the FIFOs on FIFO-equipped
 * ports.
 */
static void sci_reset(struct uart_port *port)
{
	unsigned int status;

	/* Busy-wait until transmission of the last frame has ended */
	do {
		status = sci_in(port, SCxSR);
	} while (!(status & SCxSR_TEND(port)));

	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */

	/* Plain SCI has no FIFO, hence no FIFO control register */
	if (port->type != PORT_SCI)
		sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
}
1675
/*
 * uart_ops .set_termios: program frame format (SCSMR), bit rate (SCBRR)
 * and flow control for the requested termios settings, then restore the
 * platform's SCSCR value and re-enable Rx if CREAD is set.
 */
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
			    struct ktermios *old)
{
	struct sci_port *s = to_sci_port(port);
	unsigned int baud, smr_val, max_baud;
	int t = -1;
	u16 scfcr = 0;

	/*
	 * earlyprintk comes here early on with port->uartclk set to zero.
	 * the clock framework is not up and running at this point so here
	 * we assume that 115200 is the maximum baud rate. please note that
	 * the baud rate is not programmed during earlyprintk - it is assumed
	 * that the previous boot loader has enabled required clocks and
	 * setup the baud rate generator hardware for us already.
	 */
	max_baud = port->uartclk ? port->uartclk / 16 : 115200;

	baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
	if (likely(baud && port->uartclk))
		t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);

	sci_port_enable(s);

	sci_reset(port);

	/* Preserve the clock-select bits (SCSMR bits 1:0) */
	smr_val = sci_in(port, SCSMR) & 3;

	/* Frame format bits -- presumed CHR/PE/(O/E)/STOP per SH datasheets */
	if ((termios->c_cflag & CSIZE) == CS7)
		smr_val |= 0x40;	/* 7-bit data */
	if (termios->c_cflag & PARENB)
		smr_val |= 0x20;	/* parity enable */
	if (termios->c_cflag & PARODD)
		smr_val |= 0x30;	/* parity enable + odd parity */
	if (termios->c_cflag & CSTOPB)
		smr_val |= 0x08;	/* 2 stop bits */

	uart_update_timeout(port, termios->c_cflag, baud);

	sci_out(port, SCSMR, smr_val);

	dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
		s->cfg->scscr);

	if (t > 0) {
		if (t >= 256) {
			/* Too big for SCBRR: switch to the /4 prescaler */
			sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1);
			t >>= 2;
		} else
			sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3);

		sci_out(port, SCBRR, t);
		udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
	}

	sci_init_pins(port, termios->c_cflag);
	sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));

	sci_out(port, SCSCR, s->cfg->scscr);

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	/*
	 * Calculate delay for 1.5 DMA buffers: see
	 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
	 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
	 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
	 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
	 * sizes), but it has been found out experimentally, that this is not
	 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
	 * as a minimum seem to work perfectly.
	 */
	if (s->chan_rx) {
		s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
			port->fifosize / 2;
		dev_dbg(port->dev,
			"DMA Rx t-out %ums, tty t-out %u jiffies\n",
			s->rx_timeout * 1000 / HZ, port->timeout);
		if (s->rx_timeout < msecs_to_jiffies(20))
			s->rx_timeout = msecs_to_jiffies(20);
	}
#endif

	if ((termios->c_cflag & CREAD) != 0)
		sci_start_rx(port);

	sci_port_disable(s);
}
1763
1764 static const char *sci_type(struct uart_port *port)
1765 {
1766 switch (port->type) {
1767 case PORT_IRDA:
1768 return "irda";
1769 case PORT_SCI:
1770 return "sci";
1771 case PORT_SCIF:
1772 return "scif";
1773 case PORT_SCIFA:
1774 return "scifa";
1775 case PORT_SCIFB:
1776 return "scifb";
1777 }
1778
1779 return NULL;
1780 }
1781
/* Size in bytes of the register window to request/remap for a port. */
static inline unsigned long sci_port_size(struct uart_port *port)
{
	/*
	 * Pick an arbitrary size that encapsulates all of the base
	 * registers by default. This can be optimized later, or derived
	 * from platform resource data at such a time that ports begin to
	 * behave more erratically.
	 */
	return 64;
}
1792
/*
 * Establish port->membase: ioremap the register window when the
 * platform asked for it (UPF_IOREMAP), otherwise reuse the mapbase
 * cookie directly.  Idempotent -- an existing membase is kept.
 */
static int sci_remap_port(struct uart_port *port)
{
	unsigned long size = sci_port_size(port);

	/*
	 * Nothing to do if there's already an established membase.
	 */
	if (port->membase)
		return 0;

	if (port->flags & UPF_IOREMAP) {
		port->membase = ioremap_nocache(port->mapbase, size);
		if (unlikely(!port->membase)) {
			dev_err(port->dev, "can't remap port#%d\n", port->line);
			return -ENXIO;
		}
	} else {
		/*
		 * For the simple (and majority of) cases where we don't
		 * need to do any remapping, just cast the cookie
		 * directly.
		 */
		port->membase = (void __iomem *)port->mapbase;
	}

	return 0;
}
1820
/*
 * uart_ops .release_port: undo sci_request_port() -- unmap the
 * registers if we ioremapped them, then release the memory region.
 */
static void sci_release_port(struct uart_port *port)
{
	if (port->flags & UPF_IOREMAP) {
		iounmap(port->membase);
		port->membase = NULL;
	}

	release_mem_region(port->mapbase, sci_port_size(port));
}
1830
1831 static int sci_request_port(struct uart_port *port)
1832 {
1833 unsigned long size = sci_port_size(port);
1834 struct resource *res;
1835 int ret;
1836
1837 res = request_mem_region(port->mapbase, size, dev_name(port->dev));
1838 if (unlikely(res == NULL))
1839 return -EBUSY;
1840
1841 ret = sci_remap_port(port);
1842 if (unlikely(ret != 0)) {
1843 release_resource(res);
1844 return ret;
1845 }
1846
1847 return 0;
1848 }
1849
1850 static void sci_config_port(struct uart_port *port, int flags)
1851 {
1852 if (flags & UART_CONFIG_TYPE) {
1853 struct sci_port *sport = to_sci_port(port);
1854
1855 port->type = sport->cfg->type;
1856 sci_request_port(port);
1857 }
1858 }
1859
1860 static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
1861 {
1862 struct sci_port *s = to_sci_port(port);
1863
1864 if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
1865 return -EINVAL;
1866 if (ser->baud_base < 2400)
1867 /* No paper tape reader for Mitch.. */
1868 return -EINVAL;
1869
1870 return 0;
1871 }
1872
/* serial core hooks; polling ops are only built with CONSOLE_POLL. */
static struct uart_ops sci_uart_ops = {
	.tx_empty	= sci_tx_empty,
	.set_mctrl	= sci_set_mctrl,
	.get_mctrl	= sci_get_mctrl,
	.start_tx	= sci_start_tx,
	.stop_tx	= sci_stop_tx,
	.stop_rx	= sci_stop_rx,
	.enable_ms	= sci_enable_ms,
	.break_ctl	= sci_break_ctl,
	.startup	= sci_startup,
	.shutdown	= sci_shutdown,
	.set_termios	= sci_set_termios,
	.type		= sci_type,
	.release_port	= sci_release_port,
	.request_port	= sci_request_port,
	.config_port	= sci_config_port,
	.verify_port	= sci_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= sci_poll_get_char,
	.poll_put_char	= sci_poll_put_char,
#endif
};
1895
/*
 * One-time initialization of a single port from its platform data:
 * uart_port fields, FIFO size, register map, clocks, break timer, and
 * error/overrun detection defaults.
 *
 * @dev may be NULL for the earlyprintk path, in which case clock and
 * runtime-PM setup is skipped.
 */
static int __devinit sci_init_single(struct platform_device *dev,
				     struct sci_port *sci_port,
				     unsigned int index,
				     struct plat_sci_port *p)
{
	struct uart_port *port = &sci_port->port;
	int ret;

	port->ops	= &sci_uart_ops;
	port->iotype	= UPIO_MEM;
	port->line	= index;

	/* FIFO depth per port variant; plain SCI has no FIFO */
	switch (p->type) {
	case PORT_SCIFB:
		port->fifosize = 256;
		break;
	case PORT_SCIFA:
		port->fifosize = 64;
		break;
	case PORT_SCIF:
		port->fifosize = 16;
		break;
	default:
		port->fifosize = 1;
		break;
	}

	/* Let the platform probe the register layout if it didn't say */
	if (p->regtype == SCIx_PROBE_REGTYPE) {
		ret = sci_probe_regmap(p);
		if (unlikely(ret))
			return ret;
	}

	if (dev) {
		/* Interface clock: "sci_ick", falling back to the legacy
		 * "peripheral_clk" name. */
		sci_port->iclk = clk_get(&dev->dev, "sci_ick");
		if (IS_ERR(sci_port->iclk)) {
			sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
			if (IS_ERR(sci_port->iclk)) {
				dev_err(&dev->dev, "can't get iclk\n");
				return PTR_ERR(sci_port->iclk);
			}
		}

		/*
		 * The function clock is optional, ignore it if we can't
		 * find it.
		 */
		sci_port->fclk = clk_get(&dev->dev, "sci_fck");
		if (IS_ERR(sci_port->fclk))
			sci_port->fclk = NULL;

		port->dev = &dev->dev;

		pm_runtime_irq_safe(&dev->dev);
		pm_runtime_enable(&dev->dev);
	}

	sci_port->break_timer.data = (unsigned long)sci_port;
	sci_port->break_timer.function = sci_break_timer;
	init_timer(&sci_port->break_timer);

	/*
	 * Establish some sensible defaults for the error detection.
	 */
	if (!p->error_mask)
		p->error_mask = (p->type == PORT_SCI) ?
			SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK;

	/*
	 * Establish sensible defaults for the overrun detection, unless
	 * the part has explicitly disabled support for it.
	 */
	if (p->overrun_bit != SCIx_NOT_SUPPORTED) {
		if (p->type == PORT_SCI)
			p->overrun_bit = 5;
		else if (p->scbrr_algo_id == SCBRR_ALGO_4)
			p->overrun_bit = 9;	/* SH-2A style SCIF */
		else
			p->overrun_bit = 0;

		/*
		 * Make the error mask inclusive of overrun detection, if
		 * supported.
		 */
		p->error_mask |= (1 << p->overrun_bit);
	}

	sci_port->cfg		= p;

	port->mapbase		= p->mapbase;
	port->type		= p->type;
	port->flags		= p->flags;
	port->regshift		= p->regshift;

	/*
	 * The UART port needs an IRQ value, so we peg this to the RX IRQ
	 * for the multi-IRQ ports, which is where we are primarily
	 * concerned with the shutdown path synchronization.
	 *
	 * For the muxed case there's nothing more to do.
	 */
	port->irq		= p->irqs[SCIx_RXI_IRQ];
	port->irqflags		= 0;

	port->serial_in		= sci_serial_in;
	port->serial_out	= sci_serial_out;

	if (p->dma_dev)
		dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
			p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);

	return 0;
}
2009
2010 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
/* uart_console_write() per-character callback: emit one char by polling. */
static void serial_console_putchar(struct uart_port *port, int ch)
{
	sci_poll_put_char(port, ch);
}
2015
/*
 * Print a string to the serial port trying not to disturb
 * any possible real use of the port...
 */
static void serial_console_write(struct console *co, const char *s,
				 unsigned count)
{
	struct sci_port *sci_port = &sci_ports[co->index];
	struct uart_port *port = &sci_port->port;
	unsigned short bits;

	sci_port_enable(sci_port);

	uart_console_write(port, s, count, serial_console_putchar);

	/* wait until fifo is empty and last bit has been transmitted */
	bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
	while ((sci_in(port, SCxSR) & bits) != bits)
		cpu_relax();

	sci_port_disable(sci_port);
}
2038
/*
 * Console .setup: validate the port index, make sure the registers are
 * mapped, parse any "baud,parity,bits,flow" option string and hand the
 * result to uart_set_options().
 */
static int __devinit serial_console_setup(struct console *co, char *options)
{
	struct sci_port *sci_port;
	struct uart_port *port;
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Refuse to handle any bogus ports.
	 */
	if (co->index < 0 || co->index >= SCI_NPORTS)
		return -ENODEV;

	sci_port = &sci_ports[co->index];
	port = &sci_port->port;

	/*
	 * Refuse to handle uninitialized ports.
	 */
	if (!port->ops)
		return -ENODEV;

	ret = sci_remap_port(port);
	if (unlikely(ret != 0))
		return ret;

	sci_port_enable(sci_port);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	sci_port_disable(sci_port);

	return uart_set_options(port, co, baud, parity, bits, flow);
}
2077
/* Regular console on ttySC, registered via the uart driver. */
static struct console serial_console = {
	.name		= "ttySC",
	.device		= uart_console_device,
	.write		= serial_console_write,
	.setup		= serial_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &sci_uart_driver,
};
2087
/* Boot-time console used before the real driver is up (earlyprintk). */
static struct console early_serial_console = {
	.name           = "early_ttySC",
	.write          = serial_console_write,
	.flags          = CON_PRINTBUFFER,
	.index		= -1,
};

/* Buffer for the earlyprintk= command-line options */
static char early_serial_buf[32];
2096
/*
 * Early platform probe: bring up the earlyprintk console on the port
 * before full device/clock infrastructure exists.  CON_BOOT makes the
 * console hand over at real console registration unless "keep" was
 * given on the command line.
 */
static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
{
	struct plat_sci_port *cfg = pdev->dev.platform_data;

	/*
	 * Guard against double registration -- relies on .data being
	 * non-NULL once set up (NOTE(review): .data is not assigned in
	 * this chunk; verify where it gets set).
	 */
	if (early_serial_console.data)
		return -EEXIST;

	early_serial_console.index = pdev->id;

	sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg);

	serial_console_setup(&early_serial_console, early_serial_buf);

	if (!strstr(early_serial_buf, "keep"))
		early_serial_console.flags |= CON_BOOT;

	register_console(&early_serial_console);
	return 0;
}
2116
/* True iff this port is the one backing the registered console. */
#define uart_console(port)	((port)->cons->index == (port)->line)
2118
/*
 * Runtime PM suspend: for the console port, save the registers that
 * the power domain may lose (SMR/BRR/FCR) so the console keeps working
 * across power-off; see sci_runtime_resume().
 */
static int sci_runtime_suspend(struct device *dev)
{
	struct sci_port *sci_port = dev_get_drvdata(dev);
	struct uart_port *port = &sci_port->port;

	if (uart_console(port)) {
		sci_port->saved_smr = sci_in(port, SCSMR);
		sci_port->saved_brr = sci_in(port, SCBRR);
		sci_port->saved_fcr = sci_in(port, SCFCR);
	}
	return 0;
}
2131
/*
 * Runtime PM resume: restore the console port's saved registers after
 * a reset, then re-apply the platform's SCSCR value.
 */
static int sci_runtime_resume(struct device *dev)
{
	struct sci_port *sci_port = dev_get_drvdata(dev);
	struct uart_port *port = &sci_port->port;

	if (uart_console(port)) {
		sci_reset(port);
		sci_out(port, SCSMR, sci_port->saved_smr);
		sci_out(port, SCBRR, sci_port->saved_brr);
		sci_out(port, SCFCR, sci_port->saved_fcr);
		sci_out(port, SCSCR, sci_port->cfg->scscr);
	}
	return 0;
}
2146
2147 #define SCI_CONSOLE (&serial_console)
2148
2149 #else
2150 static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
2151 {
2152 return -EINVAL;
2153 }
2154
2155 #define SCI_CONSOLE NULL
2156 #define sci_runtime_suspend NULL
2157 #define sci_runtime_resume NULL
2158
2159 #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
2160
2161 static char banner[] __initdata =
2162 KERN_INFO "SuperH SCI(F) driver initialized\n";
2163
/* Serial core driver registration: ttySC major/minor space. */
static struct uart_driver sci_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "sci",
	.dev_name	= "ttySC",
	.major		= SCI_MAJOR,
	.minor		= SCI_MINOR_START,
	.nr		= SCI_NPORTS,
	.cons		= SCI_CONSOLE,
};
2173
/*
 * Platform .remove: unhook the cpufreq notifier, detach from the serial
 * core, and drop the clocks and runtime PM taken in sci_init_single().
 * Also used as the error unwind path of sci_probe().
 */
static int sci_remove(struct platform_device *dev)
{
	struct sci_port *port = platform_get_drvdata(dev);

	cpufreq_unregister_notifier(&port->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);

	uart_remove_one_port(&sci_uart_driver, &port->port);

	clk_put(port->iclk);
	clk_put(port->fclk);

	pm_runtime_disable(&dev->dev);
	return 0;
}
2189
/*
 * Initialize one port and register it with the serial core.  An
 * out-of-range index is reported but deliberately not treated as an
 * error so the remaining ports still probe.
 */
static int __devinit sci_probe_single(struct platform_device *dev,
				      unsigned int index,
				      struct plat_sci_port *p,
				      struct sci_port *sciport)
{
	int ret;

	/* Sanity check */
	if (unlikely(index >= SCI_NPORTS)) {
		dev_notice(&dev->dev, "Attempting to register port "
			   "%d when only %d are available.\n",
			   index+1, SCI_NPORTS);
		dev_notice(&dev->dev, "Consider bumping "
			   "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
		return 0;
	}

	ret = sci_init_single(dev, sciport, index, p);
	if (ret)
		return ret;

	return uart_add_one_port(&sci_uart_driver, &sciport->port);
}
2213
/*
 * Platform .probe: route earlyprintk devices to the early path,
 * otherwise register the port and hook the cpufreq transition notifier.
 * On failure, sci_remove() unwinds whatever was set up.
 */
static int __devinit sci_probe(struct platform_device *dev)
{
	struct plat_sci_port *p = dev->dev.platform_data;
	struct sci_port *sp = &sci_ports[dev->id];
	int ret;

	/*
	 * If we've come here via earlyprintk initialization, head off to
	 * the special early probe. We don't have sufficient device state
	 * to make it beyond this yet.
	 */
	if (is_early_platform_device(dev))
		return sci_probe_earlyprintk(dev);

	/* Set before probing so the sci_remove() unwind path can use it */
	platform_set_drvdata(dev, sp);

	ret = sci_probe_single(dev, dev->id, p, sp);
	if (ret)
		goto err_unreg;

	sp->freq_transition.notifier_call = sci_notifier;

	ret = cpufreq_register_notifier(&sp->freq_transition,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (unlikely(ret < 0))
		goto err_unreg;

#ifdef CONFIG_SH_STANDARD_BIOS
	sh_bios_gdb_detach();
#endif

	return 0;

err_unreg:
	sci_remove(dev);
	return ret;
}
2251
2252 static int sci_suspend(struct device *dev)
2253 {
2254 struct sci_port *sport = dev_get_drvdata(dev);
2255
2256 if (sport)
2257 uart_suspend_port(&sci_uart_driver, &sport->port);
2258
2259 return 0;
2260 }
2261
2262 static int sci_resume(struct device *dev)
2263 {
2264 struct sci_port *sport = dev_get_drvdata(dev);
2265
2266 if (sport)
2267 uart_resume_port(&sci_uart_driver, &sport->port);
2268
2269 return 0;
2270 }
2271
/* Runtime + system PM callbacks (runtime ops are console-save only). */
static const struct dev_pm_ops sci_dev_pm_ops = {
	.runtime_suspend = sci_runtime_suspend,
	.runtime_resume = sci_runtime_resume,
	.suspend	= sci_suspend,
	.resume		= sci_resume,
};
2278
/* Platform driver glue; binds against "sh-sci" platform devices. */
static struct platform_driver sci_driver = {
	.probe		= sci_probe,
	.remove		= sci_remove,
	.driver		= {
		.name	= "sh-sci",
		.owner	= THIS_MODULE,
		.pm	= &sci_dev_pm_ops,
	},
};
2288
2289 static int __init sci_init(void)
2290 {
2291 int ret;
2292
2293 printk(banner);
2294
2295 ret = uart_register_driver(&sci_uart_driver);
2296 if (likely(ret == 0)) {
2297 ret = platform_driver_register(&sci_driver);
2298 if (unlikely(ret))
2299 uart_unregister_driver(&sci_uart_driver);
2300 }
2301
2302 return ret;
2303 }
2304
/* Module exit: unregister in reverse order of sci_init(). */
static void __exit sci_exit(void)
{
	platform_driver_unregister(&sci_driver);
	uart_unregister_driver(&sci_uart_driver);
}
2310
2311 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
2312 early_platform_init_buffer("earlyprintk", &sci_driver,
2313 early_serial_buf, ARRAY_SIZE(early_serial_buf));
2314 #endif
2315 module_init(sci_init);
2316 module_exit(sci_exit);
2317
2318 MODULE_LICENSE("GPL");
2319 MODULE_ALIAS("platform:sh-sci");
2320 MODULE_AUTHOR("Paul Mundt");
2321 MODULE_DESCRIPTION("SuperH SCI(F) serial driver");
This page took 0.242074 seconds and 5 git commands to generate.