2 * Driver for Atmel AT91 / AT32 Serial ports
3 * Copyright (C) 2003 Rick Bronson
5 * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
6 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
8 * DMA support added by Chip Coldwell.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/tty.h>
26 #include <linux/ioport.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/serial.h>
30 #include <linux/clk.h>
31 #include <linux/console.h>
32 #include <linux/sysrq.h>
33 #include <linux/tty_flip.h>
34 #include <linux/platform_device.h>
36 #include <linux/of_device.h>
37 #include <linux/of_gpio.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/dmaengine.h>
40 #include <linux/atmel_pdc.h>
41 #include <linux/atmel_serial.h>
42 #include <linux/uaccess.h>
43 #include <linux/platform_data/atmel.h>
44 #include <linux/timer.h>
45 #include <linux/gpio.h>
46 #include <linux/gpio/consumer.h>
47 #include <linux/err.h>
48 #include <linux/irq.h>
49 #include <linux/suspend.h>
52 #include <asm/ioctls.h>
54 #define PDC_BUFFER_SIZE 512
55 /* Revisit: We should calculate this based on the actual port settings */
56 #define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */
58 /* The minimum number of data FIFOs should be able to contain */
59 #define ATMEL_MIN_FIFO_SIZE 8
61 * These two offsets are subtracted from the RX FIFO size to define the RTS
62 * high and low thresholds
64 #define ATMEL_RTS_HIGH_OFFSET 16
65 #define ATMEL_RTS_LOW_OFFSET 20
67 #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
71 #include <linux/serial_core.h>
73 #include "serial_mctrl_gpio.h"
75 static void atmel_start_rx(struct uart_port
*port
);
76 static void atmel_stop_rx(struct uart_port
*port
);
78 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
80 /* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
81 * should coexist with the 8250 driver, such as if we have an external 16C550
83 #define SERIAL_ATMEL_MAJOR 204
84 #define MINOR_START 154
85 #define ATMEL_DEVICENAME "ttyAT"
89 /* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
90 * name, but it is legally reserved for the 8250 driver. */
91 #define SERIAL_ATMEL_MAJOR TTY_MAJOR
92 #define MINOR_START 64
93 #define ATMEL_DEVICENAME "ttyS"
97 #define ATMEL_ISR_PASS_LIMIT 256
99 struct atmel_dma_buffer
{
102 unsigned int dma_size
;
106 struct atmel_uart_char
{
112 * Be careful, the real size of the ring buffer is
113 * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that ring buffer
114 * can contain up to 1024 characters in PIO mode and up to 4096 characters in
117 #define ATMEL_SERIAL_RINGSIZE 1024
120 * at91: 6 USARTs and one DBGU port (SAM9260)
123 #define ATMEL_MAX_UART 7
126 * We wrap our port structure around the generic uart_port.
128 struct atmel_uart_port
{
129 struct uart_port uart
; /* uart */
130 struct clk
*clk
; /* uart clock */
131 int may_wakeup
; /* cached value of device_may_wakeup for times we need to disable it */
132 u32 backup_imr
; /* IMR saved during suspend */
133 int break_active
; /* break being received */
135 bool use_dma_rx
; /* enable DMA receiver */
136 bool use_pdc_rx
; /* enable PDC receiver */
137 short pdc_rx_idx
; /* current PDC RX buffer */
138 struct atmel_dma_buffer pdc_rx
[2]; /* PDC receier */
140 bool use_dma_tx
; /* enable DMA transmitter */
141 bool use_pdc_tx
; /* enable PDC transmitter */
142 struct atmel_dma_buffer pdc_tx
; /* PDC transmitter */
144 spinlock_t lock_tx
; /* port lock */
145 spinlock_t lock_rx
; /* port lock */
146 struct dma_chan
*chan_tx
;
147 struct dma_chan
*chan_rx
;
148 struct dma_async_tx_descriptor
*desc_tx
;
149 struct dma_async_tx_descriptor
*desc_rx
;
150 dma_cookie_t cookie_tx
;
151 dma_cookie_t cookie_rx
;
152 struct scatterlist sg_tx
;
153 struct scatterlist sg_rx
;
154 struct tasklet_struct tasklet_rx
;
155 struct tasklet_struct tasklet_tx
;
156 atomic_t tasklet_shutdown
;
157 unsigned int irq_status_prev
;
160 struct circ_buf rx_ring
;
162 struct mctrl_gpios
*gpios
;
163 unsigned int tx_done_mask
;
168 u32 rtor
; /* address of receiver timeout register if it exists */
170 struct timer_list uart_timer
;
173 unsigned int pending
;
174 unsigned int pending_status
;
175 spinlock_t lock_suspended
;
177 int (*prepare_rx
)(struct uart_port
*port
);
178 int (*prepare_tx
)(struct uart_port
*port
);
179 void (*schedule_rx
)(struct uart_port
*port
);
180 void (*schedule_tx
)(struct uart_port
*port
);
181 void (*release_rx
)(struct uart_port
*port
);
182 void (*release_tx
)(struct uart_port
*port
);
185 static struct atmel_uart_port atmel_ports
[ATMEL_MAX_UART
];
186 static DECLARE_BITMAP(atmel_ports_in_use
, ATMEL_MAX_UART
);
189 static struct console atmel_console
;
192 #if defined(CONFIG_OF)
193 static const struct of_device_id atmel_serial_dt_ids
[] = {
194 { .compatible
= "atmel,at91rm9200-usart" },
195 { .compatible
= "atmel,at91sam9260-usart" },
/* Map a generic uart_port back to the atmel_uart_port that wraps it. */
static inline struct atmel_uart_port *
to_atmel_uart_port(struct uart_port *uart)
{
	/* 'uart' is the embedded uart_port member of struct atmel_uart_port */
	return container_of(uart, struct atmel_uart_port, uart);
}
/* Read a 32-bit USART register at offset 'reg' from the port's MMIO base. */
static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
{
	return __raw_readl(port->membase + reg);
}
/* Write 'value' to the 32-bit USART register at offset 'reg'. */
static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
{
	__raw_writel(value, port->membase + reg);
}
/*
 * NOTE(review): the preprocessor guards around these two variants were lost
 * in extraction and have been reconstructed — confirm against upstream.
 */
#ifdef CONFIG_AVR32

/* AVR32 cannot handle 8 or 16bit I/O accesses but only 32bit I/O accesses */
static inline u8 atmel_uart_read_char(struct uart_port *port)
{
	return __raw_readl(port->membase + ATMEL_US_RHR);
}

static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
{
	__raw_writel(value, port->membase + ATMEL_US_THR);
}

#else

/* Other platforms use byte-wide accesses to the RX/TX holding registers. */
static inline u8 atmel_uart_read_char(struct uart_port *port)
{
	return __raw_readb(port->membase + ATMEL_US_RHR);
}

static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
{
	__raw_writeb(value, port->membase + ATMEL_US_THR);
}

#endif
243 #ifdef CONFIG_SERIAL_ATMEL_PDC
244 static bool atmel_use_pdc_rx(struct uart_port
*port
)
246 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
248 return atmel_port
->use_pdc_rx
;
251 static bool atmel_use_pdc_tx(struct uart_port
*port
)
253 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
255 return atmel_port
->use_pdc_tx
;
258 static bool atmel_use_pdc_rx(struct uart_port
*port
)
263 static bool atmel_use_pdc_tx(struct uart_port
*port
)
269 static bool atmel_use_dma_tx(struct uart_port
*port
)
271 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
273 return atmel_port
->use_dma_tx
;
276 static bool atmel_use_dma_rx(struct uart_port
*port
)
278 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
280 return atmel_port
->use_dma_rx
;
283 static bool atmel_use_fifo(struct uart_port
*port
)
285 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
287 return atmel_port
->fifo_size
;
/*
 * Schedule a tasklet unless the port is shutting down; tasklet_shutdown is
 * raised on the shutdown path so no new work is queued after teardown starts.
 * NOTE(review): the tasklet_schedule(t) call was lost in extraction and has
 * been restored per upstream — confirm.
 */
static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
				   struct tasklet_struct *t)
{
	if (!atomic_read(&atmel_port->tasklet_shutdown))
		tasklet_schedule(t);
}
297 static unsigned int atmel_get_lines_status(struct uart_port
*port
)
299 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
300 unsigned int status
, ret
= 0;
302 status
= atmel_uart_readl(port
, ATMEL_US_CSR
);
304 mctrl_gpio_get(atmel_port
->gpios
, &ret
);
306 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port
->gpios
,
309 status
&= ~ATMEL_US_CTS
;
311 status
|= ATMEL_US_CTS
;
314 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port
->gpios
,
317 status
&= ~ATMEL_US_DSR
;
319 status
|= ATMEL_US_DSR
;
322 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port
->gpios
,
325 status
&= ~ATMEL_US_RI
;
327 status
|= ATMEL_US_RI
;
330 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port
->gpios
,
333 status
&= ~ATMEL_US_DCD
;
335 status
|= ATMEL_US_DCD
;
341 /* Enable or disable the rs485 support */
342 static int atmel_config_rs485(struct uart_port
*port
,
343 struct serial_rs485
*rs485conf
)
345 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
348 /* Disable interrupts */
349 atmel_uart_writel(port
, ATMEL_US_IDR
, atmel_port
->tx_done_mask
);
351 mode
= atmel_uart_readl(port
, ATMEL_US_MR
);
353 /* Resetting serial mode to RS232 (0x0) */
354 mode
&= ~ATMEL_US_USMODE
;
356 port
->rs485
= *rs485conf
;
358 if (rs485conf
->flags
& SER_RS485_ENABLED
) {
359 dev_dbg(port
->dev
, "Setting UART to RS485\n");
360 atmel_port
->tx_done_mask
= ATMEL_US_TXEMPTY
;
361 atmel_uart_writel(port
, ATMEL_US_TTGR
,
362 rs485conf
->delay_rts_after_send
);
363 mode
|= ATMEL_US_USMODE_RS485
;
365 dev_dbg(port
->dev
, "Setting UART to RS232\n");
366 if (atmel_use_pdc_tx(port
))
367 atmel_port
->tx_done_mask
= ATMEL_US_ENDTX
|
370 atmel_port
->tx_done_mask
= ATMEL_US_TXRDY
;
372 atmel_uart_writel(port
, ATMEL_US_MR
, mode
);
374 /* Enable interrupts */
375 atmel_uart_writel(port
, ATMEL_US_IER
, atmel_port
->tx_done_mask
);
/*
 * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty.
 */
static u_int atmel_tx_empty(struct uart_port *port)
{
	/* TXEMPTY covers both the holding and the shift register */
	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
		TIOCSER_TEMT :
		0;
}
391 * Set state of the modem control output lines
393 static void atmel_set_mctrl(struct uart_port
*port
, u_int mctrl
)
395 unsigned int control
= 0;
396 unsigned int mode
= atmel_uart_readl(port
, ATMEL_US_MR
);
397 unsigned int rts_paused
, rts_ready
;
398 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
400 /* override mode to RS485 if needed, otherwise keep the current mode */
401 if (port
->rs485
.flags
& SER_RS485_ENABLED
) {
402 atmel_uart_writel(port
, ATMEL_US_TTGR
,
403 port
->rs485
.delay_rts_after_send
);
404 mode
&= ~ATMEL_US_USMODE
;
405 mode
|= ATMEL_US_USMODE_RS485
;
408 /* set the RTS line state according to the mode */
409 if ((mode
& ATMEL_US_USMODE
) == ATMEL_US_USMODE_HWHS
) {
410 /* force RTS line to high level */
411 rts_paused
= ATMEL_US_RTSEN
;
413 /* give the control of the RTS line back to the hardware */
414 rts_ready
= ATMEL_US_RTSDIS
;
416 /* force RTS line to high level */
417 rts_paused
= ATMEL_US_RTSDIS
;
419 /* force RTS line to low level */
420 rts_ready
= ATMEL_US_RTSEN
;
423 if (mctrl
& TIOCM_RTS
)
424 control
|= rts_ready
;
426 control
|= rts_paused
;
428 if (mctrl
& TIOCM_DTR
)
429 control
|= ATMEL_US_DTREN
;
431 control
|= ATMEL_US_DTRDIS
;
433 atmel_uart_writel(port
, ATMEL_US_CR
, control
);
435 mctrl_gpio_set(atmel_port
->gpios
, mctrl
);
437 /* Local loopback mode? */
438 mode
&= ~ATMEL_US_CHMODE
;
439 if (mctrl
& TIOCM_LOOP
)
440 mode
|= ATMEL_US_CHMODE_LOC_LOOP
;
442 mode
|= ATMEL_US_CHMODE_NORMAL
;
444 atmel_uart_writel(port
, ATMEL_US_MR
, mode
);
448 * Get state of the modem control input lines
450 static u_int
atmel_get_mctrl(struct uart_port
*port
)
452 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
453 unsigned int ret
= 0, status
;
455 status
= atmel_uart_readl(port
, ATMEL_US_CSR
);
458 * The control signals are active low.
460 if (!(status
& ATMEL_US_DCD
))
462 if (!(status
& ATMEL_US_CTS
))
464 if (!(status
& ATMEL_US_DSR
))
466 if (!(status
& ATMEL_US_RI
))
469 return mctrl_gpio_get(atmel_port
->gpios
, &ret
);
/*
 * Stop transmitting: disable the PDC TX channel if in use, mask the TX-done
 * interrupt, and for half-duplex RS485 hand the line back to the receiver.
 */
static void atmel_stop_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_pdc_tx(port)) {
		/* disable PDC transmit */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
	}

	/* Disable interrupts */
	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);

	/* RS485 without RX-during-TX: re-enable the receiver now that TX is done */
	if ((port->rs485.flags & SER_RS485_ENABLED) &&
	    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
		atmel_start_rx(port);
}
492 * Start transmitting.
494 static void atmel_start_tx(struct uart_port
*port
)
496 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
498 if (atmel_use_pdc_tx(port
) && (atmel_uart_readl(port
, ATMEL_PDC_PTSR
)
500 /* The transmitter is already running. Yes, we
504 if (atmel_use_pdc_tx(port
) || atmel_use_dma_tx(port
))
505 if ((port
->rs485
.flags
& SER_RS485_ENABLED
) &&
506 !(port
->rs485
.flags
& SER_RS485_RX_DURING_TX
))
509 if (atmel_use_pdc_tx(port
))
510 /* re-enable PDC transmit */
511 atmel_uart_writel(port
, ATMEL_PDC_PTCR
, ATMEL_PDC_TXTEN
);
513 /* Enable interrupts */
514 atmel_uart_writel(port
, ATMEL_US_IER
, atmel_port
->tx_done_mask
);
/*
 * start receiving - port is in process of being opened.
 */
static void atmel_start_rx(struct uart_port *port)
{
	/* reset status and receiver */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);

	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);

	if (atmel_use_pdc_rx(port)) {
		/* enable PDC controller */
		atmel_uart_writel(port, ATMEL_US_IER,
				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
				  port->read_status_mask);
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	} else {
		/* PIO mode: interrupt per received character */
		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
	}
}
/*
 * Stop receiving - port is in process of being closed.
 */
static void atmel_stop_rx(struct uart_port *port)
{
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);

	if (atmel_use_pdc_rx(port)) {
		/* disable PDC receive */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
		atmel_uart_writel(port, ATMEL_US_IDR,
				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
				  port->read_status_mask);
	} else {
		/* PIO mode: mask the per-character interrupt */
		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
	}
}
557 * Enable modem status interrupts
559 static void atmel_enable_ms(struct uart_port
*port
)
561 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
565 * Interrupt should not be enabled twice
567 if (atmel_port
->ms_irq_enabled
)
570 atmel_port
->ms_irq_enabled
= true;
572 if (!mctrl_gpio_to_gpiod(atmel_port
->gpios
, UART_GPIO_CTS
))
573 ier
|= ATMEL_US_CTSIC
;
575 if (!mctrl_gpio_to_gpiod(atmel_port
->gpios
, UART_GPIO_DSR
))
576 ier
|= ATMEL_US_DSRIC
;
578 if (!mctrl_gpio_to_gpiod(atmel_port
->gpios
, UART_GPIO_RI
))
579 ier
|= ATMEL_US_RIIC
;
581 if (!mctrl_gpio_to_gpiod(atmel_port
->gpios
, UART_GPIO_DCD
))
582 ier
|= ATMEL_US_DCDIC
;
584 atmel_uart_writel(port
, ATMEL_US_IER
, ier
);
586 mctrl_gpio_enable_ms(atmel_port
->gpios
);
590 * Disable modem status interrupts
592 static void atmel_disable_ms(struct uart_port
*port
)
594 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
598 * Interrupt should not be disabled twice
600 if (!atmel_port
->ms_irq_enabled
)
603 atmel_port
->ms_irq_enabled
= false;
605 mctrl_gpio_disable_ms(atmel_port
->gpios
);
607 if (!mctrl_gpio_to_gpiod(atmel_port
->gpios
, UART_GPIO_CTS
))
608 idr
|= ATMEL_US_CTSIC
;
610 if (!mctrl_gpio_to_gpiod(atmel_port
->gpios
, UART_GPIO_DSR
))
611 idr
|= ATMEL_US_DSRIC
;
613 if (!mctrl_gpio_to_gpiod(atmel_port
->gpios
, UART_GPIO_RI
))
614 idr
|= ATMEL_US_RIIC
;
616 if (!mctrl_gpio_to_gpiod(atmel_port
->gpios
, UART_GPIO_DCD
))
617 idr
|= ATMEL_US_DCDIC
;
619 atmel_uart_writel(port
, ATMEL_US_IDR
, idr
);
623 * Control the transmission of a break signal
625 static void atmel_break_ctl(struct uart_port
*port
, int break_state
)
627 if (break_state
!= 0)
629 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_STTBRK
);
632 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_STPBRK
);
636 * Stores the incoming character in the ring buffer
639 atmel_buffer_rx_char(struct uart_port
*port
, unsigned int status
,
642 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
643 struct circ_buf
*ring
= &atmel_port
->rx_ring
;
644 struct atmel_uart_char
*c
;
646 if (!CIRC_SPACE(ring
->head
, ring
->tail
, ATMEL_SERIAL_RINGSIZE
))
647 /* Buffer overflow, ignore char */
650 c
= &((struct atmel_uart_char
*)ring
->buf
)[ring
->head
];
654 /* Make sure the character is stored before we update head. */
657 ring
->head
= (ring
->head
+ 1) & (ATMEL_SERIAL_RINGSIZE
- 1);
/*
 * Deal with parity, framing and overrun errors.
 */
static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
{
	/* clear error */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);

	if (status & ATMEL_US_RXBRK) {
		/* ignore side-effect */
		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
		/* NOTE(review): brk counter increment restored per upstream */
		port->icount.brk++;
	}

	if (status & ATMEL_US_PARE)
		port->icount.parity++;
	if (status & ATMEL_US_FRAME)
		port->icount.frame++;
	if (status & ATMEL_US_OVRE)
		port->icount.overrun++;
}
682 * Characters received (called from interrupt handler)
684 static void atmel_rx_chars(struct uart_port
*port
)
686 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
687 unsigned int status
, ch
;
689 status
= atmel_uart_readl(port
, ATMEL_US_CSR
);
690 while (status
& ATMEL_US_RXRDY
) {
691 ch
= atmel_uart_read_char(port
);
694 * note that the error handling code is
695 * out of the main execution path
697 if (unlikely(status
& (ATMEL_US_PARE
| ATMEL_US_FRAME
698 | ATMEL_US_OVRE
| ATMEL_US_RXBRK
)
699 || atmel_port
->break_active
)) {
702 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_RSTSTA
);
704 if (status
& ATMEL_US_RXBRK
705 && !atmel_port
->break_active
) {
706 atmel_port
->break_active
= 1;
707 atmel_uart_writel(port
, ATMEL_US_IER
,
711 * This is either the end-of-break
712 * condition or we've received at
713 * least one character without RXBRK
714 * being set. In both cases, the next
715 * RXBRK will indicate start-of-break.
717 atmel_uart_writel(port
, ATMEL_US_IDR
,
719 status
&= ~ATMEL_US_RXBRK
;
720 atmel_port
->break_active
= 0;
724 atmel_buffer_rx_char(port
, status
, ch
);
725 status
= atmel_uart_readl(port
, ATMEL_US_CSR
);
728 atmel_tasklet_schedule(atmel_port
, &atmel_port
->tasklet_rx
);
732 * Transmit characters (called from tasklet with TXRDY interrupt
735 static void atmel_tx_chars(struct uart_port
*port
)
737 struct circ_buf
*xmit
= &port
->state
->xmit
;
738 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
741 (atmel_uart_readl(port
, ATMEL_US_CSR
) & atmel_port
->tx_done_mask
)) {
742 atmel_uart_write_char(port
, port
->x_char
);
746 if (uart_circ_empty(xmit
) || uart_tx_stopped(port
))
749 while (atmel_uart_readl(port
, ATMEL_US_CSR
) &
750 atmel_port
->tx_done_mask
) {
751 atmel_uart_write_char(port
, xmit
->buf
[xmit
->tail
]);
752 xmit
->tail
= (xmit
->tail
+ 1) & (UART_XMIT_SIZE
- 1);
754 if (uart_circ_empty(xmit
))
758 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
759 uart_write_wakeup(port
);
761 if (!uart_circ_empty(xmit
))
762 /* Enable interrupts */
763 atmel_uart_writel(port
, ATMEL_US_IER
,
764 atmel_port
->tx_done_mask
);
767 static void atmel_complete_tx_dma(void *arg
)
769 struct atmel_uart_port
*atmel_port
= arg
;
770 struct uart_port
*port
= &atmel_port
->uart
;
771 struct circ_buf
*xmit
= &port
->state
->xmit
;
772 struct dma_chan
*chan
= atmel_port
->chan_tx
;
775 spin_lock_irqsave(&port
->lock
, flags
);
778 dmaengine_terminate_all(chan
);
779 xmit
->tail
+= atmel_port
->tx_len
;
780 xmit
->tail
&= UART_XMIT_SIZE
- 1;
782 port
->icount
.tx
+= atmel_port
->tx_len
;
784 spin_lock_irq(&atmel_port
->lock_tx
);
785 async_tx_ack(atmel_port
->desc_tx
);
786 atmel_port
->cookie_tx
= -EINVAL
;
787 atmel_port
->desc_tx
= NULL
;
788 spin_unlock_irq(&atmel_port
->lock_tx
);
790 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
791 uart_write_wakeup(port
);
794 * xmit is a circular buffer so, if we have just send data from
795 * xmit->tail to the end of xmit->buf, now we have to transmit the
796 * remaining data from the beginning of xmit->buf to xmit->head.
798 if (!uart_circ_empty(xmit
))
799 atmel_tasklet_schedule(atmel_port
, &atmel_port
->tasklet_tx
);
801 spin_unlock_irqrestore(&port
->lock
, flags
);
/*
 * Tear down the TX DMA channel: abort any in-flight transfer, release the
 * channel, unmap the xmit buffer and reset the bookkeeping fields so a later
 * startup can reconfigure from scratch.
 */
static void atmel_release_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct dma_chan *chan = atmel_port->chan_tx;

	if (chan) {
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
		dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
			     DMA_TO_DEVICE);
	}

	atmel_port->desc_tx = NULL;
	atmel_port->chan_tx = NULL;
	atmel_port->cookie_tx = -EINVAL;
}
822 * Called from tasklet with TXRDY interrupt is disabled.
824 static void atmel_tx_dma(struct uart_port
*port
)
826 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
827 struct circ_buf
*xmit
= &port
->state
->xmit
;
828 struct dma_chan
*chan
= atmel_port
->chan_tx
;
829 struct dma_async_tx_descriptor
*desc
;
830 struct scatterlist sgl
[2], *sg
, *sg_tx
= &atmel_port
->sg_tx
;
831 unsigned int tx_len
, part1_len
, part2_len
, sg_len
;
832 dma_addr_t phys_addr
;
834 /* Make sure we have an idle channel */
835 if (atmel_port
->desc_tx
!= NULL
)
838 if (!uart_circ_empty(xmit
) && !uart_tx_stopped(port
)) {
841 * Port xmit buffer is already mapped,
842 * and it is one page... Just adjust
843 * offsets and lengths. Since it is a circular buffer,
844 * we have to transmit till the end, and then the rest.
845 * Take the port lock to get a
846 * consistent xmit buffer state.
848 tx_len
= CIRC_CNT_TO_END(xmit
->head
,
852 if (atmel_port
->fifo_size
) {
853 /* multi data mode */
854 part1_len
= (tx_len
& ~0x3); /* DWORD access */
855 part2_len
= (tx_len
& 0x3); /* BYTE access */
857 /* single data (legacy) mode */
859 part2_len
= tx_len
; /* BYTE access only */
862 sg_init_table(sgl
, 2);
864 phys_addr
= sg_dma_address(sg_tx
) + xmit
->tail
;
867 sg_dma_address(sg
) = phys_addr
;
868 sg_dma_len(sg
) = part1_len
;
870 phys_addr
+= part1_len
;
875 sg_dma_address(sg
) = phys_addr
;
876 sg_dma_len(sg
) = part2_len
;
880 * save tx_len so atmel_complete_tx_dma() will increase
881 * xmit->tail correctly
883 atmel_port
->tx_len
= tx_len
;
885 desc
= dmaengine_prep_slave_sg(chan
,
892 dev_err(port
->dev
, "Failed to send via dma!\n");
896 dma_sync_sg_for_device(port
->dev
, sg_tx
, 1, DMA_TO_DEVICE
);
898 atmel_port
->desc_tx
= desc
;
899 desc
->callback
= atmel_complete_tx_dma
;
900 desc
->callback_param
= atmel_port
;
901 atmel_port
->cookie_tx
= dmaengine_submit(desc
);
904 if (port
->rs485
.flags
& SER_RS485_ENABLED
) {
905 /* DMA done, stop TX, start RX for RS485 */
906 atmel_start_rx(port
);
910 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
911 uart_write_wakeup(port
);
914 static int atmel_prepare_tx_dma(struct uart_port
*port
)
916 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
918 struct dma_slave_config config
;
922 dma_cap_set(DMA_SLAVE
, mask
);
924 atmel_port
->chan_tx
= dma_request_slave_channel(port
->dev
, "tx");
925 if (atmel_port
->chan_tx
== NULL
)
927 dev_info(port
->dev
, "using %s for tx DMA transfers\n",
928 dma_chan_name(atmel_port
->chan_tx
));
930 spin_lock_init(&atmel_port
->lock_tx
);
931 sg_init_table(&atmel_port
->sg_tx
, 1);
932 /* UART circular tx buffer is an aligned page. */
933 BUG_ON(!PAGE_ALIGNED(port
->state
->xmit
.buf
));
934 sg_set_page(&atmel_port
->sg_tx
,
935 virt_to_page(port
->state
->xmit
.buf
),
937 (unsigned long)port
->state
->xmit
.buf
& ~PAGE_MASK
);
938 nent
= dma_map_sg(port
->dev
,
944 dev_dbg(port
->dev
, "need to release resource of dma\n");
947 dev_dbg(port
->dev
, "%s: mapped %d@%p to %pad\n", __func__
,
948 sg_dma_len(&atmel_port
->sg_tx
),
949 port
->state
->xmit
.buf
,
950 &sg_dma_address(&atmel_port
->sg_tx
));
953 /* Configure the slave DMA */
954 memset(&config
, 0, sizeof(config
));
955 config
.direction
= DMA_MEM_TO_DEV
;
956 config
.dst_addr_width
= (atmel_port
->fifo_size
) ?
957 DMA_SLAVE_BUSWIDTH_4_BYTES
:
958 DMA_SLAVE_BUSWIDTH_1_BYTE
;
959 config
.dst_addr
= port
->mapbase
+ ATMEL_US_THR
;
960 config
.dst_maxburst
= 1;
962 ret
= dmaengine_slave_config(atmel_port
->chan_tx
,
965 dev_err(port
->dev
, "DMA tx slave configuration failed\n");
972 dev_err(port
->dev
, "TX channel not available, switch to pio\n");
973 atmel_port
->use_dma_tx
= 0;
974 if (atmel_port
->chan_tx
)
975 atmel_release_tx_dma(port
);
979 static void atmel_complete_rx_dma(void *arg
)
981 struct uart_port
*port
= arg
;
982 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
984 atmel_tasklet_schedule(atmel_port
, &atmel_port
->tasklet_rx
);
/*
 * Tear down the RX DMA channel: abort the cyclic transfer, release the
 * channel, unmap the ring buffer and reset the bookkeeping fields.
 */
static void atmel_release_rx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct dma_chan *chan = atmel_port->chan_rx;

	if (chan) {
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
		dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
			     DMA_FROM_DEVICE);
	}

	atmel_port->desc_rx = NULL;
	atmel_port->chan_rx = NULL;
	atmel_port->cookie_rx = -EINVAL;
}
1004 static void atmel_rx_from_dma(struct uart_port
*port
)
1006 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1007 struct tty_port
*tport
= &port
->state
->port
;
1008 struct circ_buf
*ring
= &atmel_port
->rx_ring
;
1009 struct dma_chan
*chan
= atmel_port
->chan_rx
;
1010 struct dma_tx_state state
;
1011 enum dma_status dmastat
;
1015 /* Reset the UART timeout early so that we don't miss one */
1016 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_STTTO
);
1017 dmastat
= dmaengine_tx_status(chan
,
1018 atmel_port
->cookie_rx
,
1020 /* Restart a new tasklet if DMA status is error */
1021 if (dmastat
== DMA_ERROR
) {
1022 dev_dbg(port
->dev
, "Get residue error, restart tasklet\n");
1023 atmel_uart_writel(port
, ATMEL_US_IER
, ATMEL_US_TIMEOUT
);
1024 atmel_tasklet_schedule(atmel_port
, &atmel_port
->tasklet_rx
);
1028 /* CPU claims ownership of RX DMA buffer */
1029 dma_sync_sg_for_cpu(port
->dev
,
1035 * ring->head points to the end of data already written by the DMA.
1036 * ring->tail points to the beginning of data to be read by the
1038 * The current transfer size should not be larger than the dma buffer
1041 ring
->head
= sg_dma_len(&atmel_port
->sg_rx
) - state
.residue
;
1042 BUG_ON(ring
->head
> sg_dma_len(&atmel_port
->sg_rx
));
1044 * At this point ring->head may point to the first byte right after the
1045 * last byte of the dma buffer:
1046 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
1048 * However ring->tail must always points inside the dma buffer:
1049 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
1051 * Since we use a ring buffer, we have to handle the case
1052 * where head is lower than tail. In such a case, we first read from
1053 * tail to the end of the buffer then reset tail.
1055 if (ring
->head
< ring
->tail
) {
1056 count
= sg_dma_len(&atmel_port
->sg_rx
) - ring
->tail
;
1058 tty_insert_flip_string(tport
, ring
->buf
+ ring
->tail
, count
);
1060 port
->icount
.rx
+= count
;
1063 /* Finally we read data from tail to head */
1064 if (ring
->tail
< ring
->head
) {
1065 count
= ring
->head
- ring
->tail
;
1067 tty_insert_flip_string(tport
, ring
->buf
+ ring
->tail
, count
);
1068 /* Wrap ring->head if needed */
1069 if (ring
->head
>= sg_dma_len(&atmel_port
->sg_rx
))
1071 ring
->tail
= ring
->head
;
1072 port
->icount
.rx
+= count
;
1075 /* USART retrieves ownership of RX DMA buffer */
1076 dma_sync_sg_for_device(port
->dev
,
1082 * Drop the lock here since it might end up calling
1083 * uart_start(), which takes the lock.
1085 spin_unlock(&port
->lock
);
1086 tty_flip_buffer_push(tport
);
1087 spin_lock(&port
->lock
);
1089 atmel_uart_writel(port
, ATMEL_US_IER
, ATMEL_US_TIMEOUT
);
1092 static int atmel_prepare_rx_dma(struct uart_port
*port
)
1094 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1095 struct dma_async_tx_descriptor
*desc
;
1096 dma_cap_mask_t mask
;
1097 struct dma_slave_config config
;
1098 struct circ_buf
*ring
;
1101 ring
= &atmel_port
->rx_ring
;
1104 dma_cap_set(DMA_CYCLIC
, mask
);
1106 atmel_port
->chan_rx
= dma_request_slave_channel(port
->dev
, "rx");
1107 if (atmel_port
->chan_rx
== NULL
)
1109 dev_info(port
->dev
, "using %s for rx DMA transfers\n",
1110 dma_chan_name(atmel_port
->chan_rx
));
1112 spin_lock_init(&atmel_port
->lock_rx
);
1113 sg_init_table(&atmel_port
->sg_rx
, 1);
1114 /* UART circular rx buffer is an aligned page. */
1115 BUG_ON(!PAGE_ALIGNED(ring
->buf
));
1116 sg_set_page(&atmel_port
->sg_rx
,
1117 virt_to_page(ring
->buf
),
1118 sizeof(struct atmel_uart_char
) * ATMEL_SERIAL_RINGSIZE
,
1119 (unsigned long)ring
->buf
& ~PAGE_MASK
);
1120 nent
= dma_map_sg(port
->dev
,
1126 dev_dbg(port
->dev
, "need to release resource of dma\n");
1129 dev_dbg(port
->dev
, "%s: mapped %d@%p to %pad\n", __func__
,
1130 sg_dma_len(&atmel_port
->sg_rx
),
1132 &sg_dma_address(&atmel_port
->sg_rx
));
1135 /* Configure the slave DMA */
1136 memset(&config
, 0, sizeof(config
));
1137 config
.direction
= DMA_DEV_TO_MEM
;
1138 config
.src_addr_width
= DMA_SLAVE_BUSWIDTH_1_BYTE
;
1139 config
.src_addr
= port
->mapbase
+ ATMEL_US_RHR
;
1140 config
.src_maxburst
= 1;
1142 ret
= dmaengine_slave_config(atmel_port
->chan_rx
,
1145 dev_err(port
->dev
, "DMA rx slave configuration failed\n");
1149 * Prepare a cyclic dma transfer, assign 2 descriptors,
1150 * each one is half ring buffer size
1152 desc
= dmaengine_prep_dma_cyclic(atmel_port
->chan_rx
,
1153 sg_dma_address(&atmel_port
->sg_rx
),
1154 sg_dma_len(&atmel_port
->sg_rx
),
1155 sg_dma_len(&atmel_port
->sg_rx
)/2,
1157 DMA_PREP_INTERRUPT
);
1158 desc
->callback
= atmel_complete_rx_dma
;
1159 desc
->callback_param
= port
;
1160 atmel_port
->desc_rx
= desc
;
1161 atmel_port
->cookie_rx
= dmaengine_submit(desc
);
1166 dev_err(port
->dev
, "RX channel not available, switch to pio\n");
1167 atmel_port
->use_dma_rx
= 0;
1168 if (atmel_port
->chan_rx
)
1169 atmel_release_rx_dma(port
);
/*
 * Periodic RX poll timer: kick the RX tasklet and re-arm, unless the port is
 * shutting down (tasklet_shutdown set), in which case the timer dies out.
 */
static void atmel_uart_timer_callback(unsigned long data)
{
	struct uart_port *port = (void *)data;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (!atomic_read(&atmel_port->tasklet_shutdown)) {
		tasklet_schedule(&atmel_port->tasklet_rx);
		mod_timer(&atmel_port->uart_timer,
			  jiffies + uart_poll_timeout(port));
	}
}
1186 * receive interrupt handler.
1189 atmel_handle_receive(struct uart_port
*port
, unsigned int pending
)
1191 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1193 if (atmel_use_pdc_rx(port
)) {
1195 * PDC receive. Just schedule the tasklet and let it
1196 * figure out the details.
1198 * TODO: We're not handling error flags correctly at
1201 if (pending
& (ATMEL_US_ENDRX
| ATMEL_US_TIMEOUT
)) {
1202 atmel_uart_writel(port
, ATMEL_US_IDR
,
1203 (ATMEL_US_ENDRX
| ATMEL_US_TIMEOUT
));
1204 atmel_tasklet_schedule(atmel_port
,
1205 &atmel_port
->tasklet_rx
);
1208 if (pending
& (ATMEL_US_RXBRK
| ATMEL_US_OVRE
|
1209 ATMEL_US_FRAME
| ATMEL_US_PARE
))
1210 atmel_pdc_rxerr(port
, pending
);
1213 if (atmel_use_dma_rx(port
)) {
1214 if (pending
& ATMEL_US_TIMEOUT
) {
1215 atmel_uart_writel(port
, ATMEL_US_IDR
,
1217 atmel_tasklet_schedule(atmel_port
,
1218 &atmel_port
->tasklet_rx
);
1222 /* Interrupt receive */
1223 if (pending
& ATMEL_US_RXRDY
)
1224 atmel_rx_chars(port
);
1225 else if (pending
& ATMEL_US_RXBRK
) {
1227 * End of break detected. If it came along with a
1228 * character, atmel_rx_chars will handle it.
1230 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_RSTSTA
);
1231 atmel_uart_writel(port
, ATMEL_US_IDR
, ATMEL_US_RXBRK
);
1232 atmel_port
->break_active
= 0;
1237 * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
1240 atmel_handle_transmit(struct uart_port
*port
, unsigned int pending
)
1242 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1244 if (pending
& atmel_port
->tx_done_mask
) {
1245 /* Either PDC or interrupt transmission */
1246 atmel_uart_writel(port
, ATMEL_US_IDR
,
1247 atmel_port
->tx_done_mask
);
1248 atmel_tasklet_schedule(atmel_port
, &atmel_port
->tasklet_tx
);
1253 * status flags interrupt handler.
1256 atmel_handle_status(struct uart_port
*port
, unsigned int pending
,
1257 unsigned int status
)
1259 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1260 unsigned int status_change
;
1262 if (pending
& (ATMEL_US_RIIC
| ATMEL_US_DSRIC
| ATMEL_US_DCDIC
1263 | ATMEL_US_CTSIC
)) {
1264 status_change
= status
^ atmel_port
->irq_status_prev
;
1265 atmel_port
->irq_status_prev
= status
;
1267 if (status_change
& (ATMEL_US_RI
| ATMEL_US_DSR
1268 | ATMEL_US_DCD
| ATMEL_US_CTS
)) {
1269 /* TODO: All reads to CSR will clear these interrupts! */
1270 if (status_change
& ATMEL_US_RI
)
1272 if (status_change
& ATMEL_US_DSR
)
1274 if (status_change
& ATMEL_US_DCD
)
1275 uart_handle_dcd_change(port
, !(status
& ATMEL_US_DCD
));
1276 if (status_change
& ATMEL_US_CTS
)
1277 uart_handle_cts_change(port
, !(status
& ATMEL_US_CTS
));
1279 wake_up_interruptible(&port
->state
->port
.delta_msr_wait
);
1287 static irqreturn_t
atmel_interrupt(int irq
, void *dev_id
)
1289 struct uart_port
*port
= dev_id
;
1290 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1291 unsigned int status
, pending
, mask
, pass_counter
= 0;
1293 spin_lock(&atmel_port
->lock_suspended
);
1296 status
= atmel_get_lines_status(port
);
1297 mask
= atmel_uart_readl(port
, ATMEL_US_IMR
);
1298 pending
= status
& mask
;
1302 if (atmel_port
->suspended
) {
1303 atmel_port
->pending
|= pending
;
1304 atmel_port
->pending_status
= status
;
1305 atmel_uart_writel(port
, ATMEL_US_IDR
, mask
);
1310 atmel_handle_receive(port
, pending
);
1311 atmel_handle_status(port
, pending
, status
);
1312 atmel_handle_transmit(port
, pending
);
1313 } while (pass_counter
++ < ATMEL_ISR_PASS_LIMIT
);
1315 spin_unlock(&atmel_port
->lock_suspended
);
1317 return pass_counter
? IRQ_HANDLED
: IRQ_NONE
;
1320 static void atmel_release_tx_pdc(struct uart_port
*port
)
1322 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1323 struct atmel_dma_buffer
*pdc
= &atmel_port
->pdc_tx
;
1325 dma_unmap_single(port
->dev
,
1332 * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1334 static void atmel_tx_pdc(struct uart_port
*port
)
1336 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1337 struct circ_buf
*xmit
= &port
->state
->xmit
;
1338 struct atmel_dma_buffer
*pdc
= &atmel_port
->pdc_tx
;
1341 /* nothing left to transmit? */
1342 if (atmel_uart_readl(port
, ATMEL_PDC_TCR
))
1345 xmit
->tail
+= pdc
->ofs
;
1346 xmit
->tail
&= UART_XMIT_SIZE
- 1;
1348 port
->icount
.tx
+= pdc
->ofs
;
1351 /* more to transmit - setup next transfer */
1353 /* disable PDC transmit */
1354 atmel_uart_writel(port
, ATMEL_PDC_PTCR
, ATMEL_PDC_TXTDIS
);
1356 if (!uart_circ_empty(xmit
) && !uart_tx_stopped(port
)) {
1357 dma_sync_single_for_device(port
->dev
,
1362 count
= CIRC_CNT_TO_END(xmit
->head
, xmit
->tail
, UART_XMIT_SIZE
);
1365 atmel_uart_writel(port
, ATMEL_PDC_TPR
,
1366 pdc
->dma_addr
+ xmit
->tail
);
1367 atmel_uart_writel(port
, ATMEL_PDC_TCR
, count
);
1368 /* re-enable PDC transmit */
1369 atmel_uart_writel(port
, ATMEL_PDC_PTCR
, ATMEL_PDC_TXTEN
);
1370 /* Enable interrupts */
1371 atmel_uart_writel(port
, ATMEL_US_IER
,
1372 atmel_port
->tx_done_mask
);
1374 if ((port
->rs485
.flags
& SER_RS485_ENABLED
) &&
1375 !(port
->rs485
.flags
& SER_RS485_RX_DURING_TX
)) {
1376 /* DMA done, stop TX, start RX for RS485 */
1377 atmel_start_rx(port
);
1381 if (uart_circ_chars_pending(xmit
) < WAKEUP_CHARS
)
1382 uart_write_wakeup(port
);
1385 static int atmel_prepare_tx_pdc(struct uart_port
*port
)
1387 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1388 struct atmel_dma_buffer
*pdc
= &atmel_port
->pdc_tx
;
1389 struct circ_buf
*xmit
= &port
->state
->xmit
;
1391 pdc
->buf
= xmit
->buf
;
1392 pdc
->dma_addr
= dma_map_single(port
->dev
,
1396 pdc
->dma_size
= UART_XMIT_SIZE
;
1402 static void atmel_rx_from_ring(struct uart_port
*port
)
1404 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1405 struct circ_buf
*ring
= &atmel_port
->rx_ring
;
1407 unsigned int status
;
1409 while (ring
->head
!= ring
->tail
) {
1410 struct atmel_uart_char c
;
1412 /* Make sure c is loaded after head. */
1415 c
= ((struct atmel_uart_char
*)ring
->buf
)[ring
->tail
];
1417 ring
->tail
= (ring
->tail
+ 1) & (ATMEL_SERIAL_RINGSIZE
- 1);
1424 * note that the error handling code is
1425 * out of the main execution path
1427 if (unlikely(status
& (ATMEL_US_PARE
| ATMEL_US_FRAME
1428 | ATMEL_US_OVRE
| ATMEL_US_RXBRK
))) {
1429 if (status
& ATMEL_US_RXBRK
) {
1430 /* ignore side-effect */
1431 status
&= ~(ATMEL_US_PARE
| ATMEL_US_FRAME
);
1434 if (uart_handle_break(port
))
1437 if (status
& ATMEL_US_PARE
)
1438 port
->icount
.parity
++;
1439 if (status
& ATMEL_US_FRAME
)
1440 port
->icount
.frame
++;
1441 if (status
& ATMEL_US_OVRE
)
1442 port
->icount
.overrun
++;
1444 status
&= port
->read_status_mask
;
1446 if (status
& ATMEL_US_RXBRK
)
1448 else if (status
& ATMEL_US_PARE
)
1450 else if (status
& ATMEL_US_FRAME
)
1455 if (uart_handle_sysrq_char(port
, c
.ch
))
1458 uart_insert_char(port
, status
, ATMEL_US_OVRE
, c
.ch
, flg
);
1462 * Drop the lock here since it might end up calling
1463 * uart_start(), which takes the lock.
1465 spin_unlock(&port
->lock
);
1466 tty_flip_buffer_push(&port
->state
->port
);
1467 spin_lock(&port
->lock
);
1470 static void atmel_release_rx_pdc(struct uart_port
*port
)
1472 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1475 for (i
= 0; i
< 2; i
++) {
1476 struct atmel_dma_buffer
*pdc
= &atmel_port
->pdc_rx
[i
];
1478 dma_unmap_single(port
->dev
,
1486 static void atmel_rx_from_pdc(struct uart_port
*port
)
1488 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1489 struct tty_port
*tport
= &port
->state
->port
;
1490 struct atmel_dma_buffer
*pdc
;
1491 int rx_idx
= atmel_port
->pdc_rx_idx
;
1497 /* Reset the UART timeout early so that we don't miss one */
1498 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_STTTO
);
1500 pdc
= &atmel_port
->pdc_rx
[rx_idx
];
1501 head
= atmel_uart_readl(port
, ATMEL_PDC_RPR
) - pdc
->dma_addr
;
1504 /* If the PDC has switched buffers, RPR won't contain
1505 * any address within the current buffer. Since head
1506 * is unsigned, we just need a one-way comparison to
1509 * In this case, we just need to consume the entire
1510 * buffer and resubmit it for DMA. This will clear the
1511 * ENDRX bit as well, so that we can safely re-enable
1512 * all interrupts below.
1514 head
= min(head
, pdc
->dma_size
);
1516 if (likely(head
!= tail
)) {
1517 dma_sync_single_for_cpu(port
->dev
, pdc
->dma_addr
,
1518 pdc
->dma_size
, DMA_FROM_DEVICE
);
1521 * head will only wrap around when we recycle
1522 * the DMA buffer, and when that happens, we
1523 * explicitly set tail to 0. So head will
1524 * always be greater than tail.
1526 count
= head
- tail
;
1528 tty_insert_flip_string(tport
, pdc
->buf
+ pdc
->ofs
,
1531 dma_sync_single_for_device(port
->dev
, pdc
->dma_addr
,
1532 pdc
->dma_size
, DMA_FROM_DEVICE
);
1534 port
->icount
.rx
+= count
;
1539 * If the current buffer is full, we need to check if
1540 * the next one contains any additional data.
1542 if (head
>= pdc
->dma_size
) {
1544 atmel_uart_writel(port
, ATMEL_PDC_RNPR
, pdc
->dma_addr
);
1545 atmel_uart_writel(port
, ATMEL_PDC_RNCR
, pdc
->dma_size
);
1548 atmel_port
->pdc_rx_idx
= rx_idx
;
1550 } while (head
>= pdc
->dma_size
);
1553 * Drop the lock here since it might end up calling
1554 * uart_start(), which takes the lock.
1556 spin_unlock(&port
->lock
);
1557 tty_flip_buffer_push(tport
);
1558 spin_lock(&port
->lock
);
1560 atmel_uart_writel(port
, ATMEL_US_IER
,
1561 ATMEL_US_ENDRX
| ATMEL_US_TIMEOUT
);
1564 static int atmel_prepare_rx_pdc(struct uart_port
*port
)
1566 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1569 for (i
= 0; i
< 2; i
++) {
1570 struct atmel_dma_buffer
*pdc
= &atmel_port
->pdc_rx
[i
];
1572 pdc
->buf
= kmalloc(PDC_BUFFER_SIZE
, GFP_KERNEL
);
1573 if (pdc
->buf
== NULL
) {
1575 dma_unmap_single(port
->dev
,
1576 atmel_port
->pdc_rx
[0].dma_addr
,
1579 kfree(atmel_port
->pdc_rx
[0].buf
);
1581 atmel_port
->use_pdc_rx
= 0;
1584 pdc
->dma_addr
= dma_map_single(port
->dev
,
1588 pdc
->dma_size
= PDC_BUFFER_SIZE
;
1592 atmel_port
->pdc_rx_idx
= 0;
1594 atmel_uart_writel(port
, ATMEL_PDC_RPR
, atmel_port
->pdc_rx
[0].dma_addr
);
1595 atmel_uart_writel(port
, ATMEL_PDC_RCR
, PDC_BUFFER_SIZE
);
1597 atmel_uart_writel(port
, ATMEL_PDC_RNPR
,
1598 atmel_port
->pdc_rx
[1].dma_addr
);
1599 atmel_uart_writel(port
, ATMEL_PDC_RNCR
, PDC_BUFFER_SIZE
);
1605 * tasklet handling tty stuff outside the interrupt handler.
1607 static void atmel_tasklet_rx_func(unsigned long data
)
1609 struct uart_port
*port
= (struct uart_port
*)data
;
1610 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1612 /* The interrupt handler does not take the lock */
1613 spin_lock(&port
->lock
);
1614 atmel_port
->schedule_rx(port
);
1615 spin_unlock(&port
->lock
);
1618 static void atmel_tasklet_tx_func(unsigned long data
)
1620 struct uart_port
*port
= (struct uart_port
*)data
;
1621 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1623 /* The interrupt handler does not take the lock */
1624 spin_lock(&port
->lock
);
1625 atmel_port
->schedule_tx(port
);
1626 spin_unlock(&port
->lock
);
1629 static void atmel_init_property(struct atmel_uart_port
*atmel_port
,
1630 struct platform_device
*pdev
)
1632 struct device_node
*np
= pdev
->dev
.of_node
;
1633 struct atmel_uart_data
*pdata
= dev_get_platdata(&pdev
->dev
);
1636 /* DMA/PDC usage specification */
1637 if (of_get_property(np
, "atmel,use-dma-rx", NULL
)) {
1638 if (of_get_property(np
, "dmas", NULL
)) {
1639 atmel_port
->use_dma_rx
= true;
1640 atmel_port
->use_pdc_rx
= false;
1642 atmel_port
->use_dma_rx
= false;
1643 atmel_port
->use_pdc_rx
= true;
1646 atmel_port
->use_dma_rx
= false;
1647 atmel_port
->use_pdc_rx
= false;
1650 if (of_get_property(np
, "atmel,use-dma-tx", NULL
)) {
1651 if (of_get_property(np
, "dmas", NULL
)) {
1652 atmel_port
->use_dma_tx
= true;
1653 atmel_port
->use_pdc_tx
= false;
1655 atmel_port
->use_dma_tx
= false;
1656 atmel_port
->use_pdc_tx
= true;
1659 atmel_port
->use_dma_tx
= false;
1660 atmel_port
->use_pdc_tx
= false;
1664 atmel_port
->use_pdc_rx
= pdata
->use_dma_rx
;
1665 atmel_port
->use_pdc_tx
= pdata
->use_dma_tx
;
1666 atmel_port
->use_dma_rx
= false;
1667 atmel_port
->use_dma_tx
= false;
1672 static void atmel_init_rs485(struct uart_port
*port
,
1673 struct platform_device
*pdev
)
1675 struct device_node
*np
= pdev
->dev
.of_node
;
1676 struct atmel_uart_data
*pdata
= dev_get_platdata(&pdev
->dev
);
1679 struct serial_rs485
*rs485conf
= &port
->rs485
;
1681 /* rs485 properties */
1682 if (of_property_read_u32_array(np
, "rs485-rts-delay",
1683 rs485_delay
, 2) == 0) {
1684 rs485conf
->delay_rts_before_send
= rs485_delay
[0];
1685 rs485conf
->delay_rts_after_send
= rs485_delay
[1];
1686 rs485conf
->flags
= 0;
1689 if (of_get_property(np
, "rs485-rx-during-tx", NULL
))
1690 rs485conf
->flags
|= SER_RS485_RX_DURING_TX
;
1692 if (of_get_property(np
, "linux,rs485-enabled-at-boot-time",
1694 rs485conf
->flags
|= SER_RS485_ENABLED
;
1696 port
->rs485
= pdata
->rs485
;
1701 static void atmel_set_ops(struct uart_port
*port
)
1703 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1705 if (atmel_use_dma_rx(port
)) {
1706 atmel_port
->prepare_rx
= &atmel_prepare_rx_dma
;
1707 atmel_port
->schedule_rx
= &atmel_rx_from_dma
;
1708 atmel_port
->release_rx
= &atmel_release_rx_dma
;
1709 } else if (atmel_use_pdc_rx(port
)) {
1710 atmel_port
->prepare_rx
= &atmel_prepare_rx_pdc
;
1711 atmel_port
->schedule_rx
= &atmel_rx_from_pdc
;
1712 atmel_port
->release_rx
= &atmel_release_rx_pdc
;
1714 atmel_port
->prepare_rx
= NULL
;
1715 atmel_port
->schedule_rx
= &atmel_rx_from_ring
;
1716 atmel_port
->release_rx
= NULL
;
1719 if (atmel_use_dma_tx(port
)) {
1720 atmel_port
->prepare_tx
= &atmel_prepare_tx_dma
;
1721 atmel_port
->schedule_tx
= &atmel_tx_dma
;
1722 atmel_port
->release_tx
= &atmel_release_tx_dma
;
1723 } else if (atmel_use_pdc_tx(port
)) {
1724 atmel_port
->prepare_tx
= &atmel_prepare_tx_pdc
;
1725 atmel_port
->schedule_tx
= &atmel_tx_pdc
;
1726 atmel_port
->release_tx
= &atmel_release_tx_pdc
;
1728 atmel_port
->prepare_tx
= NULL
;
1729 atmel_port
->schedule_tx
= &atmel_tx_chars
;
1730 atmel_port
->release_tx
= NULL
;
1735 * Get ip name usart or uart
1737 static void atmel_get_ip_name(struct uart_port
*port
)
1739 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1740 int name
= atmel_uart_readl(port
, ATMEL_US_NAME
);
1742 u32 usart
, dbgu_uart
, new_uart
;
1743 /* ASCII decoding for IP version */
1744 usart
= 0x55534152; /* USAR(T) */
1745 dbgu_uart
= 0x44424755; /* DBGU */
1746 new_uart
= 0x55415254; /* UART */
1748 atmel_port
->has_hw_timer
= false;
1750 if (name
== new_uart
) {
1751 dev_dbg(port
->dev
, "Uart with hw timer");
1752 atmel_port
->has_hw_timer
= true;
1753 atmel_port
->rtor
= ATMEL_UA_RTOR
;
1754 } else if (name
== usart
) {
1755 dev_dbg(port
->dev
, "Usart\n");
1756 atmel_port
->has_hw_timer
= true;
1757 atmel_port
->rtor
= ATMEL_US_RTOR
;
1758 } else if (name
== dbgu_uart
) {
1759 dev_dbg(port
->dev
, "Dbgu or uart without hw timer\n");
1761 /* fallback for older SoCs: use version field */
1762 version
= atmel_uart_readl(port
, ATMEL_US_VERSION
);
1766 dev_dbg(port
->dev
, "This version is usart\n");
1767 atmel_port
->has_hw_timer
= true;
1768 atmel_port
->rtor
= ATMEL_US_RTOR
;
1772 dev_dbg(port
->dev
, "This version is uart\n");
1775 dev_err(port
->dev
, "Not supported ip name nor version, set to uart\n");
1781 * Perform initialization and enable port for reception
1783 static int atmel_startup(struct uart_port
*port
)
1785 struct platform_device
*pdev
= to_platform_device(port
->dev
);
1786 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1787 struct tty_struct
*tty
= port
->state
->port
.tty
;
1791 * Ensure that no interrupts are enabled otherwise when
1792 * request_irq() is called we could get stuck trying to
1793 * handle an unexpected interrupt
1795 atmel_uart_writel(port
, ATMEL_US_IDR
, -1);
1796 atmel_port
->ms_irq_enabled
= false;
1801 retval
= request_irq(port
->irq
, atmel_interrupt
,
1802 IRQF_SHARED
| IRQF_COND_SUSPEND
,
1803 tty
? tty
->name
: "atmel_serial", port
);
1805 dev_err(port
->dev
, "atmel_startup - Can't get irq\n");
1809 atomic_set(&atmel_port
->tasklet_shutdown
, 0);
1810 tasklet_init(&atmel_port
->tasklet_rx
, atmel_tasklet_rx_func
,
1811 (unsigned long)port
);
1812 tasklet_init(&atmel_port
->tasklet_tx
, atmel_tasklet_tx_func
,
1813 (unsigned long)port
);
1816 * Initialize DMA (if necessary)
1818 atmel_init_property(atmel_port
, pdev
);
1819 atmel_set_ops(port
);
1821 if (atmel_port
->prepare_rx
) {
1822 retval
= atmel_port
->prepare_rx(port
);
1824 atmel_set_ops(port
);
1827 if (atmel_port
->prepare_tx
) {
1828 retval
= atmel_port
->prepare_tx(port
);
1830 atmel_set_ops(port
);
1834 * Enable FIFO when available
1836 if (atmel_port
->fifo_size
) {
1837 unsigned int txrdym
= ATMEL_US_ONE_DATA
;
1838 unsigned int rxrdym
= ATMEL_US_ONE_DATA
;
1841 atmel_uart_writel(port
, ATMEL_US_CR
,
1846 if (atmel_use_dma_tx(port
))
1847 txrdym
= ATMEL_US_FOUR_DATA
;
1849 fmr
= ATMEL_US_TXRDYM(txrdym
) | ATMEL_US_RXRDYM(rxrdym
);
1850 if (atmel_port
->rts_high
&&
1851 atmel_port
->rts_low
)
1852 fmr
|= ATMEL_US_FRTSC
|
1853 ATMEL_US_RXFTHRES(atmel_port
->rts_high
) |
1854 ATMEL_US_RXFTHRES2(atmel_port
->rts_low
);
1856 atmel_uart_writel(port
, ATMEL_US_FMR
, fmr
);
1859 /* Save current CSR for comparison in atmel_tasklet_func() */
1860 atmel_port
->irq_status_prev
= atmel_get_lines_status(port
);
1863 * Finally, enable the serial port
1865 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_RSTSTA
| ATMEL_US_RSTRX
);
1866 /* enable xmit & rcvr */
1867 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_TXEN
| ATMEL_US_RXEN
);
1869 setup_timer(&atmel_port
->uart_timer
,
1870 atmel_uart_timer_callback
,
1871 (unsigned long)port
);
1873 if (atmel_use_pdc_rx(port
)) {
1874 /* set UART timeout */
1875 if (!atmel_port
->has_hw_timer
) {
1876 mod_timer(&atmel_port
->uart_timer
,
1877 jiffies
+ uart_poll_timeout(port
));
1878 /* set USART timeout */
1880 atmel_uart_writel(port
, atmel_port
->rtor
,
1882 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_STTTO
);
1884 atmel_uart_writel(port
, ATMEL_US_IER
,
1885 ATMEL_US_ENDRX
| ATMEL_US_TIMEOUT
);
1887 /* enable PDC controller */
1888 atmel_uart_writel(port
, ATMEL_PDC_PTCR
, ATMEL_PDC_RXTEN
);
1889 } else if (atmel_use_dma_rx(port
)) {
1890 /* set UART timeout */
1891 if (!atmel_port
->has_hw_timer
) {
1892 mod_timer(&atmel_port
->uart_timer
,
1893 jiffies
+ uart_poll_timeout(port
));
1894 /* set USART timeout */
1896 atmel_uart_writel(port
, atmel_port
->rtor
,
1898 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_STTTO
);
1900 atmel_uart_writel(port
, ATMEL_US_IER
,
1904 /* enable receive only */
1905 atmel_uart_writel(port
, ATMEL_US_IER
, ATMEL_US_RXRDY
);
1912 * Flush any TX data submitted for DMA. Called when the TX circular
1915 static void atmel_flush_buffer(struct uart_port
*port
)
1917 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1919 if (atmel_use_pdc_tx(port
)) {
1920 atmel_uart_writel(port
, ATMEL_PDC_TCR
, 0);
1921 atmel_port
->pdc_tx
.ofs
= 0;
1928 static void atmel_shutdown(struct uart_port
*port
)
1930 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1932 /* Disable interrupts at device level */
1933 atmel_uart_writel(port
, ATMEL_US_IDR
, -1);
1935 /* Prevent spurious interrupts from scheduling the tasklet */
1936 atomic_inc(&atmel_port
->tasklet_shutdown
);
1939 * Prevent any tasklets being scheduled during
1942 del_timer_sync(&atmel_port
->uart_timer
);
1944 /* Make sure that no interrupt is on the fly */
1945 synchronize_irq(port
->irq
);
1948 * Clear out any scheduled tasklets before
1949 * we destroy the buffers
1951 tasklet_kill(&atmel_port
->tasklet_rx
);
1952 tasklet_kill(&atmel_port
->tasklet_tx
);
1955 * Ensure everything is stopped and
1956 * disable port and break condition.
1958 atmel_stop_rx(port
);
1959 atmel_stop_tx(port
);
1961 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_RSTSTA
);
1964 * Shut-down the DMA.
1966 if (atmel_port
->release_rx
)
1967 atmel_port
->release_rx(port
);
1968 if (atmel_port
->release_tx
)
1969 atmel_port
->release_tx(port
);
1972 * Reset ring buffer pointers
1974 atmel_port
->rx_ring
.head
= 0;
1975 atmel_port
->rx_ring
.tail
= 0;
1978 * Free the interrupts
1980 free_irq(port
->irq
, port
);
1982 atmel_port
->ms_irq_enabled
= false;
1984 atmel_flush_buffer(port
);
1988 * Power / Clock management.
1990 static void atmel_serial_pm(struct uart_port
*port
, unsigned int state
,
1991 unsigned int oldstate
)
1993 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
1998 * Enable the peripheral clock for this serial port.
1999 * This is called on uart_open() or a resume event.
2001 clk_prepare_enable(atmel_port
->clk
);
2003 /* re-enable interrupts if we disabled some on suspend */
2004 atmel_uart_writel(port
, ATMEL_US_IER
, atmel_port
->backup_imr
);
2007 /* Back up the interrupt mask and disable all interrupts */
2008 atmel_port
->backup_imr
= atmel_uart_readl(port
, ATMEL_US_IMR
);
2009 atmel_uart_writel(port
, ATMEL_US_IDR
, -1);
2012 * Disable the peripheral clock for this serial port.
2013 * This is called on uart_close() or a suspend event.
2015 clk_disable_unprepare(atmel_port
->clk
);
2018 dev_err(port
->dev
, "atmel_serial: unknown pm %d\n", state
);
2023 * Change the port parameters
2025 static void atmel_set_termios(struct uart_port
*port
, struct ktermios
*termios
,
2026 struct ktermios
*old
)
2028 unsigned long flags
;
2029 unsigned int old_mode
, mode
, imr
, quot
, baud
;
2031 /* save the current mode register */
2032 mode
= old_mode
= atmel_uart_readl(port
, ATMEL_US_MR
);
2034 /* reset the mode, clock divisor, parity, stop bits and data size */
2035 mode
&= ~(ATMEL_US_USCLKS
| ATMEL_US_CHRL
| ATMEL_US_NBSTOP
|
2036 ATMEL_US_PAR
| ATMEL_US_USMODE
);
2038 baud
= uart_get_baud_rate(port
, termios
, old
, 0, port
->uartclk
/ 16);
2039 quot
= uart_get_divisor(port
, baud
);
2041 if (quot
> 65535) { /* BRGR is 16-bit, so switch to slower clock */
2043 mode
|= ATMEL_US_USCLKS_MCK_DIV8
;
2047 switch (termios
->c_cflag
& CSIZE
) {
2049 mode
|= ATMEL_US_CHRL_5
;
2052 mode
|= ATMEL_US_CHRL_6
;
2055 mode
|= ATMEL_US_CHRL_7
;
2058 mode
|= ATMEL_US_CHRL_8
;
2063 if (termios
->c_cflag
& CSTOPB
)
2064 mode
|= ATMEL_US_NBSTOP_2
;
2067 if (termios
->c_cflag
& PARENB
) {
2068 /* Mark or Space parity */
2069 if (termios
->c_cflag
& CMSPAR
) {
2070 if (termios
->c_cflag
& PARODD
)
2071 mode
|= ATMEL_US_PAR_MARK
;
2073 mode
|= ATMEL_US_PAR_SPACE
;
2074 } else if (termios
->c_cflag
& PARODD
)
2075 mode
|= ATMEL_US_PAR_ODD
;
2077 mode
|= ATMEL_US_PAR_EVEN
;
2079 mode
|= ATMEL_US_PAR_NONE
;
2081 spin_lock_irqsave(&port
->lock
, flags
);
2083 port
->read_status_mask
= ATMEL_US_OVRE
;
2084 if (termios
->c_iflag
& INPCK
)
2085 port
->read_status_mask
|= (ATMEL_US_FRAME
| ATMEL_US_PARE
);
2086 if (termios
->c_iflag
& (IGNBRK
| BRKINT
| PARMRK
))
2087 port
->read_status_mask
|= ATMEL_US_RXBRK
;
2089 if (atmel_use_pdc_rx(port
))
2090 /* need to enable error interrupts */
2091 atmel_uart_writel(port
, ATMEL_US_IER
, port
->read_status_mask
);
2094 * Characters to ignore
2096 port
->ignore_status_mask
= 0;
2097 if (termios
->c_iflag
& IGNPAR
)
2098 port
->ignore_status_mask
|= (ATMEL_US_FRAME
| ATMEL_US_PARE
);
2099 if (termios
->c_iflag
& IGNBRK
) {
2100 port
->ignore_status_mask
|= ATMEL_US_RXBRK
;
2102 * If we're ignoring parity and break indicators,
2103 * ignore overruns too (for real raw support).
2105 if (termios
->c_iflag
& IGNPAR
)
2106 port
->ignore_status_mask
|= ATMEL_US_OVRE
;
2108 /* TODO: Ignore all characters if CREAD is set.*/
2110 /* update the per-port timeout */
2111 uart_update_timeout(port
, termios
->c_cflag
, baud
);
2114 * save/disable interrupts. The tty layer will ensure that the
2115 * transmitter is empty if requested by the caller, so there's
2116 * no need to wait for it here.
2118 imr
= atmel_uart_readl(port
, ATMEL_US_IMR
);
2119 atmel_uart_writel(port
, ATMEL_US_IDR
, -1);
2121 /* disable receiver and transmitter */
2122 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_TXDIS
| ATMEL_US_RXDIS
);
2125 if (port
->rs485
.flags
& SER_RS485_ENABLED
) {
2126 atmel_uart_writel(port
, ATMEL_US_TTGR
,
2127 port
->rs485
.delay_rts_after_send
);
2128 mode
|= ATMEL_US_USMODE_RS485
;
2129 } else if (termios
->c_cflag
& CRTSCTS
) {
2130 /* RS232 with hardware handshake (RTS/CTS) */
2131 if (atmel_use_dma_rx(port
) && !atmel_use_fifo(port
)) {
2132 dev_info(port
->dev
, "not enabling hardware flow control because DMA is used");
2133 termios
->c_cflag
&= ~CRTSCTS
;
2135 mode
|= ATMEL_US_USMODE_HWHS
;
2138 /* RS232 without hadware handshake */
2139 mode
|= ATMEL_US_USMODE_NORMAL
;
2142 /* set the mode, clock divisor, parity, stop bits and data size */
2143 atmel_uart_writel(port
, ATMEL_US_MR
, mode
);
2146 * when switching the mode, set the RTS line state according to the
2147 * new mode, otherwise keep the former state
2149 if ((old_mode
& ATMEL_US_USMODE
) != (mode
& ATMEL_US_USMODE
)) {
2150 unsigned int rts_state
;
2152 if ((mode
& ATMEL_US_USMODE
) == ATMEL_US_USMODE_HWHS
) {
2153 /* let the hardware control the RTS line */
2154 rts_state
= ATMEL_US_RTSDIS
;
2156 /* force RTS line to low level */
2157 rts_state
= ATMEL_US_RTSEN
;
2160 atmel_uart_writel(port
, ATMEL_US_CR
, rts_state
);
2163 /* set the baud rate */
2164 atmel_uart_writel(port
, ATMEL_US_BRGR
, quot
);
2165 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_RSTSTA
| ATMEL_US_RSTRX
);
2166 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_TXEN
| ATMEL_US_RXEN
);
2168 /* restore interrupts */
2169 atmel_uart_writel(port
, ATMEL_US_IER
, imr
);
2171 /* CTS flow-control and modem-status interrupts */
2172 if (UART_ENABLE_MS(port
, termios
->c_cflag
))
2173 atmel_enable_ms(port
);
2175 atmel_disable_ms(port
);
2177 spin_unlock_irqrestore(&port
->lock
, flags
);
2180 static void atmel_set_ldisc(struct uart_port
*port
, struct ktermios
*termios
)
2182 if (termios
->c_line
== N_PPS
) {
2183 port
->flags
|= UPF_HARDPPS_CD
;
2184 spin_lock_irq(&port
->lock
);
2185 atmel_enable_ms(port
);
2186 spin_unlock_irq(&port
->lock
);
2188 port
->flags
&= ~UPF_HARDPPS_CD
;
2189 if (!UART_ENABLE_MS(port
, termios
->c_cflag
)) {
2190 spin_lock_irq(&port
->lock
);
2191 atmel_disable_ms(port
);
2192 spin_unlock_irq(&port
->lock
);
2198 * Return string describing the specified port
2200 static const char *atmel_type(struct uart_port
*port
)
2202 return (port
->type
== PORT_ATMEL
) ? "ATMEL_SERIAL" : NULL
;
2206 * Release the memory region(s) being used by 'port'.
2208 static void atmel_release_port(struct uart_port
*port
)
2210 struct platform_device
*pdev
= to_platform_device(port
->dev
);
2211 int size
= pdev
->resource
[0].end
- pdev
->resource
[0].start
+ 1;
2213 release_mem_region(port
->mapbase
, size
);
2215 if (port
->flags
& UPF_IOREMAP
) {
2216 iounmap(port
->membase
);
2217 port
->membase
= NULL
;
2222 * Request the memory region(s) being used by 'port'.
2224 static int atmel_request_port(struct uart_port
*port
)
2226 struct platform_device
*pdev
= to_platform_device(port
->dev
);
2227 int size
= pdev
->resource
[0].end
- pdev
->resource
[0].start
+ 1;
2229 if (!request_mem_region(port
->mapbase
, size
, "atmel_serial"))
2232 if (port
->flags
& UPF_IOREMAP
) {
2233 port
->membase
= ioremap(port
->mapbase
, size
);
2234 if (port
->membase
== NULL
) {
2235 release_mem_region(port
->mapbase
, size
);
2244 * Configure/autoconfigure the port.
2246 static void atmel_config_port(struct uart_port
*port
, int flags
)
2248 if (flags
& UART_CONFIG_TYPE
) {
2249 port
->type
= PORT_ATMEL
;
2250 atmel_request_port(port
);
2255 * Verify the new serial_struct (for TIOCSSERIAL).
2257 static int atmel_verify_port(struct uart_port
*port
, struct serial_struct
*ser
)
2260 if (ser
->type
!= PORT_UNKNOWN
&& ser
->type
!= PORT_ATMEL
)
2262 if (port
->irq
!= ser
->irq
)
2264 if (ser
->io_type
!= SERIAL_IO_MEM
)
2266 if (port
->uartclk
/ 16 != ser
->baud_base
)
2268 if (port
->mapbase
!= (unsigned long)ser
->iomem_base
)
2270 if (port
->iobase
!= ser
->port
)
2277 #ifdef CONFIG_CONSOLE_POLL
2278 static int atmel_poll_get_char(struct uart_port
*port
)
2280 while (!(atmel_uart_readl(port
, ATMEL_US_CSR
) & ATMEL_US_RXRDY
))
2283 return atmel_uart_read_char(port
);
2286 static void atmel_poll_put_char(struct uart_port
*port
, unsigned char ch
)
2288 while (!(atmel_uart_readl(port
, ATMEL_US_CSR
) & ATMEL_US_TXRDY
))
2291 atmel_uart_write_char(port
, ch
);
2295 static struct uart_ops atmel_pops
= {
2296 .tx_empty
= atmel_tx_empty
,
2297 .set_mctrl
= atmel_set_mctrl
,
2298 .get_mctrl
= atmel_get_mctrl
,
2299 .stop_tx
= atmel_stop_tx
,
2300 .start_tx
= atmel_start_tx
,
2301 .stop_rx
= atmel_stop_rx
,
2302 .enable_ms
= atmel_enable_ms
,
2303 .break_ctl
= atmel_break_ctl
,
2304 .startup
= atmel_startup
,
2305 .shutdown
= atmel_shutdown
,
2306 .flush_buffer
= atmel_flush_buffer
,
2307 .set_termios
= atmel_set_termios
,
2308 .set_ldisc
= atmel_set_ldisc
,
2310 .release_port
= atmel_release_port
,
2311 .request_port
= atmel_request_port
,
2312 .config_port
= atmel_config_port
,
2313 .verify_port
= atmel_verify_port
,
2314 .pm
= atmel_serial_pm
,
2315 #ifdef CONFIG_CONSOLE_POLL
2316 .poll_get_char
= atmel_poll_get_char
,
2317 .poll_put_char
= atmel_poll_put_char
,
2322 * Configure the port from the platform device resource info.
2324 static int atmel_init_port(struct atmel_uart_port
*atmel_port
,
2325 struct platform_device
*pdev
)
2328 struct uart_port
*port
= &atmel_port
->uart
;
2329 struct atmel_uart_data
*pdata
= dev_get_platdata(&pdev
->dev
);
2331 atmel_init_property(atmel_port
, pdev
);
2332 atmel_set_ops(port
);
2334 atmel_init_rs485(port
, pdev
);
2336 port
->iotype
= UPIO_MEM
;
2337 port
->flags
= UPF_BOOT_AUTOCONF
;
2338 port
->ops
= &atmel_pops
;
2340 port
->dev
= &pdev
->dev
;
2341 port
->mapbase
= pdev
->resource
[0].start
;
2342 port
->irq
= pdev
->resource
[1].start
;
2343 port
->rs485_config
= atmel_config_rs485
;
2345 memset(&atmel_port
->rx_ring
, 0, sizeof(atmel_port
->rx_ring
));
2347 if (pdata
&& pdata
->regs
) {
2348 /* Already mapped by setup code */
2349 port
->membase
= pdata
->regs
;
2351 port
->flags
|= UPF_IOREMAP
;
2352 port
->membase
= NULL
;
2355 /* for console, the clock could already be configured */
2356 if (!atmel_port
->clk
) {
2357 atmel_port
->clk
= clk_get(&pdev
->dev
, "usart");
2358 if (IS_ERR(atmel_port
->clk
)) {
2359 ret
= PTR_ERR(atmel_port
->clk
);
2360 atmel_port
->clk
= NULL
;
2363 ret
= clk_prepare_enable(atmel_port
->clk
);
2365 clk_put(atmel_port
->clk
);
2366 atmel_port
->clk
= NULL
;
2369 port
->uartclk
= clk_get_rate(atmel_port
->clk
);
2370 clk_disable_unprepare(atmel_port
->clk
);
2371 /* only enable clock when USART is in use */
2374 /* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
2375 if (port
->rs485
.flags
& SER_RS485_ENABLED
)
2376 atmel_port
->tx_done_mask
= ATMEL_US_TXEMPTY
;
2377 else if (atmel_use_pdc_tx(port
)) {
2378 port
->fifosize
= PDC_BUFFER_SIZE
;
2379 atmel_port
->tx_done_mask
= ATMEL_US_ENDTX
| ATMEL_US_TXBUFE
;
2381 atmel_port
->tx_done_mask
= ATMEL_US_TXRDY
;
2387 struct platform_device
*atmel_default_console_device
; /* the serial console device */
2389 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2390 static void atmel_console_putchar(struct uart_port
*port
, int ch
)
2392 while (!(atmel_uart_readl(port
, ATMEL_US_CSR
) & ATMEL_US_TXRDY
))
2394 atmel_uart_write_char(port
, ch
);
2398 * Interrupts are disabled on entering
2400 static void atmel_console_write(struct console
*co
, const char *s
, u_int count
)
2402 struct uart_port
*port
= &atmel_ports
[co
->index
].uart
;
2403 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
2404 unsigned int status
, imr
;
2405 unsigned int pdc_tx
;
2408 * First, save IMR and then disable interrupts
2410 imr
= atmel_uart_readl(port
, ATMEL_US_IMR
);
2411 atmel_uart_writel(port
, ATMEL_US_IDR
,
2412 ATMEL_US_RXRDY
| atmel_port
->tx_done_mask
);
2414 /* Store PDC transmit status and disable it */
2415 pdc_tx
= atmel_uart_readl(port
, ATMEL_PDC_PTSR
) & ATMEL_PDC_TXTEN
;
2416 atmel_uart_writel(port
, ATMEL_PDC_PTCR
, ATMEL_PDC_TXTDIS
);
2418 uart_console_write(port
, s
, count
, atmel_console_putchar
);
2421 * Finally, wait for transmitter to become empty
2425 status
= atmel_uart_readl(port
, ATMEL_US_CSR
);
2426 } while (!(status
& ATMEL_US_TXRDY
));
2428 /* Restore PDC transmit status */
2430 atmel_uart_writel(port
, ATMEL_PDC_PTCR
, ATMEL_PDC_TXTEN
);
2432 /* set interrupts back the way they were */
2433 atmel_uart_writel(port
, ATMEL_US_IER
, imr
);
2437 * If the port was already initialised (eg, by a boot loader),
2438 * try to determine the current setup.
2440 static void __init
atmel_console_get_options(struct uart_port
*port
, int *baud
,
2441 int *parity
, int *bits
)
2443 unsigned int mr
, quot
;
2446 * If the baud rate generator isn't running, the port wasn't
2447 * initialized by the boot loader.
2449 quot
= atmel_uart_readl(port
, ATMEL_US_BRGR
) & ATMEL_US_CD
;
2453 mr
= atmel_uart_readl(port
, ATMEL_US_MR
) & ATMEL_US_CHRL
;
2454 if (mr
== ATMEL_US_CHRL_8
)
2459 mr
= atmel_uart_readl(port
, ATMEL_US_MR
) & ATMEL_US_PAR
;
2460 if (mr
== ATMEL_US_PAR_EVEN
)
2462 else if (mr
== ATMEL_US_PAR_ODD
)
2466 * The serial core only rounds down when matching this to a
2467 * supported baud rate. Make sure we don't end up slightly
2468 * lower than one of those, as it would make us fall through
2469 * to a much lower baud rate than we really want.
2471 *baud
= port
->uartclk
/ (16 * (quot
- 1));
2474 static int __init
atmel_console_setup(struct console
*co
, char *options
)
2477 struct uart_port
*port
= &atmel_ports
[co
->index
].uart
;
2483 if (port
->membase
== NULL
) {
2484 /* Port not initialized yet - delay setup */
2488 ret
= clk_prepare_enable(atmel_ports
[co
->index
].clk
);
2492 atmel_uart_writel(port
, ATMEL_US_IDR
, -1);
2493 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_RSTSTA
| ATMEL_US_RSTRX
);
2494 atmel_uart_writel(port
, ATMEL_US_CR
, ATMEL_US_TXEN
| ATMEL_US_RXEN
);
2497 uart_parse_options(options
, &baud
, &parity
, &bits
, &flow
);
2499 atmel_console_get_options(port
, &baud
, &parity
, &bits
);
2501 return uart_set_options(port
, co
, baud
, parity
, bits
, flow
);
2504 static struct uart_driver atmel_uart
;
2506 static struct console atmel_console
= {
2507 .name
= ATMEL_DEVICENAME
,
2508 .write
= atmel_console_write
,
2509 .device
= uart_console_device
,
2510 .setup
= atmel_console_setup
,
2511 .flags
= CON_PRINTBUFFER
,
2513 .data
= &atmel_uart
,
2516 #define ATMEL_CONSOLE_DEVICE (&atmel_console)
2519 * Early console initialization (before VM subsystem initialized).
2521 static int __init
atmel_console_init(void)
2524 if (atmel_default_console_device
) {
2525 struct atmel_uart_data
*pdata
=
2526 dev_get_platdata(&atmel_default_console_device
->dev
);
2527 int id
= pdata
->num
;
2528 struct atmel_uart_port
*atmel_port
= &atmel_ports
[id
];
2530 atmel_port
->backup_imr
= 0;
2531 atmel_port
->uart
.line
= id
;
2533 add_preferred_console(ATMEL_DEVICENAME
, id
, NULL
);
2534 ret
= atmel_init_port(atmel_port
, atmel_default_console_device
);
2537 register_console(&atmel_console
);
2543 console_initcall(atmel_console_init
);
2546 * Late console initialization.
2548 static int __init
atmel_late_console_init(void)
2550 if (atmel_default_console_device
2551 && !(atmel_console
.flags
& CON_ENABLED
))
2552 register_console(&atmel_console
);
2557 core_initcall(atmel_late_console_init
);
2559 static inline bool atmel_is_console_port(struct uart_port
*port
)
2561 return port
->cons
&& port
->cons
->index
== port
->line
;
2565 #define ATMEL_CONSOLE_DEVICE NULL
2567 static inline bool atmel_is_console_port(struct uart_port
*port
)
2573 static struct uart_driver atmel_uart
= {
2574 .owner
= THIS_MODULE
,
2575 .driver_name
= "atmel_serial",
2576 .dev_name
= ATMEL_DEVICENAME
,
2577 .major
= SERIAL_ATMEL_MAJOR
,
2578 .minor
= MINOR_START
,
2579 .nr
= ATMEL_MAX_UART
,
2580 .cons
= ATMEL_CONSOLE_DEVICE
,
2584 static bool atmel_serial_clk_will_stop(void)
2586 #ifdef CONFIG_ARCH_AT91
2587 return at91_suspend_entering_slow_clock();
2593 static int atmel_serial_suspend(struct platform_device
*pdev
,
2596 struct uart_port
*port
= platform_get_drvdata(pdev
);
2597 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
2599 if (atmel_is_console_port(port
) && console_suspend_enabled
) {
2600 /* Drain the TX shifter */
2601 while (!(atmel_uart_readl(port
, ATMEL_US_CSR
) &
2606 /* we can not wake up if we're running on slow clock */
2607 atmel_port
->may_wakeup
= device_may_wakeup(&pdev
->dev
);
2608 if (atmel_serial_clk_will_stop()) {
2609 unsigned long flags
;
2611 spin_lock_irqsave(&atmel_port
->lock_suspended
, flags
);
2612 atmel_port
->suspended
= true;
2613 spin_unlock_irqrestore(&atmel_port
->lock_suspended
, flags
);
2614 device_set_wakeup_enable(&pdev
->dev
, 0);
2617 uart_suspend_port(&atmel_uart
, port
);
2622 static int atmel_serial_resume(struct platform_device
*pdev
)
2624 struct uart_port
*port
= platform_get_drvdata(pdev
);
2625 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
2626 unsigned long flags
;
2628 spin_lock_irqsave(&atmel_port
->lock_suspended
, flags
);
2629 if (atmel_port
->pending
) {
2630 atmel_handle_receive(port
, atmel_port
->pending
);
2631 atmel_handle_status(port
, atmel_port
->pending
,
2632 atmel_port
->pending_status
);
2633 atmel_handle_transmit(port
, atmel_port
->pending
);
2634 atmel_port
->pending
= 0;
2636 atmel_port
->suspended
= false;
2637 spin_unlock_irqrestore(&atmel_port
->lock_suspended
, flags
);
2639 uart_resume_port(&atmel_uart
, port
);
2640 device_set_wakeup_enable(&pdev
->dev
, atmel_port
->may_wakeup
);
2645 #define atmel_serial_suspend NULL
2646 #define atmel_serial_resume NULL
2649 static void atmel_serial_probe_fifos(struct atmel_uart_port
*atmel_port
,
2650 struct platform_device
*pdev
)
2652 atmel_port
->fifo_size
= 0;
2653 atmel_port
->rts_low
= 0;
2654 atmel_port
->rts_high
= 0;
2656 if (of_property_read_u32(pdev
->dev
.of_node
,
2658 &atmel_port
->fifo_size
))
2661 if (!atmel_port
->fifo_size
)
2664 if (atmel_port
->fifo_size
< ATMEL_MIN_FIFO_SIZE
) {
2665 atmel_port
->fifo_size
= 0;
2666 dev_err(&pdev
->dev
, "Invalid FIFO size\n");
2671 * 0 <= rts_low <= rts_high <= fifo_size
2672 * Once their CTS line asserted by the remote peer, some x86 UARTs tend
2673 * to flush their internal TX FIFO, commonly up to 16 data, before
2674 * actually stopping to send new data. So we try to set the RTS High
2675 * Threshold to a reasonably high value respecting this 16 data
2676 * empirical rule when possible.
2678 atmel_port
->rts_high
= max_t(int, atmel_port
->fifo_size
>> 1,
2679 atmel_port
->fifo_size
- ATMEL_RTS_HIGH_OFFSET
);
2680 atmel_port
->rts_low
= max_t(int, atmel_port
->fifo_size
>> 2,
2681 atmel_port
->fifo_size
- ATMEL_RTS_LOW_OFFSET
);
2683 dev_info(&pdev
->dev
, "Using FIFO (%u data)\n",
2684 atmel_port
->fifo_size
);
2685 dev_dbg(&pdev
->dev
, "RTS High Threshold : %2u data\n",
2686 atmel_port
->rts_high
);
2687 dev_dbg(&pdev
->dev
, "RTS Low Threshold : %2u data\n",
2688 atmel_port
->rts_low
);
2691 static int atmel_serial_probe(struct platform_device
*pdev
)
2693 struct atmel_uart_port
*atmel_port
;
2694 struct device_node
*np
= pdev
->dev
.of_node
;
2695 struct atmel_uart_data
*pdata
= dev_get_platdata(&pdev
->dev
);
2700 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE
& (ATMEL_SERIAL_RINGSIZE
- 1));
2703 ret
= of_alias_get_id(np
, "serial");
2709 /* port id not found in platform data nor device-tree aliases:
2710 * auto-enumerate it */
2711 ret
= find_first_zero_bit(atmel_ports_in_use
, ATMEL_MAX_UART
);
2713 if (ret
>= ATMEL_MAX_UART
) {
2718 if (test_and_set_bit(ret
, atmel_ports_in_use
)) {
2719 /* port already in use */
2724 atmel_port
= &atmel_ports
[ret
];
2725 atmel_port
->backup_imr
= 0;
2726 atmel_port
->uart
.line
= ret
;
2727 atmel_serial_probe_fifos(atmel_port
, pdev
);
2729 atomic_set(&atmel_port
->tasklet_shutdown
, 0);
2730 spin_lock_init(&atmel_port
->lock_suspended
);
2732 ret
= atmel_init_port(atmel_port
, pdev
);
2736 atmel_port
->gpios
= mctrl_gpio_init(&atmel_port
->uart
, 0);
2737 if (IS_ERR(atmel_port
->gpios
)) {
2738 ret
= PTR_ERR(atmel_port
->gpios
);
2742 if (!atmel_use_pdc_rx(&atmel_port
->uart
)) {
2744 data
= kmalloc(sizeof(struct atmel_uart_char
)
2745 * ATMEL_SERIAL_RINGSIZE
, GFP_KERNEL
);
2747 goto err_alloc_ring
;
2748 atmel_port
->rx_ring
.buf
= data
;
2751 rs485_enabled
= atmel_port
->uart
.rs485
.flags
& SER_RS485_ENABLED
;
2753 ret
= uart_add_one_port(&atmel_uart
, &atmel_port
->uart
);
2757 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2758 if (atmel_is_console_port(&atmel_port
->uart
)
2759 && ATMEL_CONSOLE_DEVICE
->flags
& CON_ENABLED
) {
2761 * The serial core enabled the clock for us, so undo
2762 * the clk_prepare_enable() in atmel_console_setup()
2764 clk_disable_unprepare(atmel_port
->clk
);
2768 device_init_wakeup(&pdev
->dev
, 1);
2769 platform_set_drvdata(pdev
, atmel_port
);
2772 * The peripheral clock has been disabled by atmel_init_port():
2773 * enable it before accessing I/O registers
2775 clk_prepare_enable(atmel_port
->clk
);
2777 if (rs485_enabled
) {
2778 atmel_uart_writel(&atmel_port
->uart
, ATMEL_US_MR
,
2779 ATMEL_US_USMODE_NORMAL
);
2780 atmel_uart_writel(&atmel_port
->uart
, ATMEL_US_CR
,
2785 * Get port name of usart or uart
2787 atmel_get_ip_name(&atmel_port
->uart
);
2790 * The peripheral clock can now safely be disabled till the port
2793 clk_disable_unprepare(atmel_port
->clk
);
2798 kfree(atmel_port
->rx_ring
.buf
);
2799 atmel_port
->rx_ring
.buf
= NULL
;
2801 if (!atmel_is_console_port(&atmel_port
->uart
)) {
2802 clk_put(atmel_port
->clk
);
2803 atmel_port
->clk
= NULL
;
2806 clear_bit(atmel_port
->uart
.line
, atmel_ports_in_use
);
2812 * Even if the driver is not modular, it makes sense to be able to
2813 * unbind a device: there can be many bound devices, and there are
2814 * situations where dynamic binding and unbinding can be useful.
2816 * For example, a connected device can require a specific firmware update
2817 * protocol that needs bitbanging on IO lines, but use the regular serial
2818 * port in the normal case.
2820 static int atmel_serial_remove(struct platform_device
*pdev
)
2822 struct uart_port
*port
= platform_get_drvdata(pdev
);
2823 struct atmel_uart_port
*atmel_port
= to_atmel_uart_port(port
);
2826 tasklet_kill(&atmel_port
->tasklet_rx
);
2827 tasklet_kill(&atmel_port
->tasklet_tx
);
2829 device_init_wakeup(&pdev
->dev
, 0);
2831 ret
= uart_remove_one_port(&atmel_uart
, port
);
2833 kfree(atmel_port
->rx_ring
.buf
);
2835 /* "port" is allocated statically, so we shouldn't free it */
2837 clear_bit(port
->line
, atmel_ports_in_use
);
2839 clk_put(atmel_port
->clk
);
2840 atmel_port
->clk
= NULL
;
2845 static struct platform_driver atmel_serial_driver
= {
2846 .probe
= atmel_serial_probe
,
2847 .remove
= atmel_serial_remove
,
2848 .suspend
= atmel_serial_suspend
,
2849 .resume
= atmel_serial_resume
,
2851 .name
= "atmel_usart",
2852 .of_match_table
= of_match_ptr(atmel_serial_dt_ids
),
2856 static int __init
atmel_serial_init(void)
2860 ret
= uart_register_driver(&atmel_uart
);
2864 ret
= platform_driver_register(&atmel_serial_driver
);
2866 uart_unregister_driver(&atmel_uart
);
2870 device_initcall(atmel_serial_init
);