2 * drivers/serial/sh-sci.c
4 * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
6 * Copyright (C) 2002 - 2011 Paul Mundt
7 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
9 * based off of the old drivers/char/sh-sci.c by:
11 * Copyright (C) 1999, 2000 Niibe Yutaka
12 * Copyright (C) 2000 Sugioka Toshinobu
13 * Modified to support multiple serial ports. Stuart Menefy (May 2000).
14 * Modified to support SecureEdge. David McCullough (2002)
15 * Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
16 * Removed SH7300 support (Jul 2007).
18 * This file is subject to the terms and conditions of the GNU General Public
19 * License. See the file "COPYING" in the main directory of this archive
22 #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/timer.h>
31 #include <linux/interrupt.h>
32 #include <linux/tty.h>
33 #include <linux/tty_flip.h>
34 #include <linux/serial.h>
35 #include <linux/major.h>
36 #include <linux/string.h>
37 #include <linux/sysrq.h>
38 #include <linux/ioport.h>
40 #include <linux/init.h>
41 #include <linux/delay.h>
42 #include <linux/console.h>
43 #include <linux/platform_device.h>
44 #include <linux/serial_sci.h>
45 #include <linux/notifier.h>
46 #include <linux/pm_runtime.h>
47 #include <linux/cpufreq.h>
48 #include <linux/clk.h>
49 #include <linux/ctype.h>
50 #include <linux/err.h>
51 #include <linux/dmaengine.h>
52 #include <linux/scatterlist.h>
53 #include <linux/slab.h>
56 #include <asm/sh_bios.h>
/*
 * NOTE(review): this chunk is a lossy extraction -- the opening line of the
 * enclosing definition (presumably "struct sci_port {") and several member
 * lines are missing, and each visible line carries a stray leading number
 * from the original file. Only comments are added below.
 */
/* Embedded generic serial core port; to_sci_port() recovers the container. */
66 struct uart_port port;
68 /* Platform configuration */
69 struct plat_sci_port *cfg;
71 /* Port enable callback */
72 void (*enable)(struct uart_port *port);
74 /* Port disable callback */
75 void (*disable)(struct uart_port *port);
/* Timer used to re-sample the RX line while a break condition persists. */
78 struct timer_list break_timer;
/* DMA channels for TX/RX; NULL presumably means PIO fallback -- confirm. */
86 struct dma_chan *chan_tx;
87 struct dma_chan *chan_rx;
89 #ifdef CONFIG_SERIAL_SH_SCI_DMA
/* In-flight descriptors and cookies; RX is double-buffered (two entries). */
90 struct dma_async_tx_descriptor *desc_tx;
91 struct dma_async_tx_descriptor *desc_rx[2];
92 dma_cookie_t cookie_tx;
93 dma_cookie_t cookie_rx[2];
94 dma_cookie_t active_rx;
/* Scatterlists for the TX circular buffer and the two RX bounce buffers. */
95 struct scatterlist sg_tx;
96 unsigned int sg_len_tx;
97 struct scatterlist sg_rx[2];
/* shdma slave parameters used by the DMA channel filter. */
99 struct sh_dmae_slave param_tx;
100 struct sh_dmae_slave param_rx;
/* Deferred-work contexts and the RX DMA timeout timer. */
101 struct work_struct work_tx;
102 struct work_struct work_rx;
103 struct timer_list rx_timer;
104 unsigned int rx_timeout;
/* cpufreq transition notifier; see sci_notifier(). */
107 struct notifier_block freq_transition;
110 /* Function prototypes */
111 static void sci_start_tx(struct uart_port *port);
112 static void sci_stop_tx(struct uart_port *port);
113 static void sci_start_rx(struct uart_port *port);
/* Maximum number of ports, taken from Kconfig. */
115 #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
117 static struct sci_port sci_ports[SCI_NPORTS];
118 static struct uart_driver sci_uart_driver;
/*
 * Convert the embedded uart_port pointer back to its containing sci_port.
 */
120 static inline struct sci_port *
121 to_sci_port(struct uart_port *uart)
123 return container_of(uart, struct sci_port, port);
126 #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
128 #ifdef CONFIG_CONSOLE_POLL
/*
 * Poll-mode (e.g. kgdb) single-character read: clear any pending error
 * flags, bail out when no receive data is ready (the early-return lines
 * are not visible in this chunk -- confirm against the full file), then
 * read SCxRDR and acknowledge the RDxF status bit.
 */
129 static int sci_poll_get_char(struct uart_port *port)
131 unsigned short status;
135 status = sci_in(port, SCxSR);
136 if (status & SCxSR_ERRORS(port)) {
137 sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
143 if (!(status & SCxSR_RDxF(port)))
146 c = sci_in(port, SCxRDR);
150 sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
/*
 * Poll-mode write: busy-wait until the transmit data register is empty,
 * write the character, then clear TDxE (keeping TEND asserted low so the
 * transmitter drains).
 */
156 static void sci_poll_put_char(struct uart_port *port, unsigned char c)
158 unsigned short status;
161 status = sci_in(port, SCxSR);
162 } while (!(status & SCxSR_TDxE(port)));
164 sci_out(port, SCxTDR, c);
165 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
167 #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
/*
 * sci_init_pins(): per-SoC pin-mux/handshake setup, selected at compile
 * time by the #if/#elif chain below. Each variant configures the pins
 * for RX/TX and (where supported) RTS/CTS according to CRTSCTS in cflag.
 */
169 #if defined(__H8300H__) || defined(__H8300S__)
/* H8/300: program the GPIO data-direction bits for this channel's pins. */
170 static void sci_init_pins(struct uart_port *port, unsigned int cflag)
172 int ch = (port->mapbase - SMR0) >> 3;
175 H8300_GPIO_DDR(h8300_sci_pins[ch].port,
176 h8300_sci_pins[ch].rx,
178 H8300_GPIO_DDR(h8300_sci_pins[ch].port,
179 h8300_sci_pins[ch].tx,
183 H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx;
185 #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
/* SH7710/SH7712: select SCIF function in PACR/PBCR based on mapbase. */
186 static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
188 if (port->mapbase == 0xA4400000) {
189 __raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
190 __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
191 } else if (port->mapbase == 0xA4410000)
192 __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
194 #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721)
/* SH7720/SH7721: full handshake pins with CRTSCTS, TX/RX only without. */
195 static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
199 if (cflag & CRTSCTS) {
201 if (port->mapbase == 0xa4430000) { /* SCIF0 */
202 /* Clear PTCR bit 9-2; enable all scif pins but sck */
203 data = __raw_readw(PORT_PTCR);
204 __raw_writew((data & 0xfc03), PORT_PTCR);
205 } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
206 /* Clear PVCR bit 9-2 */
207 data = __raw_readw(PORT_PVCR);
208 __raw_writew((data & 0xfc03), PORT_PVCR);
211 if (port->mapbase == 0xa4430000) { /* SCIF0 */
212 /* Clear PTCR bit 5-2; enable only tx and rx */
213 data = __raw_readw(PORT_PTCR);
214 __raw_writew((data & 0xffc3), PORT_PTCR);
215 } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
216 /* Clear PVCR bit 5-2 */
217 data = __raw_readw(PORT_PVCR);
218 __raw_writew((data & 0xffc3), PORT_PVCR);
222 #elif defined(CONFIG_CPU_SH3)
223 /* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */
224 static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
228 /* We need to set SCPCR to enable RTS/CTS */
229 data = __raw_readw(SCPCR);
230 /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/
231 __raw_writew(data & 0x0fcf, SCPCR);
233 if (!(cflag & CRTSCTS)) {
234 /* We need to set SCPCR to enable RTS/CTS */
235 data = __raw_readw(SCPCR);
236 /* Clear out SCP7MD1,0, SCP4MD1,0,
237 Set SCP6MD1,0 = {01} (output) */
238 __raw_writew((data & 0x0fcf) | 0x1000, SCPCR);
240 data = __raw_readb(SCPDR);
241 /* Set /RTS2 (bit6) = 0 */
242 __raw_writeb(data & 0xbf, SCPDR);
245 #elif defined(CONFIG_CPU_SUBTYPE_SH7722)
/* SH7722: adjust PSCR for the first SCIF only. */
246 static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
250 if (port->mapbase == 0xffe00000) {
251 data = __raw_readw(PSCR)
253 if (!(cflag & CRTSCTS))
256 __raw_writew(data, PSCR);
259 #elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
260 defined(CONFIG_CPU_SUBTYPE_SH7763) || \
261 defined(CONFIG_CPU_SUBTYPE_SH7780) || \
262 defined(CONFIG_CPU_SUBTYPE_SH7785) || \
263 defined(CONFIG_CPU_SUBTYPE_SH7786) || \
264 defined(CONFIG_CPU_SUBTYPE_SHX3)
/* SH4A parts: without CRTSCTS, drive RTS high via SCSPTR0. */
265 static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
267 if (!(cflag & CRTSCTS))
268 __raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */
270 #elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A)
/* Plain SH4: same, but through SCSPTR2. */
271 static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
273 if (!(cflag & CRTSCTS))
274 __raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */
/* Default variant: nothing to do (body not visible in this chunk). */
277 static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
/*
 * FIFO accounting helpers, selected per SoC: scif_txfill() = bytes queued
 * in the TX FIFO, scif_txroom() = free TX FIFO space, scif_rxfill() =
 * bytes waiting in the RX FIFO. The sci_* trio at the bottom is the
 * single-register (no-FIFO) SCI equivalent.
 */
283 #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
284 defined(CONFIG_CPU_SUBTYPE_SH7780) || \
285 defined(CONFIG_CPU_SUBTYPE_SH7785) || \
286 defined(CONFIG_CPU_SUBTYPE_SH7786)
287 static int scif_txfill(struct uart_port *port)
289 return sci_in(port, SCTFDR) & 0xff;
292 static int scif_txroom(struct uart_port *port)
294 return SCIF_TXROOM_MAX - scif_txfill(port);
297 static int scif_rxfill(struct uart_port *port)
299 return sci_in(port, SCRFDR) & 0xff;
301 #elif defined(CONFIG_CPU_SUBTYPE_SH7763)
/* SH7763: ports 0/1 (by mapbase) have dedicated fill registers; port 2
 * packs both counts into SCFDR. */
302 static int scif_txfill(struct uart_port *port)
304 if (port->mapbase == 0xffe00000 ||
305 port->mapbase == 0xffe08000)
307 return sci_in(port, SCTFDR) & 0xff;
310 return sci_in(port, SCFDR) >> 8;
313 static int scif_txroom(struct uart_port *port)
315 if (port->mapbase == 0xffe00000 ||
316 port->mapbase == 0xffe08000)
318 return SCIF_TXROOM_MAX - scif_txfill(port);
321 return SCIF2_TXROOM_MAX - scif_txfill(port);
324 static int scif_rxfill(struct uart_port *port)
326 if ((port->mapbase == 0xffe00000) ||
327 (port->mapbase == 0xffe08000)) {
329 return sci_in(port, SCRFDR) & 0xff;
332 return sci_in(port, SCFDR) & SCIF2_RFDC_MASK;
335 #elif defined(CONFIG_ARCH_SH7372)
/* SH7372: SCIFA packs counts into SCFDR; SCIFB has separate registers. */
336 static int scif_txfill(struct uart_port *port)
338 if (port->type == PORT_SCIFA)
339 return sci_in(port, SCFDR) >> 8;
341 return sci_in(port, SCTFDR);
344 static int scif_txroom(struct uart_port *port)
346 return port->fifosize - scif_txfill(port);
349 static int scif_rxfill(struct uart_port *port)
351 if (port->type == PORT_SCIFA)
352 return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
354 return sci_in(port, SCRFDR);
/* Generic SCIF fallback: both counts live in SCFDR. */
357 static int scif_txfill(struct uart_port *port)
359 return sci_in(port, SCFDR) >> 8;
362 static int scif_txroom(struct uart_port *port)
364 return SCIF_TXROOM_MAX - scif_txfill(port);
367 static int scif_rxfill(struct uart_port *port)
369 return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
/* Plain SCI: a one-deep "FIFO" tracked via the TDRE/RDxF status bits. */
373 static int sci_txfill(struct uart_port *port)
375 return !(sci_in(port, SCxSR) & SCI_TDRE);
378 static int sci_txroom(struct uart_port *port)
380 return !sci_txfill(port);
383 static int sci_rxfill(struct uart_port *port)
385 return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
388 /* ********************************************************************** *
389 * the interrupt related routines *
390 * ********************************************************************** */
/*
 * Drain the circular xmit buffer into the transmit FIFO/register.
 * If TDxE is not set, the hardware cannot accept data yet; otherwise
 * write up to txroom() characters, ack TDxE, wake up writers when the
 * buffer drops below WAKEUP_CHARS, and disable TX interrupts once empty.
 * (Several lines of this function are missing from this chunk.)
 */
392 static void sci_transmit_chars(struct uart_port *port)
394 struct circ_buf *xmit = &port->state->xmit;
395 unsigned int stopped = uart_tx_stopped(port);
396 unsigned short status;
400 status = sci_in(port, SCxSR);
401 if (!(status & SCxSR_TDxE(port))) {
402 ctrl = sci_in(port, SCSCR);
403 if (uart_circ_empty(xmit))
407 sci_out(port, SCSCR, ctrl);
/* FIFO room differs between plain SCI and SCIF variants. */
411 if (port->type == PORT_SCI)
412 count = sci_txroom(port);
414 count = scif_txroom(port);
422 } else if (!uart_circ_empty(xmit) && !stopped) {
423 c = xmit->buf[xmit->tail];
424 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
429 sci_out(port, SCxTDR, c);
432 } while (--count > 0);
/* Acknowledge the transmit-data-empty condition we just serviced. */
434 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
436 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
437 uart_write_wakeup(port);
438 if (uart_circ_empty(xmit)) {
441 ctrl = sci_in(port, SCSCR);
443 if (port->type != PORT_SCI) {
444 sci_in(port, SCxSR); /* Dummy read */
445 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
449 sci_out(port, SCSCR, ctrl);
453 /* On SH3, SCIF may read end-of-break as a space->mark char */
454 #define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); })
/*
 * Pull characters out of the receive FIFO and push them to the tty
 * layer, translating frame/parity status bits into tty flags. Handles
 * sysrq characters and, on SH3, filters out the bogus characters that
 * appear while a break is in progress. Acks RDxF when done.
 * (Several lines of this function are missing from this chunk.)
 */
456 static void sci_receive_chars(struct uart_port *port)
458 struct sci_port *sci_port = to_sci_port(port);
459 struct tty_struct *tty = port->state->port.tty;
460 int i, count, copied = 0;
461 unsigned short status;
464 status = sci_in(port, SCxSR);
465 if (!(status & SCxSR_RDxF(port)))
469 if (port->type == PORT_SCI)
470 count = sci_rxfill(port);
472 count = scif_rxfill(port);
474 /* Don't copy more bytes than there is room for in the buffer */
475 count = tty_buffer_request_room(tty, count);
477 /* If for any reason we can't copy more data, we're done! */
481 if (port->type == PORT_SCI) {
482 char c = sci_in(port, SCxRDR);
483 if (uart_handle_sysrq_char(port, c) ||
484 sci_port->break_flag)
487 tty_insert_flip_char(tty, c, TTY_NORMAL);
489 for (i = 0; i < count; i++) {
490 char c = sci_in(port, SCxRDR);
491 status = sci_in(port, SCxSR);
492 #if defined(CONFIG_CPU_SH3)
493 /* Skip "chars" during break */
494 if (sci_port->break_flag) {
496 (status & SCxSR_FER(port))) {
501 /* Nonzero => end-of-break */
502 dev_dbg(port->dev, "debounce<%02x>\n", c);
503 sci_port->break_flag = 0;
510 #endif /* CONFIG_CPU_SH3 */
511 if (uart_handle_sysrq_char(port, c)) {
516 /* Store data and status */
517 if (status & SCxSR_FER(port)) {
519 dev_notice(port->dev, "frame error\n");
520 } else if (status & SCxSR_PER(port)) {
522 dev_notice(port->dev, "parity error\n");
526 tty_insert_flip_char(tty, c, flag);
530 sci_in(port, SCxSR); /* dummy read */
531 sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
534 port->icount.rx += count;
538 /* Tell the rest of the system the news. New characters! */
539 tty_flip_buffer_push(tty);
541 sci_in(port, SCxSR); /* dummy read */
542 sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
/* Break-polling period: 50ms at HZ=100-1000 scales accordingly. */
546 #define SCI_BREAK_JIFFIES (HZ/20)
549 * The sci generates interrupts during the break,
550 * 1 per millisecond or so during the break period, for 9600 baud.
551 * So don't bother disabling interrupts.
552 * But don't want more than 1 break event.
553 * Use a kernel timer to periodically poll the rx line until
554 * the break is finished.
/* Re-arm the break poll timer one period into the future. */
556 static inline void sci_schedule_break_timer(struct sci_port *port)
558 mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
561 /* Ensure that two consecutive samples find the break over. */
/*
 * Timer callback: sample the RX line. Line still low -> break ongoing,
 * keep polling (break_flag = 1). First high sample after a break ->
 * require one more confirming sample (break_flag = 2). Otherwise the
 * break is over (break_flag = 0). Port is enabled around the sample via
 * the enable/disable callbacks.
 */
562 static void sci_break_timer(unsigned long data)
564 struct sci_port *port = (struct sci_port *)data;
567 port->enable(&port->port);
569 if (sci_rxd_in(&port->port) == 0) {
570 port->break_flag = 1;
571 sci_schedule_break_timer(port);
572 } else if (port->break_flag == 1) {
574 port->break_flag = 2;
575 sci_schedule_break_timer(port);
577 port->break_flag = 0;
580 port->disable(&port->port);
/*
 * Translate receive error status bits (overrun, framing, parity) into
 * tty flag characters. A framing error with the RX line held low is
 * treated as the start of a break: set break_flag, start the poll
 * timer, and give sysrq a chance. Returns a count of flip characters
 * inserted (the accumulation lines are missing from this chunk).
 */
583 static int sci_handle_errors(struct uart_port *port)
586 unsigned short status = sci_in(port, SCxSR);
587 struct tty_struct *tty = port->state->port.tty;
589 if (status & SCxSR_ORER(port)) {
591 if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
594 dev_notice(port->dev, "overrun error");
597 if (status & SCxSR_FER(port)) {
598 if (sci_rxd_in(port) == 0) {
599 /* Notify of BREAK */
600 struct sci_port *sci_port = to_sci_port(port);
602 if (!sci_port->break_flag) {
603 sci_port->break_flag = 1;
604 sci_schedule_break_timer(sci_port);
606 /* Do sysrq handling. */
607 if (uart_handle_break(port))
610 dev_dbg(port->dev, "BREAK detected\n");
612 if (tty_insert_flip_char(tty, 0, TTY_BREAK))
/* Genuine framing error (line not held low). */
618 if (tty_insert_flip_char(tty, 0, TTY_FRAME))
621 dev_notice(port->dev, "frame error\n");
625 if (status & SCxSR_PER(port)) {
627 if (tty_insert_flip_char(tty, 0, TTY_PARITY))
630 dev_notice(port->dev, "parity error");
634 tty_flip_buffer_push(tty);
/*
 * SCIF-only: check the line status register for a FIFO overrun, clear
 * it, and report one TTY_OVERRUN flip character. Non-SCIF port types
 * bail out early. Returns the number of characters pushed.
 */
639 static int sci_handle_fifo_overrun(struct uart_port *port)
641 struct tty_struct *tty = port->state->port.tty;
644 if (port->type != PORT_SCIF)
647 if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) {
648 sci_out(port, SCLSR, 0);
650 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
651 tty_flip_buffer_push(tty);
653 dev_notice(port->dev, "overrun error\n");
/*
 * Break-interrupt service: pass through sysrq break handling, report a
 * TTY_BREAK to the tty layer (skipped if the SH3 debounce already
 * flagged the break), then also sweep up any FIFO overrun.
 */
660 static int sci_handle_breaks(struct uart_port *port)
663 unsigned short status = sci_in(port, SCxSR);
664 struct tty_struct *tty = port->state->port.tty;
665 struct sci_port *s = to_sci_port(port);
667 if (uart_handle_break(port))
670 if (!s->break_flag && status & SCxSR_BRK(port)) {
671 #if defined(CONFIG_CPU_SH3)
675 /* Notify of BREAK */
676 if (tty_insert_flip_char(tty, 0, TTY_BREAK))
679 dev_dbg(port->dev, "BREAK detected\n");
683 tty_flip_buffer_push(tty);
685 copied += sci_handle_fifo_overrun(port);
/*
 * RX interrupt: with DMA active, suppress further RX interrupts (IRQ
 * line for SCIFA/SCIFB, RIE bit otherwise -- the non-SCIFA branch lines
 * are missing here), ack the interrupt and arm the DMA timeout timer;
 * otherwise fall through to PIO reception via sci_receive_chars().
 */
690 static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
692 #ifdef CONFIG_SERIAL_SH_SCI_DMA
693 struct uart_port *port = ptr;
694 struct sci_port *s = to_sci_port(port);
697 u16 scr = sci_in(port, SCSCR);
698 u16 ssr = sci_in(port, SCxSR);
700 /* Disable future Rx interrupts */
701 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
702 disable_irq_nosync(irq);
707 sci_out(port, SCSCR, scr);
708 /* Clear current interrupt */
709 sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
710 dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
711 jiffies, s->rx_timeout);
712 mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
718 /* I think sci_receive_chars has to be called irrespective
719 * of whether the I_IXOFF is set, otherwise, how is the interrupt
722 sci_receive_chars(ptr);
/* TX interrupt: just run the PIO transmit path under the port lock. */
727 static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
729 struct uart_port *port = ptr;
732 spin_lock_irqsave(&port->lock, flags);
733 sci_transmit_chars(port);
734 spin_unlock_irqrestore(&port->lock, flags);
/*
 * Error interrupt: on plain SCI handle errors and discard the offending
 * character; on SCIF sweep FIFO overruns and re-run RX. Then clear the
 * error bits and kick transmission in case TX stalled on the error.
 */
739 static irqreturn_t sci_er_interrupt(int irq, void *ptr)
741 struct uart_port *port = ptr;
744 if (port->type == PORT_SCI) {
745 if (sci_handle_errors(port)) {
746 /* discard character in rx buffer */
748 sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
751 sci_handle_fifo_overrun(port);
752 sci_rx_interrupt(irq, ptr);
755 sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
757 /* Kick the transmission */
758 sci_tx_interrupt(irq, ptr);
/* Break interrupt: service the break condition and ack the BRK bit. */
763 static irqreturn_t sci_br_interrupt(int irq, void *ptr)
765 struct uart_port *port = ptr;
768 sci_handle_breaks(port);
769 sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));
/*
 * RX-side SCSCR interrupt-enable mask for this port: always RIE, plus
 * REIE only if the platform's initial scscr value requested it.
 */
774 static inline unsigned long port_rx_irq_mask(struct uart_port *port)
777 * Not all ports (such as SCIFA) will support REIE. Rather than
778 * special-casing the port type, we check the port initialization
779 * IRQ enable mask to see whether the IRQ is desired at all. If
780 * it's unset, it's logically inferred that there's no point in
783 return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
/*
 * Single-IRQ (multiplexed) handler: read the status and control
 * registers once, then dispatch to the tx/rx/error/break sub-handlers
 * based on which conditions are both pending and enabled. With RX DMA
 * active (s->chan_rx) the RX handler runs even without RDxF set, since
 * the DMA controller clears the flag itself.
 */
786 static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
788 unsigned short ssr_status, scr_status, err_enabled;
789 struct uart_port *port = ptr;
790 struct sci_port *s = to_sci_port(port);
791 irqreturn_t ret = IRQ_NONE;
793 ssr_status = sci_in(port, SCxSR);
794 scr_status = sci_in(port, SCSCR);
795 err_enabled = scr_status & port_rx_irq_mask(port);
798 if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
800 ret = sci_tx_interrupt(irq, ptr);
803 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
806 if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
807 (scr_status & SCSCR_RIE))
808 ret = sci_rx_interrupt(irq, ptr);
810 /* Error Interrupt */
811 if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
812 ret = sci_er_interrupt(irq, ptr);
814 /* Break Interrupt */
815 if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
816 ret = sci_br_interrupt(irq, ptr);
822 * Here we define a transition notifier so that we can update all of our
823 * ports' baud rate when the peripheral clock changes.
/*
 * cpufreq notifier: after a frequency change (or resume), refresh the
 * cached uartclk from the interface clock, under the port lock.
 */
825 static int sci_notifier(struct notifier_block *self,
826 unsigned long phase, void *p)
828 struct sci_port *sci_port;
831 sci_port = container_of(self, struct sci_port, freq_transition);
833 if ((phase == CPUFREQ_POSTCHANGE) ||
834 (phase == CPUFREQ_RESUMECHANGE)) {
835 struct uart_port *port = &sci_port->port;
837 spin_lock_irqsave(&port->lock, flags);
838 port->uartclk = clk_get_rate(sci_port->iclk);
839 spin_unlock_irqrestore(&port->lock, flags);
/*
 * Runtime-PM + clock enable path used as the port "enable" callback:
 * bump the runtime PM refcount, turn on the interface clock (and
 * refresh uartclk from it), then the function clock.
 */
845 static void sci_clk_enable(struct uart_port *port)
847 struct sci_port *sci_port = to_sci_port(port);
849 pm_runtime_get_sync(port->dev);
851 clk_enable(sci_port->iclk);
852 sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
853 clk_enable(sci_port->fclk);
/* Reverse of sci_clk_enable(): clocks off in opposite order, then PM put. */
856 static void sci_clk_disable(struct uart_port *port)
858 struct sci_port *sci_port = to_sci_port(port);
860 clk_disable(sci_port->fclk);
861 clk_disable(sci_port->iclk);
863 pm_runtime_put_sync(port->dev);
/*
 * Register interrupt handlers. When the platform shares one IRQ for all
 * events (irqs[0] == irqs[1]) a single muxed handler is installed;
 * otherwise one handler per event (error, rx, tx, break) is requested.
 * Returns 0 on success, negative errno on failure (the return lines are
 * missing from this chunk).
 */
866 static int sci_request_irq(struct sci_port *port)
869 irqreturn_t (*handlers[4])(int irq, void *ptr) = {
870 sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt,
873 const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full",
874 "SCI Transmit Data Empty", "SCI Break" };
876 if (port->cfg->irqs[0] == port->cfg->irqs[1]) {
877 if (unlikely(!port->cfg->irqs[0]))
880 if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt,
881 IRQF_DISABLED, "sci", port)) {
882 dev_err(port->port.dev, "Can't allocate IRQ\n");
886 for (i = 0; i < ARRAY_SIZE(handlers); i++) {
887 if (unlikely(!port->cfg->irqs[i]))
890 if (request_irq(port->cfg->irqs[i], handlers[i],
891 IRQF_DISABLED, desc[i], port)) {
892 dev_err(port->port.dev, "Can't allocate IRQ\n");
/* Mirror of sci_request_irq(): release the muxed IRQ or each one in turn. */
901 static void sci_free_irq(struct sci_port *port)
905 if (port->cfg->irqs[0] == port->cfg->irqs[1])
906 free_irq(port->cfg->irqs[0], port);
908 for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) {
909 if (!port->cfg->irqs[i])
912 free_irq(port->cfg->irqs[i], port);
/*
 * uart_ops.tx_empty: report TIOCSER_TEMT only when the transmitter has
 * ended (TEND) *and* the TX FIFO is drained.
 */
917 static unsigned int sci_tx_empty(struct uart_port *port)
919 unsigned short status = sci_in(port, SCxSR);
920 unsigned short in_tx_fifo = scif_txfill(port);
922 return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
/* uart_ops.set_mctrl: intentionally a no-op, see comments below. */
925 static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
927 /* This routine is used for setting signals of: DTR, DCD, CTS/RTS */
928 /* We use SCIF's hardware for CTS/RTS, so don't need any for that. */
929 /* If you have signals for DTR and DCD, please implement here. */
/* uart_ops.get_mctrl: report fixed asserted modem signals. */
932 static unsigned int sci_get_mctrl(struct uart_port *port)
934 /* This routine is used for getting signals of: DTR, DCD, DSR, RI,
937 return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR;
940 #ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
 * TX DMA completion callback: advance the circular buffer tail by the
 * amount just transferred, ack the descriptor, wake up writers, and
 * either queue more TX work or (SCIFA/SCIFB) drop TIE when the buffer
 * is empty. Runs with the port lock taken here.
 */
941 static void sci_dma_tx_complete(void *arg)
943 struct sci_port *s = arg;
944 struct uart_port *port = &s->port;
945 struct circ_buf *xmit = &port->state->xmit;
948 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
950 spin_lock_irqsave(&port->lock, flags);
952 xmit->tail += sg_dma_len(&s->sg_tx);
953 xmit->tail &= UART_XMIT_SIZE - 1;
955 port->icount.tx += sg_dma_len(&s->sg_tx);
957 async_tx_ack(s->desc_tx);
958 s->cookie_tx = -EINVAL;
961 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
962 uart_write_wakeup(port);
964 if (!uart_circ_empty(xmit)) {
965 schedule_work(&s->work_tx);
966 } else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
967 u16 ctrl = sci_in(port, SCSCR);
968 sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
971 spin_unlock_irqrestore(&port->lock, flags);
974 /* Locking: called with port lock held */
/*
 * Copy up to `count` received bytes from the active RX DMA bounce
 * buffer into the tty flip buffer; warns and drops the excess when the
 * tty cannot take everything. Returns the number of bytes pushed (the
 * return line is missing from this chunk).
 */
975 static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
978 struct uart_port *port = &s->port;
981 room = tty_buffer_request_room(tty, count);
983 if (s->active_rx == s->cookie_rx[0]) {
985 } else if (s->active_rx == s->cookie_rx[1]) {
988 dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
993 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
998 for (i = 0; i < room; i++)
999 tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
1002 port->icount.rx += room;
/*
 * RX DMA completion callback: push the full bounce buffer to the tty,
 * re-arm the timeout timer, flush the flip buffer, and schedule the RX
 * work to resubmit the descriptor.
 */
1007 static void sci_dma_rx_complete(void *arg)
1009 struct sci_port *s = arg;
1010 struct uart_port *port = &s->port;
1011 struct tty_struct *tty = port->state->port.tty;
1012 unsigned long flags;
1015 dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
1017 spin_lock_irqsave(&port->lock, flags);
1019 count = sci_dma_rx_push(s, tty, s->buf_len_rx);
1021 mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
1023 spin_unlock_irqrestore(&port->lock, flags);
1026 tty_flip_buffer_push(tty);
1028 schedule_work(&s->work_rx);
/*
 * Tear down the RX DMA channel: invalidate both cookies, release the
 * channel, and free the coherent double buffer if it was mapped.
 * (The enable_pio re-activation lines are missing from this chunk.)
 */
1031 static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
1033 struct dma_chan *chan = s->chan_rx;
1034 struct uart_port *port = &s->port;
1037 s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
1038 dma_release_channel(chan);
1039 if (sg_dma_address(&s->sg_rx[0]))
1040 dma_free_coherent(port->dev, s->buf_len_rx * 2,
1041 sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
/* Tear down the TX DMA channel: invalidate the cookie and release it. */
1046 static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
1048 struct dma_chan *chan = s->chan_tx;
1049 struct uart_port *port = &s->port;
1052 s->cookie_tx = -EINVAL;
1053 dma_release_channel(chan);
/*
 * Prepare and submit both RX descriptors (one per bounce buffer). On
 * any prep/submit failure, unwind the already-submitted descriptor and
 * fall back to PIO via sci_rx_dma_release(). The first cookie becomes
 * the active one, then pending transfers are issued.
 */
1058 static void sci_submit_rx(struct sci_port *s)
1060 struct dma_chan *chan = s->chan_rx;
1063 for (i = 0; i < 2; i++) {
1064 struct scatterlist *sg = &s->sg_rx[i];
1065 struct dma_async_tx_descriptor *desc;
1067 desc = chan->device->device_prep_slave_sg(chan,
1068 sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
1071 s->desc_rx[i] = desc;
1072 desc->callback = sci_dma_rx_complete;
1073 desc->callback_param = s;
1074 s->cookie_rx[i] = desc->tx_submit(desc);
1077 if (!desc || s->cookie_rx[i] < 0) {
1079 async_tx_ack(s->desc_rx[0]);
1080 s->cookie_rx[0] = -EINVAL;
1084 s->cookie_rx[i] = -EINVAL;
1086 dev_warn(s->port.dev,
1087 "failed to re-start DMA, using PIO\n");
1088 sci_rx_dma_release(s, true);
1091 dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
1092 s->cookie_rx[i], i);
1095 s->active_rx = s->cookie_rx[0];
1097 dma_async_issue_pending(chan);
/*
 * RX workqueue function: identify which of the two descriptors is the
 * active one; if it hasn't completed yet, terminate the channel, push
 * the partial data (read back from the shdma-private sh_desc), and
 * flush. Otherwise resubmit the finished descriptor and flip the
 * active cookie to the other buffer. Falls back to PIO on submit
 * failure. (Several lines are missing from this chunk.)
 */
1100 static void work_fn_rx(struct work_struct *work)
1102 struct sci_port *s = container_of(work, struct sci_port, work_rx);
1103 struct uart_port *port = &s->port;
1104 struct dma_async_tx_descriptor *desc;
1107 if (s->active_rx == s->cookie_rx[0]) {
1109 } else if (s->active_rx == s->cookie_rx[1]) {
1112 dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
1115 desc = s->desc_rx[new];
1117 if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
1119 /* Handle incomplete DMA receive */
1120 struct tty_struct *tty = port->state->port.tty;
1121 struct dma_chan *chan = s->chan_rx;
1122 struct sh_desc *sh_desc = container_of(desc, struct sh_desc,
1124 unsigned long flags;
1127 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
1128 dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
1129 sh_desc->partial, sh_desc->cookie);
1131 spin_lock_irqsave(&port->lock, flags);
1132 count = sci_dma_rx_push(s, tty, sh_desc->partial);
1133 spin_unlock_irqrestore(&port->lock, flags);
1136 tty_flip_buffer_push(tty);
1143 s->cookie_rx[new] = desc->tx_submit(desc);
1144 if (s->cookie_rx[new] < 0) {
1145 dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
1146 sci_rx_dma_release(s, true);
1150 s->active_rx = s->cookie_rx[!new];
1152 dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
1153 s->cookie_rx[new], new, s->active_rx);
/*
 * TX workqueue function: point the (pre-mapped, page-sized) TX
 * scatterlist at the pending contiguous span of the circular buffer,
 * prep and submit a slave-sg descriptor with sci_dma_tx_complete as its
 * callback, and issue it. Falls back to PIO on prep/submit failure.
 */
1156 static void work_fn_tx(struct work_struct *work)
1158 struct sci_port *s = container_of(work, struct sci_port, work_tx);
1159 struct dma_async_tx_descriptor *desc;
1160 struct dma_chan *chan = s->chan_tx;
1161 struct uart_port *port = &s->port;
1162 struct circ_buf *xmit = &port->state->xmit;
1163 struct scatterlist *sg = &s->sg_tx;
1167 * Port xmit buffer is already mapped, and it is one page... Just adjust
1168 * offsets and lengths. Since it is a circular buffer, we have to
1169 * transmit till the end, and then the rest. Take the port lock to get a
1170 * consistent xmit buffer state.
1172 spin_lock_irq(&port->lock);
1173 sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
1174 sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
1176 sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
1177 CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
1178 spin_unlock_irq(&port->lock);
1180 BUG_ON(!sg_dma_len(sg));
1182 desc = chan->device->device_prep_slave_sg(chan,
1183 sg, s->sg_len_tx, DMA_TO_DEVICE,
1184 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1187 sci_tx_dma_release(s, true);
1191 dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
1193 spin_lock_irq(&port->lock);
1195 desc->callback = sci_dma_tx_complete;
1196 desc->callback_param = s;
1197 spin_unlock_irq(&port->lock);
1198 s->cookie_tx = desc->tx_submit(desc);
1199 if (s->cookie_tx < 0) {
1200 dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
1202 sci_tx_dma_release(s, true);
1206 dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
1207 xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
1209 dma_async_issue_pending(chan);
/*
 * uart_ops.start_tx: for SCIFA/SCIFB first massage SCSCR (bit 15 is
 * cleared here; intervening lines are missing from this chunk), then
 * either queue the TX DMA work or enable the TIE interrupt for PIO.
 */
1213 static void sci_start_tx(struct uart_port *port)
1215 struct sci_port *s = to_sci_port(port);
1216 unsigned short ctrl;
1218 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1219 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1220 u16 new, scr = sci_in(port, SCSCR);
1224 new = scr & ~0x8000;
1226 sci_out(port, SCSCR, new);
1229 if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
1231 schedule_work(&s->work_tx);
1234 if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1235 /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
1236 ctrl = sci_in(port, SCSCR);
1237 sci_out(port, SCSCR, ctrl | SCSCR_TIE);
/* uart_ops.stop_tx: mask the transmit interrupt in SCSCR. */
1241 static void sci_stop_tx(struct uart_port *port)
1243 unsigned short ctrl;
1245 /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
1246 ctrl = sci_in(port, SCSCR);
1248 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1253 sci_out(port, SCSCR, ctrl);
/* Enable the RX-side interrupt sources (RIE, and REIE when configured). */
1256 static void sci_start_rx(struct uart_port *port)
1258 unsigned short ctrl;
1260 ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port);
1262 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1265 sci_out(port, SCSCR, ctrl);
/* uart_ops.stop_rx: mask the RX-side interrupt sources. */
1268 static void sci_stop_rx(struct uart_port *port)
1270 unsigned short ctrl;
1272 ctrl = sci_in(port, SCSCR);
1274 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1277 ctrl &= ~port_rx_irq_mask(port);
1279 sci_out(port, SCSCR, ctrl);
/* uart_ops.enable_ms: modem-status interrupts not implemented. */
1282 static void sci_enable_ms(struct uart_port *port)
1284 /* Nothing here yet .. */
/* uart_ops.break_ctl: break transmission not implemented. */
1287 static void sci_break_ctl(struct uart_port *port, int break_state)
1289 /* Nothing here yet .. */
1292 #ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
 * dma_request_channel() filter: accept only the channel belonging to
 * the DMA device named in the slave configuration, and attach the
 * slave parameters as channel-private data.
 */
1293 static bool filter(struct dma_chan *chan, void *slave)
1295 struct sh_dmae_slave *param = slave;
1297 dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
1300 if (param->dma_dev == chan->device->dev) {
1301 chan->private = param;
/*
 * RX DMA timeout: no completion arrived in time, so re-enable the RX
 * interrupt path (IRQ line for SCIFA/SCIFB, RIE bit otherwise) and let
 * the RX work handle whatever partial data is in flight.
 */
1308 static void rx_timer_fn(unsigned long arg)
1310 struct sci_port *s = (struct sci_port *)arg;
1311 struct uart_port *port = &s->port;
1312 u16 scr = sci_in(port, SCSCR);
1314 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1316 enable_irq(s->cfg->irqs[1]);
1318 sci_out(port, SCSCR, scr | SCSCR_RIE);
1319 dev_dbg(port->dev, "DMA Rx timed out\n");
1320 schedule_work(&s->work_rx);
/*
 * Acquire DMA channels for this port (no-op when the platform supplies
 * no dma_dev). TX: map the page-aligned circular xmit buffer into a
 * single-entry scatterlist and set up the TX work. RX: allocate one
 * coherent allocation split into two bounce buffers, build their
 * scatterlists, and set up the RX work plus the timeout timer.
 * Failures at any step fall back to PIO via the *_dma_release helpers.
 * (Several lines are missing from this chunk.)
 */
1323 static void sci_request_dma(struct uart_port *port)
1325 struct sci_port *s = to_sci_port(port);
1326 struct sh_dmae_slave *param;
1327 struct dma_chan *chan;
1328 dma_cap_mask_t mask;
1331 dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
1332 port->line, s->cfg->dma_dev);
1334 if (!s->cfg->dma_dev)
1338 dma_cap_set(DMA_SLAVE, mask);
1340 param = &s->param_tx;
1342 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
1343 param->slave_id = s->cfg->dma_slave_tx;
1344 param->dma_dev = s->cfg->dma_dev;
1346 s->cookie_tx = -EINVAL;
1347 chan = dma_request_channel(mask, filter, param);
1348 dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
1351 sg_init_table(&s->sg_tx, 1);
1352 /* UART circular tx buffer is an aligned page. */
1353 BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
1354 sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
1355 UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
1356 nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
1358 sci_tx_dma_release(s, false);
1360 dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
1361 sg_dma_len(&s->sg_tx),
1362 port->state->xmit.buf, sg_dma_address(&s->sg_tx));
1364 s->sg_len_tx = nent;
1366 INIT_WORK(&s->work_tx, work_fn_tx);
1369 param = &s->param_rx;
1371 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
1372 param->slave_id = s->cfg->dma_slave_rx;
1373 param->dma_dev = s->cfg->dma_dev;
1375 chan = dma_request_channel(mask, filter, param);
1376 dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
/* Each RX bounce buffer is at least 32 bytes (2x the FIFO, min 16). */
1384 s->buf_len_rx = 2 * max(16, (int)port->fifosize);
1385 buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
1386 &dma[0], GFP_KERNEL);
1390 "failed to allocate dma buffer, using PIO\n");
1391 sci_rx_dma_release(s, true);
1395 buf[1] = buf[0] + s->buf_len_rx;
1396 dma[1] = dma[0] + s->buf_len_rx;
1398 for (i = 0; i < 2; i++) {
1399 struct scatterlist *sg = &s->sg_rx[i];
1401 sg_init_table(sg, 1);
1402 sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
1403 (int)buf[i] & ~PAGE_MASK);
1404 sg_dma_address(sg) = dma[i];
1407 INIT_WORK(&s->work_rx, work_fn_rx);
1408 setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
/* Release both DMA channels (if the platform configured a DMA device). */
1414 static void sci_free_dma(struct uart_port *port)
1416 struct sci_port *s = to_sci_port(port);
1418 if (!s->cfg->dma_dev)
1422 sci_tx_dma_release(s, false);
1424 sci_rx_dma_release(s, false);
/* Stubs used when CONFIG_SERIAL_SH_SCI_DMA is disabled. */
1427 static inline void sci_request_dma(struct uart_port *port)
1431 static inline void sci_free_dma(struct uart_port *port)
/*
 * uart_ops .startup: bring the port up on first open.  Requests the port's
 * IRQ(s) and then the DMA channels; IRQ failure aborts the open with the
 * negative error code from sci_request_irq().
 * NOTE(review): interior lines (error return, tx/rx start) are elided here.
 */
1436 static int sci_startup(struct uart_port *port)
1438 	struct sci_port *s = to_sci_port(port);
1441 	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1446 	ret = sci_request_irq(s);
1447 	if (unlikely(ret < 0))
1450 	sci_request_dma(port);
/*
 * uart_ops .shutdown: tear the port down on last close — the inverse of
 * sci_startup().  NOTE(review): the stop/free calls are elided in this view.
 */
1458 static void sci_shutdown(struct uart_port *port)
1460 	struct sci_port *s = to_sci_port(port);
1462 	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
/*
 * Compute the SCBRR bit-rate register value for the requested baud rate.
 * Each return corresponds to one SCBRR_ALGO_* variant selected by
 * @algo_id (the switch/case labels are elided in this view); the variants
 * differ in the hardware divider (16 vs. 32 per bit) and clock scaling.
 * The "+ 16 * bps" style terms round to the nearest divider value.
 * Falls through to a warned-about safe default for unknown algo IDs.
 */
1474 static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
1479 		return ((freq + 16 * bps) / (16 * bps) - 1);
1481 		return ((freq + 16 * bps) / (32 * bps) - 1);
1483 		return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
1485 		return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
1487 		return (((freq * 1000 / 32) / bps) - 1);
1490 	/* Warn, but use a safe default */
1493 	return ((freq + 16 * bps) / (32 * bps) - 1);
/*
 * uart_ops .set_termios: program baud rate, character format and flow
 * control into the SCI/SCIF hardware.  Sequence: wait for TX to drain,
 * disable TX/RX, reset FIFOs (SCIF only), build and write SCSMR from the
 * termios c_cflag, write the bit-rate register, then re-enable via the
 * platform SCSCR value.  Also derives the DMA RX timeout when DMA is on.
 * NOTE(review): several interior lines (locals, smr_val bit ORs, braces)
 * are elided in this view.
 */
1496 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1497 			    struct ktermios *old)
1499 	struct sci_port *s = to_sci_port(port);
1500 	unsigned int status, baud, smr_val, max_baud;
1505 	 * earlyprintk comes here early on with port->uartclk set to zero.
1506 	 * the clock framework is not up and running at this point so here
1507 	 * we assume that 115200 is the maximum baud rate. please note that
1508 	 * the baud rate is not programmed during earlyprintk - it is assumed
1509 	 * that the previous boot loader has enabled required clocks and
1510 	 * setup the baud rate generator hardware for us already.
1512 	max_baud = port->uartclk ? port->uartclk / 16 : 115200;
1514 	baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
1515 	if (likely(baud && port->uartclk))
1516 		t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);
	/* Busy-wait until the transmitter is completely empty before we
	 * reprogram the port, so in-flight characters are not corrupted. */
1522 		status = sci_in(port, SCxSR);
1523 	} while (!(status & SCxSR_TEND(port)));
1525 	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */
	/* Only SCIF variants have FIFOs to reset; plain SCI has none. */
1527 	if (port->type != PORT_SCI)
1528 		sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
	/* Preserve only the clock-select bits (low 2) of SCSMR. */
1530 	smr_val = sci_in(port, SCSMR) & 3;
1532 	if ((termios->c_cflag & CSIZE) == CS7)
1534 	if (termios->c_cflag & PARENB)
1536 	if (termios->c_cflag & PARODD)
1538 	if (termios->c_cflag & CSTOPB)
1541 	uart_update_timeout(port, termios->c_cflag, baud);
1543 	sci_out(port, SCSMR, smr_val);
1545 	dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
1550 			sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1);
1553 			sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3);
1555 		sci_out(port, SCBRR, t);
1556 		udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
1559 	sci_init_pins(port, termios->c_cflag);
1560 	sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
1562 	sci_out(port, SCSCR, s->cfg->scscr);
1564 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1566 	 * Calculate delay for 1.5 DMA buffers: see
1567 	 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
1568 	 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
1569 	 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
1570 	 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
1571 	 * sizes), but it has been found out experimentally, that this is not
1572 	 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
1573 	 * as a minimum seem to work perfectly.
1576 		s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
1579 			"DMA Rx t-out %ums, tty t-out %u jiffies\n",
1580 			s->rx_timeout * 1000 / HZ, port->timeout);
1581 		if (s->rx_timeout < msecs_to_jiffies(20))
1582 			s->rx_timeout = msecs_to_jiffies(20);
	/* Re-enable the receiver only if the line discipline wants reads. */
1586 	if ((termios->c_cflag & CREAD) != 0)
/*
 * uart_ops .type: return a human-readable name for the port variant
 * (the per-PORT_* case labels are elided in this view).
 */
1593 static const char *sci_type(struct uart_port *port)
1595 	switch (port->type) {
/* Size of the MMIO window to claim/map for a port; a fixed conservative
 * value rather than per-variant resource data (see comment below). */
1611 static inline unsigned long sci_port_size(struct uart_port *port)
1614 	 * Pick an arbitrary size that encapsulates all of the base
1615 	 * registers by default. This can be optimized later, or derived
1616 	 * from platform resource data at such a time that ports begin to
1617 	 * behave more erratically.
/*
 * Establish port->membase for register access: ioremap the mapbase when
 * UPF_IOREMAP is set, otherwise use the identity-mapped cookie directly.
 * Returns 0 on success (already-mapped ports short-circuit; that early
 * return is elided in this view), negative errno on ioremap failure.
 */
1622 static int sci_remap_port(struct uart_port *port)
1624 	unsigned long size = sci_port_size(port);
1627 	 * Nothing to do if there's already an established membase.
1632 	if (port->flags & UPF_IOREMAP) {
1633 		port->membase = ioremap_nocache(port->mapbase, size);
1634 		if (unlikely(!port->membase)) {
1635 			dev_err(port->dev, "can't remap port#%d\n", port->line);
1640 		 * For the simple (and majority of) cases where we don't
1641 		 * need to do any remapping, just cast the cookie
1644 		port->membase = (void __iomem *)port->mapbase;
/*
 * uart_ops .release_port: undo sci_request_port() — unmap the registers
 * (only if we ioremapped them) and release the claimed memory region.
 */
1650 static void sci_release_port(struct uart_port *port)
1652 	if (port->flags & UPF_IOREMAP) {
1653 		iounmap(port->membase);
1654 		port->membase = NULL;
1657 	release_mem_region(port->mapbase, sci_port_size(port));
/*
 * uart_ops .request_port: claim the port's MMIO region and map it.
 * On remap failure the just-claimed region is released again so no
 * resource leaks.  Returns 0 on success, negative errno otherwise
 * (the -EBUSY return for a failed request_mem_region is elided here).
 */
1660 static int sci_request_port(struct uart_port *port)
1662 	unsigned long size = sci_port_size(port);
1663 	struct resource *res;
1666 	res = request_mem_region(port->mapbase, size, dev_name(port->dev));
1667 	if (unlikely(res == NULL))
1670 	ret = sci_remap_port(port);
1671 	if (unlikely(ret != 0)) {
1672 		release_resource(res);
/*
 * uart_ops .config_port: autoconfiguration hook.  On UART_CONFIG_TYPE,
 * take the port type from platform data and claim the port resources.
 */
1679 static void sci_config_port(struct uart_port *port, int flags)
1681 	if (flags & UART_CONFIG_TYPE) {
1682 		struct sci_port *sport = to_sci_port(port);
1684 		port->type = sport->cfg->type;
1685 		sci_request_port(port);
/*
 * uart_ops .verify_port: sanity-check user-supplied serial settings
 * (TIOCSSERIAL).  Rejects an IRQ that doesn't match the TX IRQ from
 * platform data or is out of range, and baud bases below 2400.
 * (The -EINVAL returns are elided in this view.)
 */
1689 static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
1691 	struct sci_port *s = to_sci_port(port);
1693 	if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
1695 	if (ser->baud_base < 2400)
1696 		/* No paper tape reader for Mitch.. */
/* serial_core operations table wiring this driver's callbacks into the
 * generic UART layer; poll hooks only exist under CONFIG_CONSOLE_POLL. */
1702 static struct uart_ops sci_uart_ops = {
1703 	.tx_empty	= sci_tx_empty,
1704 	.set_mctrl	= sci_set_mctrl,
1705 	.get_mctrl	= sci_get_mctrl,
1706 	.start_tx	= sci_start_tx,
1707 	.stop_tx	= sci_stop_tx,
1708 	.stop_rx	= sci_stop_rx,
1709 	.enable_ms	= sci_enable_ms,
1710 	.break_ctl	= sci_break_ctl,
1711 	.startup	= sci_startup,
1712 	.shutdown	= sci_shutdown,
1713 	.set_termios	= sci_set_termios,
1715 	.release_port	= sci_release_port,
1716 	.request_port	= sci_request_port,
1717 	.config_port	= sci_config_port,
1718 	.verify_port	= sci_verify_port,
1719 #ifdef CONFIG_CONSOLE_POLL
1720 	.poll_get_char	= sci_poll_get_char,
1721 	.poll_put_char	= sci_poll_put_char,
/*
 * One-time initialization of a single sci_port from its platform data:
 * fills in the uart_port (ops, iotype, fifosize per port type), acquires
 * the interface clock (falling back to the legacy "peripheral_clk" name)
 * and the optional function clock, enables runtime PM, sets up the break
 * timer, and copies mapbase/type/flags/irq from @p.  Returns 0 on
 * success or PTR_ERR() of a missing interface clock.
 * @dev may be NULL for the earlyprintk path (see sci_probe_earlyprintk);
 * the clock/PM section is presumably skipped in that case — the guard
 * lines are elided in this view, TODO confirm.
 */
1725 static int __devinit sci_init_single(struct platform_device *dev,
1726 				     struct sci_port *sci_port,
1728 				     struct plat_sci_port *p)
1730 	struct uart_port *port = &sci_port->port;
1732 	port->ops	= &sci_uart_ops;
1733 	port->iotype	= UPIO_MEM;
	/* FIFO depth depends on the port variant (selection elided here). */
1738 		port->fifosize = 256;
1741 		port->fifosize = 64;
1744 		port->fifosize = 16;
	/* Interface clock: try "sci_ick" first, then the legacy name. */
1752 		sci_port->iclk = clk_get(&dev->dev, "sci_ick");
1753 		if (IS_ERR(sci_port->iclk)) {
1754 			sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
1755 			if (IS_ERR(sci_port->iclk)) {
1756 				dev_err(&dev->dev, "can't get iclk\n");
1757 				return PTR_ERR(sci_port->iclk);
1762 		 * The function clock is optional, ignore it if we can't
1765 		sci_port->fclk = clk_get(&dev->dev, "sci_fck");
1766 		if (IS_ERR(sci_port->fclk))
1767 			sci_port->fclk = NULL;
1769 		sci_port->enable = sci_clk_enable;
1770 		sci_port->disable = sci_clk_disable;
1771 		port->dev = &dev->dev;
1773 		pm_runtime_enable(&dev->dev);
1776 	sci_port->break_timer.data = (unsigned long)sci_port;
1777 	sci_port->break_timer.function = sci_break_timer;
1778 	init_timer(&sci_port->break_timer);
1782 	port->mapbase	= p->mapbase;
1783 	port->type	= p->type;
1784 	port->flags	= p->flags;
	/*
	 * The UART port needs an IRQ value, so we peg this to the RX IRQ
	 * for the multi-IRQ ports.  NOTE(review): the original comment said
	 * "TX IRQ" but the code below uses SCIx_RXI_IRQ — confirm which is
	 * intended against the shutdown-path synchronization requirements.
	 *
	 * For the muxed case there's nothing more to do.
	 */
1793 	port->irq = p->irqs[SCIx_RXI_IRQ];
1796 		dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
1797 			p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
1802 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
/* Per-character output callback for uart_console_write(): busy-wait
 * polled transmit of a single character. */
1803 static void serial_console_putchar(struct uart_port *port, int ch)
1805 	sci_poll_put_char(port, ch);
/*
 * Print a string to the serial port trying not to disturb
 * any possible real use of the port...
 *
 * Console .write callback: optionally enables the port clock around the
 * polled write, then spins until both TDxE (FIFO empty) and TEND (last
 * bit shifted out) are set so the message is fully on the wire before
 * the clock may be disabled again.
 */
1812 static void serial_console_write(struct console *co, const char *s,
1815 	struct sci_port *sci_port = &sci_ports[co->index];
1816 	struct uart_port *port = &sci_port->port;
1817 	unsigned short bits;
1819 	if (sci_port->enable)
1820 		sci_port->enable(port);
1822 	uart_console_write(port, s, count, serial_console_putchar);
1824 	/* wait until fifo is empty and last bit has been transmitted */
1825 	bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
1826 	while ((sci_in(port, SCxSR) & bits) != bits)
1829 	if (sci_port->disable)
1830 		sci_port->disable(port);
/*
 * Console .setup callback: validate the console index, map the port
 * registers, enable the clock, parse any "options" string (e.g.
 * "115200n8") and apply it via uart_set_options().  Also used directly
 * by the earlyprintk path below.  Returns 0 or a negative errno
 * (several early-return lines are elided in this view).
 */
1833 static int __devinit serial_console_setup(struct console *co, char *options)
1835 	struct sci_port *sci_port;
1836 	struct uart_port *port;
1844 	 * Refuse to handle any bogus ports.
1846 	if (co->index < 0 || co->index >= SCI_NPORTS)
1849 	sci_port = &sci_ports[co->index];
1850 	port = &sci_port->port;
1853 	 * Refuse to handle uninitialized ports.
1858 	ret = sci_remap_port(port);
1859 	if (unlikely(ret != 0))
1862 	if (sci_port->enable)
1863 		sci_port->enable(port);
1866 		uart_parse_options(options, &baud, &parity, &bits, &flow);
1868 	ret = uart_set_options(port, co, baud, parity, bits, flow);
1869 #if defined(__H8300H__) || defined(__H8300S__)
1870 	/* disable rx interrupt */
1874 	/* TODO: disable clock */
/* Registered console for ttySC ports; .data links back to the uart
 * driver so the tty layer can resolve the device. */
1878 static struct console serial_console = {
1880 	.device		= uart_console_device,
1881 	.write		= serial_console_write,
1882 	.setup		= serial_console_setup,
1883 	.flags		= CON_PRINTBUFFER,
1885 	.data		= &sci_uart_driver,
/* Boot-time console used by the earlyprintk path before the real
 * console can be registered; shares the polled write routine. */
1888 static struct console early_serial_console = {
1889 	.name           = "early_ttySC",
1890 	.write          = serial_console_write,
1891 	.flags          = CON_PRINTBUFFER,
/* Buffer receiving the earlyprintk command-line options (e.g.
 * "115200,keep"); filled in by early_platform_init_buffer() below. */
1895 static char early_serial_buf[32];
/*
 * Early-platform probe for earlyprintk: minimally initialize the port
 * (note the NULL platform_device passed to sci_init_single), run the
 * normal console setup with the saved option string, and register the
 * early console.  Unless the options contain "keep", CON_BOOT makes the
 * kernel unregister it automatically once a real console appears.
 * Guarded against double registration via early_serial_console.data.
 */
1897 static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
1899 	struct plat_sci_port *cfg = pdev->dev.platform_data;
1901 	if (early_serial_console.data)
1904 	early_serial_console.index = pdev->id;
1906 	sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg);
1908 	serial_console_setup(&early_serial_console, early_serial_buf);
1910 	if (!strstr(early_serial_buf, "keep"))
1911 		early_serial_console.flags |= CON_BOOT;
1913 	register_console(&early_serial_console);
1917 #define SCI_CONSOLE	(&serial_console)
/* Stubs for !CONFIG_SERIAL_SH_SCI_CONSOLE builds. */
1920 static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
1925 #define SCI_CONSOLE	NULL
1927 #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
/* One-shot init-time banner printed from sci_init(). */
1929 static char banner[] __initdata =
1930 	KERN_INFO "SuperH SCI(F) driver initialized\n";
/* The uart_driver this file registers; SCI_CONSOLE is the real console
 * or NULL depending on CONFIG_SERIAL_SH_SCI_CONSOLE. */
1932 static struct uart_driver sci_uart_driver = {
1933 	.owner		= THIS_MODULE,
1934 	.driver_name	= "sci",
1935 	.dev_name	= "ttySC",
1937 	.minor		= SCI_MINOR_START,
1939 	.cons		= SCI_CONSOLE,
/*
 * platform_driver .remove: undo sci_probe() — drop the cpufreq notifier,
 * detach the uart port, release both clocks and disable runtime PM.
 */
1942 static int sci_remove(struct platform_device *dev)
1944 	struct sci_port *port = platform_get_drvdata(dev);
1946 	cpufreq_unregister_notifier(&port->freq_transition,
1947 				    CPUFREQ_TRANSITION_NOTIFIER);
1949 	uart_remove_one_port(&sci_uart_driver, &port->port);
1951 	clk_put(port->iclk);
1952 	clk_put(port->fclk);
1954 	pm_runtime_disable(&dev->dev);
/*
 * Probe one port: bounds-check @index against the compile-time port
 * count, initialize the sci_port from platform data, and hand it to
 * serial_core.  Returns 0 on success or a negative errno (the -EINVAL
 * for an out-of-range index and the init-failure return are elided).
 */
1958 static int __devinit sci_probe_single(struct platform_device *dev,
1960 				      struct plat_sci_port *p,
1961 				      struct sci_port *sciport)
1966 	if (unlikely(index >= SCI_NPORTS)) {
1967 		dev_notice(&dev->dev, "Attempting to register port "
1968 			   "%d when only %d are available.\n",
1969 			   index+1, SCI_NPORTS);
1970 		dev_notice(&dev->dev, "Consider bumping "
1971 			   "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
1975 	ret = sci_init_single(dev, sciport, index, p);
1979 	return uart_add_one_port(&sci_uart_driver, &sciport->port);
/*
 * platform_driver .probe: route early-platform devices to the
 * earlyprintk probe, otherwise register the port and hook a cpufreq
 * transition notifier so baud settings can track clock changes.  Under
 * CONFIG_SH_STANDARD_BIOS, detaches the BIOS gdb stub from the port.
 * (Error-unwind lines are elided in this view.)
 */
1982 static int __devinit sci_probe(struct platform_device *dev)
1984 	struct plat_sci_port *p = dev->dev.platform_data;
1985 	struct sci_port *sp = &sci_ports[dev->id];
1989 	 * If we've come here via earlyprintk initialization, head off to
1990 	 * the special early probe. We don't have sufficient device state
1991 	 * to make it beyond this yet.
1993 	if (is_early_platform_device(dev))
1994 		return sci_probe_earlyprintk(dev);
1996 	platform_set_drvdata(dev, sp);
1998 	ret = sci_probe_single(dev, dev->id, p, sp);
2002 	sp->freq_transition.notifier_call = sci_notifier;
2004 	ret = cpufreq_register_notifier(&sp->freq_transition,
2005 					CPUFREQ_TRANSITION_NOTIFIER);
2006 	if (unlikely(ret < 0))
2009 #ifdef CONFIG_SH_STANDARD_BIOS
2010 	sh_bios_gdb_detach();
/* dev_pm_ops .suspend: delegate to serial_core's port suspend.
 * NOTE(review): a NULL-drvdata guard is presumably present but elided. */
2020 static int sci_suspend(struct device *dev)
2022 	struct sci_port *sport = dev_get_drvdata(dev);
2025 		uart_suspend_port(&sci_uart_driver, &sport->port);
/* dev_pm_ops .resume: delegate to serial_core's port resume.
 * NOTE(review): a NULL-drvdata guard is presumably present but elided. */
2030 static int sci_resume(struct device *dev)
2032 	struct sci_port *sport = dev_get_drvdata(dev);
2035 		uart_resume_port(&sci_uart_driver, &sport->port);
/* System sleep hooks for the platform driver below. */
2040 static const struct dev_pm_ops sci_dev_pm_ops = {
2041 	.suspend	= sci_suspend,
2042 	.resume		= sci_resume,
/* Platform driver binding "sh-sci" devices to this driver. */
2045 static struct platform_driver sci_driver = {
2047 	.remove		= sci_remove,
2050 		.owner	= THIS_MODULE,
2051 		.pm	= &sci_dev_pm_ops,
/*
 * Module init: register the uart driver first, then the platform
 * driver; unregister the uart driver again if the latter fails so no
 * half-initialized state is left behind.
 */
2055 static int __init sci_init(void)
2061 	ret = uart_register_driver(&sci_uart_driver);
2062 	if (likely(ret == 0)) {
2063 		ret = platform_driver_register(&sci_driver);
2065 			uart_unregister_driver(&sci_uart_driver);
/* Module exit: unregister in reverse order of sci_init(). */
2071 static void __exit sci_exit(void)
2073 	platform_driver_unregister(&sci_driver);
2074 	uart_unregister_driver(&sci_uart_driver);
2077 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
/* Capture "earlyprintk=..." options into early_serial_buf for the early
 * probe path (sci_probe_earlyprintk). */
2078 early_platform_init_buffer("earlyprintk", &sci_driver,
2079 			   early_serial_buf, ARRAY_SIZE(early_serial_buf));
2081 module_init(sci_init);
2082 module_exit(sci_exit);
2084 MODULE_LICENSE("GPL");
/* Ensures hotplug/udev can match "sh-sci" platform devices to us. */
2085 MODULE_ALIAS("platform:sh-sci");