Diffstat (limited to 'drivers/tty/serial/fsl_lpuart.c')
-rw-r--r-- drivers/tty/serial/fsl_lpuart.c | 1324
1 file changed, 959 insertions(+), 365 deletions(-)
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 5b6093dc3ff2..f588081e73a1 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -2,6 +2,7 @@
* Freescale lpuart serial port driver
*
* Copyright 2012-2014 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -15,6 +16,7 @@
#include <linux/clk.h>
#include <linux/console.h>
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
@@ -24,6 +26,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>
@@ -118,20 +122,21 @@
#define UARTSFIFO_RXUF 0x01
/* 32-bit register defination */
-#define UARTBAUD 0x00
-#define UARTSTAT 0x04
-#define UARTCTRL 0x08
-#define UARTDATA 0x0C
-#define UARTMATCH 0x10
-#define UARTMODIR 0x14
-#define UARTFIFO 0x18
-#define UARTWATER 0x1c
+#define UARTBAUD 0x10
+#define UARTSTAT 0x14
+#define UARTCTRL 0x18
+#define UARTDATA 0x1C
+#define UARTMATCH 0x20
+#define UARTMODIR 0x24
+#define UARTFIFO 0x28
+#define UARTWATER 0x2c
#define UARTBAUD_MAEN1 0x80000000
#define UARTBAUD_MAEN2 0x40000000
#define UARTBAUD_M10 0x20000000
#define UARTBAUD_TDMAE 0x00800000
#define UARTBAUD_RDMAE 0x00200000
+#define UARTBAUD_RIDMAE 0x00100000
#define UARTBAUD_MATCFG 0x00400000
#define UARTBAUD_BOTHEDGE 0x00020000
#define UARTBAUD_RESYNCDIS 0x00010000
@@ -140,6 +145,8 @@
#define UARTBAUD_SBNS 0x00002000
#define UARTBAUD_SBR 0x00000000
#define UARTBAUD_SBR_MASK 0x1fff
+#define UARTBAUD_OSR_MASK 0x1f
+#define UARTBAUD_OSR_SHIFT 24
#define UARTSTAT_LBKDIF 0x80000000
#define UARTSTAT_RXEDGIF 0x40000000
@@ -178,7 +185,7 @@
#define UARTCTRL_SBK 0x00010000
#define UARTCTRL_MA1IE 0x00008000
#define UARTCTRL_MA2IE 0x00004000
-#define UARTCTRL_IDLECFG 0x00000100
+#define UARTCTRL_IDLECFG_OFF 0x8
#define UARTCTRL_LOOPS 0x00000080
#define UARTCTRL_DOZEEN 0x00000040
#define UARTCTRL_RSRC 0x00000020
@@ -193,9 +200,12 @@
#define UARTDATA_FRETSC 0x00002000
#define UARTDATA_RXEMPT 0x00001000
#define UARTDATA_IDLINE 0x00000800
+#define UARTDATA_INVALID 0x0000F000
#define UARTDATA_MASK 0x3ff
#define UARTMODIR_IREN 0x00020000
+#define UARTMODIR_RTSWATER_S 0x8
+#define UARTMODIR_RTSWATER_M 0x0000ff00
#define UARTMODIR_TXCTSSRC 0x00000020
#define UARTMODIR_TXCTSC 0x00000010
#define UARTMODIR_RXRTSE 0x00000008
@@ -209,6 +219,8 @@
#define UARTFIFO_RXUF 0x00010000
#define UARTFIFO_TXFLUSH 0x00008000
#define UARTFIFO_RXFLUSH 0x00004000
+#define UARTFIFO_RXIDEN_MASK 0x7
+#define UARTFIFO_RXIDEN_OFF 10
#define UARTFIFO_TXOFE 0x00000200
#define UARTFIFO_RXUFE 0x00000100
#define UARTFIFO_TXFE 0x00000080
@@ -224,8 +236,10 @@
#define UARTWATER_TXWATER_OFF 0
#define UARTWATER_RXWATER_OFF 16
-/* Rx DMA timeout in ms, which is used to calculate Rx ring buffer size */
-#define DMA_RX_TIMEOUT (10)
+#define UARTFIFO_RXIDEN_RDRF 0x3
+#define UARTCTRL_IDLECFG 0x7
+#define FSL_UART_RX_DMA_BUFFER_SIZE 128
+#define UART_AUTOSUSPEND_TIMEOUT 3000
#define DRIVER_NAME "fsl-lpuart"
#define DEV_NAME "ttyLP"
@@ -233,27 +247,35 @@
struct lpuart_port {
struct uart_port port;
- struct clk *clk;
+ struct clk *ipg_clk;
+ struct clk *per_clk;
unsigned int txfifo_size;
unsigned int rxfifo_size;
+ unsigned int txfifo_watermark;
+ unsigned int rxfifo_watermark;
+ unsigned int rts_watermark;
bool lpuart32;
+ bool dma_eeop;
bool lpuart_dma_tx_use;
bool lpuart_dma_rx_use;
+ bool dma_rx_chan_active;
struct dma_chan *dma_tx_chan;
struct dma_chan *dma_rx_chan;
struct dma_async_tx_descriptor *dma_tx_desc;
struct dma_async_tx_descriptor *dma_rx_desc;
+ dma_addr_t dma_rx_buf_bus;
dma_cookie_t dma_tx_cookie;
dma_cookie_t dma_rx_cookie;
+ unsigned char *dma_rx_buf_virt;
unsigned int dma_tx_bytes;
unsigned int dma_rx_bytes;
+ size_t rxdma_len;
bool dma_tx_in_progress;
+ bool dma_rx_in_progress;
unsigned int dma_rx_timeout;
struct timer_list lpuart_timer;
struct scatterlist rx_sgl, tx_sgl[2];
- struct circ_buf rx_ring;
- int rx_dma_rng_buf_len;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
};
@@ -265,21 +287,29 @@ static const struct of_device_id lpuart_dt_ids[] = {
{
.compatible = "fsl,ls1021a-lpuart",
},
+ {
+ .compatible = "fsl,imx7ulp-lpuart",
+ },
+ {
+ .compatible = "fsl,imx8qm-lpuart",
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
/* Forward declare this for the dma callbacks*/
+static int lpuart_dma_rx(struct lpuart_port *sport);
static void lpuart_dma_tx_complete(void *arg);
+static inline void lpuart_prepare_rx(struct lpuart_port *sport);
static u32 lpuart32_read(void __iomem *addr)
{
- return ioread32be(addr);
+ return readl(addr);
}
static void lpuart32_write(u32 val, void __iomem *addr)
{
- iowrite32be(val, addr);
+ writel(val, addr);
}
static void lpuart_stop_tx(struct uart_port *port)
@@ -316,19 +346,46 @@ static void lpuart32_stop_rx(struct uart_port *port)
lpuart32_write(temp & ~UARTCTRL_RE, port->membase + UARTCTRL);
}
+static void lpuart_recal_min_trans_size(struct lpuart_port *sport)
+{
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ u32 txcount, rxcount;
+
+ sport->dma_tx_bytes = uart_circ_chars_pending(xmit);
+
+ /* lpuart32 and loopback mode re-calculate the trans size */
+ if (!sport->lpuart32 || !(sport->port.mctrl & TIOCM_LOOP))
+ return;
+
+ txcount = lpuart32_read(sport->port.membase + UARTWATER);
+ txcount = txcount >> UARTWATER_TXCNT_OFF;
+ txcount &= UARTWATER_COUNT_MASK;
+ rxcount = lpuart32_read(sport->port.membase + UARTWATER);
+ rxcount = rxcount >> UARTWATER_RXCNT_OFF;
+ txcount = min_t(unsigned int, sport->txfifo_size - txcount,
+ sport->rxfifo_size - rxcount);
+ sport->dma_tx_bytes = min_t(unsigned int, txcount, sport->dma_tx_bytes);
+}
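[Editor note] A quick standalone sketch of the loopback clamp implemented above; the FIFO numbers and the helper name are assumptions for illustration only, not taken from the patch. It shows why the transfer is limited to the free space of whichever FIFO has less room, so looped-back data cannot overflow the receive FIFO.

/*
 * Illustrative sketch (assumed values): with 16-entry FIFOs, 4 bytes
 * queued in TX and 10 bytes pending in RX, 64 pending bytes are clamped
 * to min(16 - 4, 16 - 10) = 6 for this DMA round.
 */
static unsigned int loopback_tx_clamp(unsigned int pending,
				      unsigned int txfifo_size, unsigned int txcount,
				      unsigned int rxfifo_size, unsigned int rxcount)
{
	unsigned int room = min_t(unsigned int, txfifo_size - txcount,
				  rxfifo_size - rxcount);

	return min_t(unsigned int, pending, room);
}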
+
static void lpuart_dma_tx(struct lpuart_port *sport)
{
struct circ_buf *xmit = &sport->port.state->xmit;
struct scatterlist *sgl = sport->tx_sgl;
struct device *dev = sport->port.dev;
+ u32 toend_cnt;
int ret;
if (sport->dma_tx_in_progress)
return;
- sport->dma_tx_bytes = uart_circ_chars_pending(xmit);
+ lpuart_recal_min_trans_size(sport);
+ if (!sport->dma_tx_bytes)
+ return;
+
+ toend_cnt = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
- if (xmit->tail < xmit->head || xmit->head == 0) {
+ if (xmit->tail < xmit->head || xmit->head == 0 ||
+ (sport->port.mctrl & TIOCM_LOOP && sport->dma_tx_bytes <= toend_cnt)) {
sport->dma_tx_nents = 1;
sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes);
} else {
@@ -336,7 +393,8 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
sg_init_table(sgl, 2);
sg_set_buf(sgl, xmit->buf + xmit->tail,
UART_XMIT_SIZE - xmit->tail);
- sg_set_buf(sgl + 1, xmit->buf, xmit->head);
+ sg_set_buf(sgl + 1, xmit->buf, sport->dma_tx_bytes -
+ (UART_XMIT_SIZE - xmit->tail));
}
ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
@@ -369,6 +427,10 @@ static void lpuart_dma_tx_complete(void *arg)
unsigned long flags;
spin_lock_irqsave(&sport->port.lock, flags);
+ if (!sport->dma_tx_in_progress) {
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ return;
+ }
dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
@@ -376,18 +438,16 @@ static void lpuart_dma_tx_complete(void *arg)
sport->port.icount.tx += sport->dma_tx_bytes;
sport->dma_tx_in_progress = false;
- spin_unlock_irqrestore(&sport->port.lock, flags);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&sport->port);
if (waitqueue_active(&sport->dma_wait)) {
wake_up(&sport->dma_wait);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
return;
}
- spin_lock_irqsave(&sport->port.lock, flags);
-
if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
lpuart_dma_tx(sport);
@@ -401,15 +461,18 @@ static int lpuart_dma_tx_request(struct uart_port *port)
struct dma_slave_config dma_tx_sconfig = {};
int ret;
- dma_tx_sconfig.dst_addr = sport->port.mapbase + UARTDR;
+ if (sport->lpuart32)
+ dma_tx_sconfig.dst_addr = sport->port.mapbase + UARTDATA;
+ else
+ dma_tx_sconfig.dst_addr = sport->port.mapbase + UARTDR;
dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_tx_sconfig.dst_maxburst = 1;
dma_tx_sconfig.direction = DMA_MEM_TO_DEV;
ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);
- if (ret) {
+ if (ret < 0) {
dev_err(sport->port.dev,
- "DMA slave config failed, err = %d\n", ret);
+ "Dma slave config failed, err = %d\n", ret);
return ret;
}
@@ -419,6 +482,7 @@ static int lpuart_dma_tx_request(struct uart_port *port)
static void lpuart_flush_buffer(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ u32 val;
if (sport->lpuart_dma_tx_use) {
if (sport->dma_tx_in_progress) {
@@ -428,6 +492,16 @@ static void lpuart_flush_buffer(struct uart_port *port)
}
dmaengine_terminate_all(sport->dma_tx_chan);
}
+
+ if (sport->lpuart32) {
+ val = lpuart32_read(sport->port.membase + UARTFIFO);
+ val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
+ lpuart32_write(val, sport->port.membase + UARTFIFO);
+ } else {
+ val = readb(sport->port.membase + UARTPFIFO);
+ val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
+ writeb(val, sport->port.membase + UARTCFIFO);
+ }
}
static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
@@ -479,13 +553,13 @@ static void lpuart_start_tx(struct uart_port *port)
struct circ_buf *xmit = &sport->port.state->xmit;
unsigned char temp;
- temp = readb(port->membase + UARTCR2);
- writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);
-
if (sport->lpuart_dma_tx_use) {
if (!uart_circ_empty(xmit) && !uart_tx_stopped(port))
lpuart_dma_tx(sport);
} else {
+ temp = readb(port->membase + UARTCR2);
+ writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);
+
if (readb(port->membase + UARTSR1) & UARTSR1_TDRE)
lpuart_transmit_buffer(sport);
}
@@ -494,13 +568,33 @@ static void lpuart_start_tx(struct uart_port *port)
static void lpuart32_start_tx(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long temp;
- temp = lpuart32_read(port->membase + UARTCTRL);
- lpuart32_write(temp | UARTCTRL_TIE, port->membase + UARTCTRL);
+ if (sport->lpuart_dma_tx_use) {
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(port))
+ lpuart_dma_tx(sport);
+ } else {
+ temp = lpuart32_read(port->membase + UARTCTRL);
+ lpuart32_write(temp | UARTCTRL_TIE, port->membase + UARTCTRL);
+ if (lpuart32_read(sport->port.membase + UARTSTAT) &
+ UARTSTAT_TDRE)
+ lpuart32_transmit_buffer(sport);
+ }
+}
- if (lpuart32_read(port->membase + UARTSTAT) & UARTSTAT_TDRE)
- lpuart32_transmit_buffer(sport);
+static void
+lpuart_uart_pm(struct uart_port *port, unsigned int state, unsigned int oldstate)
+{
+ switch (state) {
+ case UART_PM_STATE_OFF:
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+ break;
+ default:
+ pm_runtime_get_sync(port->dev);
+ break;
+ }
}
/* return TIOCSER_TEMT when transmitter is not busy */
@@ -522,8 +616,18 @@ static unsigned int lpuart_tx_empty(struct uart_port *port)
static unsigned int lpuart32_tx_empty(struct uart_port *port)
{
- return (lpuart32_read(port->membase + UARTSTAT) & UARTSTAT_TC) ?
- TIOCSER_TEMT : 0;
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ unsigned int sr1 = lpuart32_read(port->membase + UARTSTAT);
+ unsigned int sfifo = lpuart32_read(sport->port.membase + UARTFIFO);
+
+ if (sport->dma_tx_in_progress)
+ return 0;
+
+ if (sr1 & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT)
+ return TIOCSER_TEMT;
+
+ return 0;
}
static irqreturn_t lpuart_txint(int irq, void *dev_id)
@@ -638,13 +742,19 @@ static irqreturn_t lpuart32_rxint(int irq, void *dev_id)
while (!(lpuart32_read(sport->port.membase + UARTFIFO) & UARTFIFO_RXEMPT)) {
flg = TTY_NORMAL;
sport->port.icount.rx++;
+
/*
* to clear the FE, OR, NF, FE, PE flags,
* read STAT then read DATA reg
*/
sr = lpuart32_read(sport->port.membase + UARTSTAT);
rx = lpuart32_read(sport->port.membase + UARTDATA);
- rx &= 0x3ff;
+
+ if ((sr & UARTSTAT_FE) && (rx & UARTDATA_FRETSC) &&
+ !(rx & UARTDATA_MASK)) {
+ if (uart_handle_break(&sport->port))
+ continue;
+ }
if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
continue;
@@ -677,8 +787,13 @@ static irqreturn_t lpuart32_rxint(int irq, void *dev_id)
#ifdef SUPPORT_SYSRQ
sport->port.sysrq = 0;
#endif
+ continue;
}
+ if (rx & UARTDATA_INVALID)
+ continue;
+
+ rx &= UARTDATA_MASK;
tty_insert_flip_char(port, rx, flg);
}
@@ -692,14 +807,19 @@ out:
static irqreturn_t lpuart_int(int irq, void *dev_id)
{
struct lpuart_port *sport = dev_id;
- unsigned char sts;
+ unsigned char sts, crdma;
sts = readb(sport->port.membase + UARTSR1);
+ crdma = readb(sport->port.membase + UARTCR5);
- if (sts & UARTSR1_RDRF)
- lpuart_rxint(irq, dev_id);
+ if (sts & UARTSR1_RDRF && !(crdma & UARTCR5_RDMAS)) {
+ if (sport->lpuart_dma_rx_use)
+ lpuart_prepare_rx(sport);
+ else
+ lpuart_rxint(irq, dev_id);
+ }
- if (sts & UARTSR1_TDRE)
+ if (sts & UARTSR1_TDRE && !sport->lpuart_dma_tx_use)
lpuart_txint(irq, dev_id);
return IRQ_HANDLED;
@@ -708,196 +828,164 @@ static irqreturn_t lpuart_int(int irq, void *dev_id)
static irqreturn_t lpuart32_int(int irq, void *dev_id)
{
struct lpuart_port *sport = dev_id;
- unsigned long sts, rxcount;
+ unsigned long sts, rxcount, crdma;
sts = lpuart32_read(sport->port.membase + UARTSTAT);
rxcount = lpuart32_read(sport->port.membase + UARTWATER);
rxcount = rxcount >> UARTWATER_RXCNT_OFF;
+ crdma = lpuart32_read(sport->port.membase + UARTBAUD);
+
+ if (!sts)
+ return IRQ_NONE;
+
+ if (!(crdma & UARTBAUD_RDMAE) && rxcount > 0) {
+ if (!sport->lpuart_dma_rx_use ||
+ (sts & (UARTSTAT_PE | UARTSTAT_NF | UARTSTAT_FE)))
+ lpuart32_rxint(irq, dev_id);
+ else if (sport->lpuart_dma_rx_use && sport->dma_rx_chan_active)
+ lpuart_prepare_rx(sport);
+ } else if (!(crdma & UARTBAUD_RDMAE) && (sts & UARTSTAT_IDLE) &&
+ !(sport->lpuart_dma_rx_use && sport->dma_eeop &&
+ rxcount > 0)) {
+ lpuart32_write(UARTSTAT_IDLE, sport->port.membase + UARTSTAT);
+ }
- if (sts & UARTSTAT_RDRF || rxcount > 0)
- lpuart32_rxint(irq, dev_id);
-
- if ((sts & UARTSTAT_TDRE) &&
- !(lpuart32_read(sport->port.membase + UARTBAUD) & UARTBAUD_TDMAE))
+ if (sts & UARTSTAT_TDRE && !sport->lpuart_dma_tx_use)
lpuart_txint(irq, dev_id);
+ sts &= ~UARTSTAT_IDLE;
lpuart32_write(sts, sport->port.membase + UARTSTAT);
return IRQ_HANDLED;
}
-static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
+static void lpuart_copy_rx_to_tty(struct lpuart_port *sport,
+ struct tty_port *tty, int count)
{
- struct tty_port *port = &sport->port.state->port;
- struct dma_tx_state state;
- enum dma_status dmastat;
- struct circ_buf *ring = &sport->rx_ring;
- unsigned long flags;
- int count = 0;
- unsigned char sr;
-
- sr = readb(sport->port.membase + UARTSR1);
+ int copied;
- if (sr & (UARTSR1_PE | UARTSR1_FE)) {
- /* Read DR to clear the error flags */
- readb(sport->port.membase + UARTDR);
-
- if (sr & UARTSR1_PE)
- sport->port.icount.parity++;
- else if (sr & UARTSR1_FE)
- sport->port.icount.frame++;
- }
-
- async_tx_ack(sport->dma_rx_desc);
+ sport->port.icount.rx += count;
- spin_lock_irqsave(&sport->port.lock, flags);
-
- dmastat = dmaengine_tx_status(sport->dma_rx_chan,
- sport->dma_rx_cookie,
- &state);
-
- if (dmastat == DMA_ERROR) {
- dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ if (!tty) {
+ dev_err(sport->port.dev, "No tty port\n");
return;
}
- /* CPU claims ownership of RX DMA buffer */
- dma_sync_sg_for_cpu(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(sport->port.dev, sport->dma_rx_buf_bus,
+ sport->rxdma_len, DMA_FROM_DEVICE);
+ copied = tty_insert_flip_string(tty,
+ ((unsigned char *)(sport->dma_rx_buf_virt)), count);
- /*
- * ring->head points to the end of data already written by the DMA.
- * ring->tail points to the beginning of data to be read by the
- * framework.
- * The current transfer size should not be larger than the dma buffer
- * length.
- */
- ring->head = sport->rx_sgl.length - state.residue;
- BUG_ON(ring->head > sport->rx_sgl.length);
- /*
- * At this point ring->head may point to the first byte right after the
- * last byte of the dma buffer:
- * 0 <= ring->head <= sport->rx_sgl.length
- *
- * However ring->tail must always points inside the dma buffer:
- * 0 <= ring->tail <= sport->rx_sgl.length - 1
- *
- * Since we use a ring buffer, we have to handle the case
- * where head is lower than tail. In such a case, we first read from
- * tail to the end of the buffer then reset tail.
- */
- if (ring->head < ring->tail) {
- count = sport->rx_sgl.length - ring->tail;
+ if (copied != count)
+ sport->port.icount.buf_overrun += count - copied;
+ sport->port.icount.rx += copied;
+}
- tty_insert_flip_string(port, ring->buf + ring->tail, count);
- ring->tail = 0;
- sport->port.icount.rx += count;
- }
+static void lpuart_dma_stop(struct lpuart_port *sport, bool enable_pio)
+{
+ unsigned int temp;
+ unsigned int crdma;
- /* Finally we read data from tail to head */
- if (ring->tail < ring->head) {
- count = ring->head - ring->tail;
- tty_insert_flip_string(port, ring->buf + ring->tail, count);
- /* Wrap ring->head if needed */
- if (ring->head >= sport->rx_sgl.length)
- ring->head = 0;
- ring->tail = ring->head;
- sport->port.icount.rx += count;
+ if (sport->lpuart32) {
+ lpuart32_write(UARTSTAT_IDLE, sport->port.membase + UARTSTAT);
+ crdma = lpuart32_read(sport->port.membase + UARTBAUD);
+ lpuart32_write(crdma & ~(UARTBAUD_RDMAE | UARTBAUD_RIDMAE),
+ sport->port.membase + UARTBAUD);
+ if (enable_pio) {
+ temp = lpuart32_read(sport->port.membase + UARTCTRL);
+ temp |= (UARTCTRL_RIE | UARTCTRL_ILIE);
+ lpuart32_write(temp, sport->port.membase + UARTCTRL);
+ }
+ } else {
+ temp = readb(sport->port.membase + UARTCR5);
+ writeb(temp & ~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
}
+}
- dma_sync_sg_for_device(sport->port.dev, &sport->rx_sgl, 1,
- DMA_FROM_DEVICE);
+static void lpuart_dma_rx_complete(void *arg)
+{
+ struct lpuart_port *sport = arg;
+ struct tty_port *port = &sport->port.state->port;
+ unsigned long flags;
+ struct dma_tx_state state;
+ int count, rxcount;
+
+ if (!sport->dma_eeop)
+ mod_timer(&sport->lpuart_timer,
+ jiffies + sport->dma_rx_timeout);
+ spin_lock_irqsave(&sport->port.lock, flags);
+ sport->dma_rx_in_progress = false;
+ dmaengine_tx_status(sport->dma_rx_chan, sport->dma_rx_cookie, &state);
+ count = sport->rxdma_len - state.residue;
spin_unlock_irqrestore(&sport->port.lock, flags);
+ lpuart_copy_rx_to_tty(sport, port, count);
tty_flip_buffer_push(port);
- mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout);
-}
-static void lpuart_dma_rx_complete(void *arg)
-{
- struct lpuart_port *sport = arg;
+ spin_lock_irqsave(&sport->port.lock, flags);
- lpuart_copy_rx_to_tty(sport);
-}
+ /* For end of packet, clear the idle flag so it does not trigger
+ * the next transfer. Only i.MX8x lpuart supports EEOP.
+ */
+ if (sport->dma_eeop && sport->lpuart32) {
+ rxcount = lpuart32_read(sport->port.membase + UARTWATER);
+ rxcount = rxcount >> UARTWATER_RXCNT_OFF;
+ if (!rxcount)
+ lpuart32_write(UARTSTAT_IDLE,
+ sport->port.membase + UARTSTAT);
+ }
-static void lpuart_timer_func(unsigned long data)
-{
- struct lpuart_port *sport = (struct lpuart_port *)data;
+ if (!sport->dma_eeop && count < sport->rxfifo_watermark)
+ lpuart_dma_stop(sport, true);
+ else
+ lpuart_dma_rx(sport);
- lpuart_copy_rx_to_tty(sport);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
-static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
+static void lpuart_timer_func(unsigned long data)
{
- struct dma_slave_config dma_rx_sconfig = {};
- struct circ_buf *ring = &sport->rx_ring;
- int ret, nent;
- int bits, baud;
+ struct lpuart_port *sport = (struct lpuart_port *)data;
struct tty_port *port = &sport->port.state->port;
- struct tty_struct *tty = port->tty;
- struct ktermios *termios = &tty->termios;
-
- baud = tty_get_baud_rate(tty);
+ struct dma_tx_state state;
+ unsigned long flags;
+ int count;
- bits = (termios->c_cflag & CSIZE) == CS7 ? 9 : 10;
- if (termios->c_cflag & PARENB)
- bits++;
+ del_timer(&sport->lpuart_timer);
+ dmaengine_pause(sport->dma_rx_chan);
+ dmaengine_tx_status(sport->dma_rx_chan, sport->dma_rx_cookie, &state);
+ dmaengine_terminate_all(sport->dma_rx_chan);
+ count = sport->rxdma_len - state.residue;
- /*
- * Calculate length of one DMA buffer size to keep latency below
- * 10ms at any baud rate.
- */
- sport->rx_dma_rng_buf_len = (DMA_RX_TIMEOUT * baud / bits / 1000) * 2;
- sport->rx_dma_rng_buf_len = (1 << (fls(sport->rx_dma_rng_buf_len) - 1));
- if (sport->rx_dma_rng_buf_len < 16)
- sport->rx_dma_rng_buf_len = 16;
-
- ring->buf = kmalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC);
- if (!ring->buf) {
- dev_err(sport->port.dev, "Ring buf alloc failed\n");
- return -ENOMEM;
- }
-
- sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len);
- sg_set_buf(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len);
- nent = dma_map_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+ spin_lock_irqsave(&sport->port.lock, flags);
- if (!nent) {
- dev_err(sport->port.dev, "DMA Rx mapping error\n");
- return -EINVAL;
- }
+ sport->dma_rx_in_progress = false;
+ lpuart_copy_rx_to_tty(sport, port, count);
+ tty_flip_buffer_push(port);
+ lpuart_dma_stop(sport, true);
- dma_rx_sconfig.src_addr = sport->port.mapbase + UARTDR;
- dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- dma_rx_sconfig.src_maxburst = 1;
- dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
- ret = dmaengine_slave_config(sport->dma_rx_chan, &dma_rx_sconfig);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
- if (ret < 0) {
- dev_err(sport->port.dev,
- "DMA Rx slave config failed, err = %d\n", ret);
- return ret;
- }
+static int lpuart_dma_rx(struct lpuart_port *sport)
+{
+ dma_sync_single_for_device(sport->port.dev, sport->dma_rx_buf_bus,
+ sport->rxdma_len, DMA_FROM_DEVICE);
+ sport->dma_rx_desc = dmaengine_prep_slave_single(sport->dma_rx_chan,
+ sport->dma_rx_buf_bus, sport->rxdma_len,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
- sport->dma_rx_desc = dmaengine_prep_dma_cyclic(sport->dma_rx_chan,
- sg_dma_address(&sport->rx_sgl),
- sport->rx_sgl.length,
- sport->rx_sgl.length / 2,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT);
if (!sport->dma_rx_desc) {
- dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n");
- return -EFAULT;
+ dev_err(sport->port.dev, "Not able to get desc for rx\n");
+ return -EIO;
}
sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
sport->dma_rx_desc->callback_param = sport;
+ sport->dma_rx_in_progress = true;
sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
dma_async_issue_pending(sport->dma_rx_chan);
- writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS,
- sport->port.membase + UARTCR5);
-
return 0;
}
@@ -906,15 +994,44 @@ static void lpuart_dma_rx_free(struct uart_port *port)
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
- if (sport->dma_rx_chan)
- dmaengine_terminate_all(sport->dma_rx_chan);
+ sport->dma_rx_chan_active = false;
+ dma_unmap_single(sport->port.dev, sport->dma_rx_buf_bus,
+ sport->rxdma_len, DMA_FROM_DEVICE);
- dma_unmap_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
- kfree(sport->rx_ring.buf);
- sport->rx_ring.tail = 0;
- sport->rx_ring.head = 0;
- sport->dma_rx_desc = NULL;
- sport->dma_rx_cookie = -EINVAL;
+ devm_kfree(sport->port.dev, sport->dma_rx_buf_virt);
+ sport->dma_rx_buf_bus = 0;
+ sport->dma_rx_buf_virt = NULL;
+}
+
+static inline void lpuart_prepare_rx(struct lpuart_port *sport)
+{
+ unsigned long flags;
+ unsigned int temp;
+ unsigned int crdma;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ if (!sport->dma_eeop) {
+ sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
+ add_timer(&sport->lpuart_timer);
+ }
+
+ lpuart_dma_rx(sport);
+ if (sport->lpuart32) {
+ temp = lpuart32_read(sport->port.membase + UARTCTRL);
+ temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE);
+ lpuart32_write(temp, sport->port.membase + UARTCTRL);
+ crdma = lpuart32_read(sport->port.membase + UARTBAUD);
+ if (sport->dma_eeop)
+ crdma |= UARTBAUD_RIDMAE;
+ lpuart32_write(crdma | UARTBAUD_RDMAE,
+ sport->port.membase + UARTBAUD);
+ } else {
+ temp = readb(sport->port.membase + UARTCR5);
+ writeb(temp | UARTCR5_RDMAS, sport->port.membase + UARTCR5);
+ }
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
static int lpuart_config_rs485(struct uart_port *port,
@@ -990,11 +1107,15 @@ static unsigned int lpuart32_get_mctrl(struct uart_port *port)
if (reg & UARTMODIR_RXRTSE)
temp |= TIOCM_RTS;
+ if (lpuart32_read(port->membase + UARTCTRL) & UARTCTRL_LOOPS)
+ temp |= TIOCM_LOOP;
+
return temp;
}
static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
+ /* No flow control for user handle */
unsigned char temp;
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
@@ -1018,16 +1139,13 @@ static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned long temp;
- temp = lpuart32_read(port->membase + UARTMODIR) &
- ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
-
- if (mctrl & TIOCM_RTS)
- temp |= UARTMODIR_RXRTSE;
-
- if (mctrl & TIOCM_CTS)
- temp |= UARTMODIR_TXCTSE;
+ temp = lpuart32_read(port->membase + UARTCTRL);
+ if (mctrl & TIOCM_LOOP)
+ temp |= UARTCTRL_LOOPS;
+ else
+ temp &= ~UARTCTRL_LOOPS;
- lpuart32_write(temp, port->membase + UARTMODIR);
+ lpuart32_write(temp, port->membase + UARTCTRL);
}
static void lpuart_break_ctl(struct uart_port *port, int break_state)
@@ -1090,6 +1208,7 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
{
unsigned long val, ctrl;
unsigned long ctrl_saved;
+ unsigned long rxiden_cnt = UARTFIFO_RXIDEN_RDRF;
ctrl = lpuart32_read(sport->port.membase + UARTCTRL);
ctrl_saved = ctrl;
@@ -1101,28 +1220,82 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
val = lpuart32_read(sport->port.membase + UARTFIFO);
val |= UARTFIFO_TXFE | UARTFIFO_RXFE;
val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
+ val &= ~(UARTFIFO_RXIDEN_MASK << UARTFIFO_RXIDEN_OFF);
+ if (sport->dma_eeop)
+ rxiden_cnt = 0;
+ val |= ((rxiden_cnt & UARTFIFO_RXIDEN_MASK) <<
+ UARTFIFO_RXIDEN_OFF);
lpuart32_write(val, sport->port.membase + UARTFIFO);
/* set the watermark */
- val = (0x1 << UARTWATER_RXWATER_OFF) | (0x0 << UARTWATER_TXWATER_OFF);
+ if (uart_console(&sport->port)) {
+ val = (0x1 << UARTWATER_RXWATER_OFF) |
+ (0x0 << UARTWATER_TXWATER_OFF);
+ } else {
+ val = lpuart32_read(sport->port.membase + UARTMODIR);
+ val = sport->rts_watermark << UARTMODIR_RTSWATER_S;
+ lpuart32_write(val, sport->port.membase + UARTMODIR);
+ val = (sport->rxfifo_watermark << UARTWATER_RXWATER_OFF) |
+ (sport->txfifo_watermark << UARTWATER_TXWATER_OFF);
+ }
lpuart32_write(val, sport->port.membase + UARTWATER);
/* Restore cr2 */
lpuart32_write(ctrl_saved, sport->port.membase + UARTCTRL);
}
-static void rx_dma_timer_init(struct lpuart_port *sport)
+static int lpuart_dma_rx_request(struct uart_port *port)
{
- setup_timer(&sport->lpuart_timer, lpuart_timer_func,
- (unsigned long)sport);
- sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
- add_timer(&sport->lpuart_timer);
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ struct dma_slave_config dma_rx_sconfig;
+ dma_addr_t dma_bus;
+ unsigned char *dma_buf;
+ int ret;
+
+ dma_buf = devm_kzalloc(sport->port.dev,
+ sport->rxdma_len, GFP_KERNEL);
+
+ if (!dma_buf) {
+ dev_err(sport->port.dev, "Dma rx alloc failed\n");
+ return -ENOMEM;
+ }
+
+ dma_bus = dma_map_single(sport->port.dev, dma_buf,
+ sport->rxdma_len, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(sport->port.dev, dma_bus)) {
+ dev_err(sport->port.dev, "dma_map_single rx failed\n");
+ return -ENOMEM;
+ }
+
+ if (sport->lpuart32)
+ dma_rx_sconfig.src_addr = sport->port.mapbase + UARTDATA;
+ else
+ dma_rx_sconfig.src_addr = sport->port.mapbase + UARTDR;
+
+ dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_rx_sconfig.src_maxburst = 1;
+ dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(sport->dma_rx_chan, &dma_rx_sconfig);
+
+ if (ret < 0) {
+ dev_err(sport->port.dev,
+ "Dma slave config failed, err = %d\n", ret);
+ return ret;
+ }
+
+ sport->dma_rx_buf_virt = dma_buf;
+ sport->dma_rx_buf_bus = dma_bus;
+ sport->dma_rx_in_progress = false;
+ sport->dma_rx_chan_active = true;
+
+ return 0;
}
static int lpuart_startup(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- int ret;
unsigned long flags;
unsigned char temp;
@@ -1136,74 +1309,95 @@ static int lpuart_startup(struct uart_port *port)
sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
UARTPFIFO_FIFOSIZE_MASK) + 1);
+ sport->rxdma_len = FSL_UART_RX_DMA_BUFFER_SIZE;
- ret = devm_request_irq(port->dev, port->irq, lpuart_int, 0,
- DRIVER_NAME, sport);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&sport->port.lock, flags);
-
- lpuart_setup_watermark(sport);
-
- temp = readb(sport->port.membase + UARTCR2);
- temp |= (UARTCR2_RIE | UARTCR2_TIE | UARTCR2_RE | UARTCR2_TE);
- writeb(temp, sport->port.membase + UARTCR2);
-
- if (sport->dma_rx_chan && !lpuart_start_rx_dma(sport)) {
- /* set Rx DMA timeout */
- sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT);
- if (!sport->dma_rx_timeout)
- sport->dma_rx_timeout = 1;
-
+ if (sport->dma_rx_chan && !lpuart_dma_rx_request(port)) {
sport->lpuart_dma_rx_use = true;
- rx_dma_timer_init(sport);
- } else {
+ setup_timer(&sport->lpuart_timer, lpuart_timer_func,
+ (unsigned long)sport);
+ } else
sport->lpuart_dma_rx_use = false;
- }
+
if (sport->dma_tx_chan && !lpuart_dma_tx_request(port)) {
init_waitqueue_head(&sport->dma_wait);
sport->lpuart_dma_tx_use = true;
temp = readb(port->membase + UARTCR5);
+ temp &= ~UARTCR5_RDMAS;
writeb(temp | UARTCR5_TDMAS, port->membase + UARTCR5);
- } else {
+ } else
sport->lpuart_dma_tx_use = false;
- }
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ lpuart_setup_watermark(sport);
+
+ temp = readb(sport->port.membase + UARTCR2);
+ temp |= (UARTCR2_RIE | UARTCR2_RE | UARTCR2_TE);
+ writeb(temp, sport->port.membase + UARTCR2);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
return 0;
}
static int lpuart32_startup(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ struct tty_port *tty_port = &sport->port.state->port;
int ret;
unsigned long flags;
unsigned long temp;
+ /* some modems may need a reset */
+ if (!tty_port_suspended(tty_port)) {
+ ret = device_reset(sport->port.dev);
+ if (ret && ret != -ENOENT)
+ return ret;
+ }
+
/* determine FIFO size */
temp = lpuart32_read(sport->port.membase + UARTFIFO);
sport->txfifo_size = 0x1 << (((temp >> UARTFIFO_TXSIZE_OFF) &
- UARTFIFO_FIFOSIZE_MASK) - 1);
+ UARTFIFO_FIFOSIZE_MASK) + 1);
+
+ sport->port.fifosize = sport->txfifo_size;
sport->rxfifo_size = 0x1 << (((temp >> UARTFIFO_RXSIZE_OFF) &
- UARTFIFO_FIFOSIZE_MASK) - 1);
+ UARTFIFO_FIFOSIZE_MASK) + 1);
- ret = devm_request_irq(port->dev, port->irq, lpuart32_int, 0,
- DRIVER_NAME, sport);
- if (ret)
- return ret;
+ sport->txfifo_watermark = sport->txfifo_size >> 1;
+ sport->rxfifo_watermark = 1;
+ sport->rts_watermark = sport->rxfifo_size >> 1;
+ sport->rxdma_len = FSL_UART_RX_DMA_BUFFER_SIZE;
+
+ if (sport->dma_rx_chan && !lpuart_dma_rx_request(port)) {
+ sport->lpuart_dma_rx_use = true;
+ if (!sport->dma_eeop)
+ setup_timer(&sport->lpuart_timer,
+ lpuart_timer_func,
+ (unsigned long)sport);
+ } else
+ sport->lpuart_dma_rx_use = false;
+
+
+ if (sport->dma_tx_chan && !lpuart_dma_tx_request(port)) {
+ init_waitqueue_head(&sport->dma_wait);
+ sport->lpuart_dma_tx_use = true;
+ temp = lpuart32_read(sport->port.membase + UARTBAUD);
+ temp |= UARTBAUD_TDMAE;
+ lpuart32_write(temp, sport->port.membase + UARTBAUD);
+ } else
+ sport->lpuart_dma_tx_use = false;
spin_lock_irqsave(&sport->port.lock, flags);
lpuart32_setup_watermark(sport);
temp = lpuart32_read(sport->port.membase + UARTCTRL);
- temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE | UARTCTRL_TE);
+ temp |= (UARTCTRL_RIE | UARTCTRL_RE | UARTCTRL_TE);
temp |= UARTCTRL_ILIE;
+ temp |= UARTCTRL_IDLECFG << UARTCTRL_IDLECFG_OFF;
lpuart32_write(temp, sport->port.membase + UARTCTRL);
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1215,6 +1409,7 @@ static void lpuart_shutdown(struct uart_port *port)
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned char temp;
unsigned long flags;
+ int ret;
spin_lock_irqsave(&port->lock, flags);
@@ -1226,21 +1421,20 @@ static void lpuart_shutdown(struct uart_port *port)
spin_unlock_irqrestore(&port->lock, flags);
- devm_free_irq(port->dev, port->irq, sport);
-
if (sport->lpuart_dma_rx_use) {
+ sport->dma_rx_in_progress = false;
+ dmaengine_terminate_all(sport->dma_rx_chan);
del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
}
if (sport->lpuart_dma_tx_use) {
- if (wait_event_interruptible(sport->dma_wait,
- !sport->dma_tx_in_progress) != false) {
+ ret = wait_event_interruptible_timeout(sport->dma_wait,
+ !sport->dma_tx_in_progress, msecs_to_jiffies(1));
+ if (ret <= 0) {
sport->dma_tx_in_progress = false;
dmaengine_terminate_all(sport->dma_tx_chan);
}
-
- lpuart_stop_tx(port);
}
}
@@ -1249,18 +1443,45 @@ static void lpuart32_shutdown(struct uart_port *port)
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long temp;
unsigned long flags;
+ int ret;
spin_lock_irqsave(&port->lock, flags);
+ /* clear status */
+ temp = lpuart32_read(sport->port.membase + UARTSTAT);
+ lpuart32_write(temp, sport->port.membase + UARTSTAT);
+
+ /* disable Rx/Tx DMA */
+ temp = lpuart32_read(sport->port.membase + UARTBAUD);
+ temp &= ~(UARTBAUD_TDMAE | UARTBAUD_RDMAE | UARTBAUD_RIDMAE);
+ lpuart32_write(temp, sport->port.membase + UARTBAUD);
+
/* disable Rx/Tx and interrupts */
temp = lpuart32_read(port->membase + UARTCTRL);
- temp &= ~(UARTCTRL_TE | UARTCTRL_RE |
- UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE);
+ temp &= ~(UARTCTRL_TE | UARTCTRL_RE | UARTCTRL_TIE |
+ UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_ILIE |
+ UARTCTRL_LOOPS);
lpuart32_write(temp, port->membase + UARTCTRL);
+ lpuart32_write(0, sport->port.membase + UARTMODIR);
spin_unlock_irqrestore(&port->lock, flags);
- devm_free_irq(port->dev, port->irq, sport);
+ if (sport->lpuart_dma_rx_use) {
+ sport->dma_rx_in_progress = false;
+ dmaengine_terminate_all(sport->dma_rx_chan);
+ if (!sport->dma_eeop)
+ del_timer_sync(&sport->lpuart_timer);
+ lpuart_dma_rx_free(&sport->port);
+ }
+
+ if (sport->lpuart_dma_tx_use) {
+ ret = wait_event_interruptible_timeout(sport->dma_wait,
+ !sport->dma_tx_in_progress, msecs_to_jiffies(1));
+ if (ret <= 0) {
+ sport->dma_tx_in_progress = false;
+ dmaengine_terminate_all(sport->dma_tx_chan);
+ }
+ }
}
static void
@@ -1376,6 +1597,17 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
+ if (sport->lpuart_dma_rx_use) {
+ /* Calculate delay for 1.5 DMA buffers */
+ sport->dma_rx_timeout = (sport->port.timeout - HZ / 50) *
+ sport->rxdma_len * 3 /
+ sport->rxfifo_size / 2;
+ dev_dbg(port->dev, "DMA Rx t-out %ums, tty t-out %u jiffies\n",
+ sport->dma_rx_timeout * 1000 / HZ, sport->port.timeout);
+ if (sport->dma_rx_timeout < msecs_to_jiffies(20))
+ sport->dma_rx_timeout = msecs_to_jiffies(20);
+ }
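[Editor note] A worked example of the Rx DMA timeout computed above, with all numbers assumed for illustration (HZ = 100) and assuming port->timeout covers one FIFO worth of characters plus HZ/50 slack, as the serial core sets it:

/*
 * Assumed values: port->timeout = 12 jiffies, rxdma_len = 128,
 * rxfifo_size = 16:
 *   (12 - 100/50) * 128 * 3 / 16 / 2 = 120 jiffies (~1.2 s at HZ = 100),
 * i.e. the drain time of roughly 1.5 DMA buffers (12 FIFO-fulls), and
 * the result is never allowed to drop below msecs_to_jiffies(20).
 */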
+
/* wait transmit engin complete */
while (!(readb(sport->port.membase + UARTSR1) & UARTSR1_TC))
barrier();
@@ -1400,25 +1632,74 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
/* restore control register */
writeb(old_cr2, sport->port.membase + UARTCR2);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static void
+lpuart32_serial_setbrg(struct lpuart_port *sport, unsigned int baudrate)
+{
+ u32 sbr, osr, baud_diff, tmp_osr, tmp_sbr, tmp_diff, tmp;
+ u32 clk = sport->port.uartclk;
+
/*
- * If new baud rate is set, we will also need to update the Ring buffer
- * length according to the selected baud rate and restart Rx DMA path.
+ * The idea is to use the best OSR (over-sampling rate) possible.
+ * Note, OSR is typically hard-set to 16 in other LPUART instantiations.
+ * Loop to find the best OSR value possible, one that generates the
+ * minimum baud_diff, by iterating through the supported OSR values.
+ *
+ * Calculation Formula:
+ * Baud Rate = baud clock / ((OSR+1) × SBR)
*/
- if (old) {
- if (sport->lpuart_dma_rx_use) {
- del_timer_sync(&sport->lpuart_timer);
- lpuart_dma_rx_free(&sport->port);
+ baud_diff = baudrate;
+ osr = 0;
+ sbr = 0;
+
+ for (tmp_osr = 4; tmp_osr <= 32; tmp_osr++) {
+ /* calculate the temporary sbr value */
+ tmp_sbr = (clk / (baudrate * tmp_osr));
+ if (tmp_sbr == 0)
+ tmp_sbr = 1;
+
+ /*
+ * calculate the baud rate difference based on the temporary
+ * osr and sbr values
+ */
+ tmp_diff = clk / (tmp_osr * tmp_sbr) - baudrate;
+
+ /* select best values between sbr and sbr+1 */
+ tmp = clk / (tmp_osr * (tmp_sbr + 1));
+ if (tmp_diff > (baudrate - tmp)) {
+ tmp_diff = baudrate - tmp;
+ tmp_sbr++;
}
- if (sport->dma_rx_chan && !lpuart_start_rx_dma(sport)) {
- sport->lpuart_dma_rx_use = true;
- rx_dma_timer_init(sport);
- } else {
- sport->lpuart_dma_rx_use = false;
+ if (tmp_diff <= baud_diff) {
+ baud_diff = tmp_diff;
+ osr = tmp_osr;
+ sbr = tmp_sbr;
+
+ if (!baud_diff)
+ break;
}
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ /* handle baud rate outside the acceptable range */
+ if (baud_diff > ((baudrate / 100) * 3))
+ dev_warn(sport->port.dev,
+ "unacceptable baud rate difference of more than 3%%\n");
+
+ tmp = lpuart32_read(sport->port.membase + UARTBAUD);
+
+ if ((osr > 3) && (osr < 8))
+ tmp |= UARTBAUD_BOTHEDGE;
+
+ tmp &= ~(UARTBAUD_OSR_MASK << UARTBAUD_OSR_SHIFT);
+ tmp |= (((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT);
+
+ tmp &= ~UARTBAUD_SBR_MASK;
+ tmp |= sbr & UARTBAUD_SBR_MASK;
+
+ lpuart32_write(tmp, sport->port.membase + UARTBAUD);
}
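[Editor note] For clarity, a self-contained sketch of the divisor search performed by lpuart32_serial_setbrg() above. The function name is illustrative, not from the patch; it mirrors the loop and assumes clk is at least 4 * baudrate, as it is in practice.

/*
 * For each oversampling ratio 4..32, try sbr and sbr + 1 and keep the
 * pair whose integer result is closest to the requested baud rate.
 */
static void lpuart_best_osr_sbr(u32 clk, u32 baudrate, u32 *osr, u32 *sbr)
{
	u32 tmp_osr, tmp_sbr, tmp_diff, tmp, baud_diff = baudrate;

	for (tmp_osr = 4; tmp_osr <= 32; tmp_osr++) {
		tmp_sbr = clk / (baudrate * tmp_osr);
		if (!tmp_sbr)
			tmp_sbr = 1;

		/* error when rounding the divisor down ... */
		tmp_diff = clk / (tmp_osr * tmp_sbr) - baudrate;
		/* ... versus rounding it up */
		tmp = clk / (tmp_osr * (tmp_sbr + 1));
		if (tmp_diff > baudrate - tmp) {
			tmp_diff = baudrate - tmp;
			tmp_sbr++;
		}

		if (tmp_diff <= baud_diff) {
			baud_diff = tmp_diff;
			*osr = tmp_osr;	/* the register field holds osr - 1 */
			*sbr = tmp_sbr;
			if (!baud_diff)
				break;
		}
	}
}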
static void
@@ -1430,7 +1711,6 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned long ctrl, old_ctrl, bd, modem;
unsigned int baud;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
- unsigned int sbr;
ctrl = old_ctrl = lpuart32_read(sport->port.membase + UARTCTRL);
bd = lpuart32_read(sport->port.membase + UARTBAUD);
@@ -1470,7 +1750,9 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
}
if (termios->c_cflag & CSTOPB)
- termios->c_cflag &= ~CSTOPB;
+ bd |= UARTBAUD_SBNS;
+ else
+ bd &= ~UARTBAUD_SBNS;
/* parity must be enabled when CS7 to match 8-bits format */
if ((termios->c_cflag & CSIZE) == CS7)
@@ -1521,7 +1803,19 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
- /* wait transmit engin complete */
+ if (sport->lpuart_dma_rx_use && !sport->dma_eeop) {
+ /* Calculate delay for 1.5 DMA buffers */
+ sport->dma_rx_timeout = (sport->port.timeout - HZ / 50) *
+ sport->rxdma_len * 3 /
+ sport->rxfifo_size / 2;
+ dev_dbg(port->dev, "DMA Rx t-out %ums, tty t-out %u jiffies\n",
+ sport->dma_rx_timeout * 1000 / HZ, sport->port.timeout);
+ if (sport->dma_rx_timeout < msecs_to_jiffies(20))
+ sport->dma_rx_timeout = msecs_to_jiffies(20);
+ }
+
+ /* wait for transmit engine to complete; flow control is disabled here */
+ lpuart32_write(0, sport->port.membase + UARTMODIR);
while (!(lpuart32_read(sport->port.membase + UARTSTAT) & UARTSTAT_TC))
barrier();
@@ -1529,17 +1823,15 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
lpuart32_write(old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
sport->port.membase + UARTCTRL);
- sbr = sport->port.uartclk / (16 * baud);
- bd &= ~UARTBAUD_SBR_MASK;
- bd |= sbr & UARTBAUD_SBR_MASK;
- bd |= UARTBAUD_BOTHEDGE;
- bd &= ~(UARTBAUD_TDMAE | UARTBAUD_RDMAE);
lpuart32_write(bd, sport->port.membase + UARTBAUD);
+ lpuart32_serial_setbrg(sport, baud);
lpuart32_write(modem, sport->port.membase + UARTMODIR);
lpuart32_write(ctrl, sport->port.membase + UARTCTRL);
- /* restore control register */
spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ /* wait baud rate stable */
+ usleep_range(1000, 2000);
}
static const char *lpuart_type(struct uart_port *port)
@@ -1593,6 +1885,7 @@ static const struct uart_ops lpuart_pops = {
.break_ctl = lpuart_break_ctl,
.startup = lpuart_startup,
.shutdown = lpuart_shutdown,
+ .pm = lpuart_uart_pm,
.set_termios = lpuart_set_termios,
.type = lpuart_type,
.request_port = lpuart_request_port,
@@ -1612,6 +1905,7 @@ static const struct uart_ops lpuart32_pops = {
.break_ctl = lpuart32_break_ctl,
.startup = lpuart32_startup,
.shutdown = lpuart32_shutdown,
+ .pm = lpuart_uart_pm,
.set_termios = lpuart32_set_termios,
.type = lpuart_type,
.request_port = lpuart_request_port,
@@ -1724,7 +2018,10 @@ lpuart_console_get_options(struct lpuart_port *sport, int *baud,
brfa = readb(sport->port.membase + UARTCR4);
brfa &= UARTCR4_BRFA_MASK;
- uartclk = clk_get_rate(sport->clk);
+ if (sport->per_clk)
+ uartclk = clk_get_rate(sport->per_clk);
+ else
+ uartclk = clk_get_rate(sport->ipg_clk);
/*
* baud = mod_clk/(16*(sbr[13]+(brfa)/32)
*/
@@ -1740,7 +2037,7 @@ lpuart32_console_get_options(struct lpuart_port *sport, int *baud,
int *parity, int *bits)
{
unsigned long cr, bd;
- unsigned int sbr, uartclk, baud_raw;
+ unsigned int sbr, osr, uartclk, baud_raw;
cr = lpuart32_read(sport->port.membase + UARTCTRL);
cr &= UARTCTRL_TE | UARTCTRL_RE;
@@ -1767,12 +2064,13 @@ lpuart32_console_get_options(struct lpuart_port *sport, int *baud,
bd = lpuart32_read(sport->port.membase + UARTBAUD);
bd &= UARTBAUD_SBR_MASK;
sbr = bd;
- uartclk = clk_get_rate(sport->clk);
- /*
- * baud = mod_clk/(16*(sbr[13]+(brfa)/32)
- */
- baud_raw = uartclk / (16 * sbr);
+ osr = (bd >> UARTBAUD_OSR_SHIFT) & UARTBAUD_OSR_MASK;
+ if (sport->per_clk)
+ uartclk = clk_get_rate(sport->per_clk);
+ else
+ uartclk = clk_get_rate(sport->ipg_clk);
+ baud_raw = uartclk / ((osr + 1) * sbr);
if (*baud != baud_raw)
printk(KERN_INFO "Serial: Console lpuart rounded baud rate"
"from %d to %d\n", baud_raw, *baud);
@@ -1835,21 +2133,24 @@ static struct console lpuart32_console = {
.data = &lpuart_reg,
};
-static void lpuart_early_write(struct console *con, const char *s, unsigned n)
+static void
+lpuart_early_write(struct console *con, const char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, lpuart_console_putchar);
}
-static void lpuart32_early_write(struct console *con, const char *s, unsigned n)
+static void
+lpuart32_early_write(struct console *con, const char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, lpuart32_console_putchar);
}
-static int __init lpuart_early_console_setup(struct earlycon_device *device,
+static int __init
+lpuart_early_console_setup(struct earlycon_device *device,
const char *opt)
{
if (!device->port.membase)
@@ -1859,8 +2160,8 @@ static int __init lpuart_early_console_setup(struct earlycon_device *device,
return 0;
}
-static int __init lpuart32_early_console_setup(struct earlycon_device *device,
- const char *opt)
+static int __init
+lpuart32_early_console_setup(struct earlycon_device *device, const char *opt)
{
if (!device->port.membase)
return -ENODEV;
@@ -1869,8 +2170,12 @@ static int __init lpuart32_early_console_setup(struct earlycon_device *device,
return 0;
}
-OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
-OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
+OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart",
+ lpuart_early_console_setup);
+OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart",
+ lpuart32_early_console_setup);
+OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart",
+ lpuart32_early_console_setup);
EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
@@ -1900,8 +2205,6 @@ static int lpuart_probe(struct platform_device *pdev)
if (!sport)
return -ENOMEM;
- pdev->dev.coherent_dma_mask = 0;
-
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
@@ -1912,7 +2215,10 @@ static int lpuart_probe(struct platform_device *pdev)
return -EINVAL;
}
sport->port.line = ret;
- sport->lpuart32 = of_device_is_compatible(np, "fsl,ls1021a-lpuart");
+ sport->lpuart32 = of_device_is_compatible(np, "fsl,ls1021a-lpuart") |
+ of_device_is_compatible(np, "fsl,imx7ulp-lpuart") |
+ of_device_is_compatible(np, "fsl,imx8qm-lpuart");
+ sport->dma_eeop = of_device_is_compatible(np, "fsl,imx8qm-lpuart");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
@@ -1936,55 +2242,86 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.ops = &lpuart_pops;
sport->port.flags = UPF_BOOT_AUTOCONF;
- sport->port.rs485_config = lpuart_config_rs485;
+ if (!sport->lpuart32)
+ sport->port.rs485_config = lpuart_config_rs485;
- sport->clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(sport->clk)) {
- ret = PTR_ERR(sport->clk);
- dev_err(&pdev->dev, "failed to get uart clk: %d\n", ret);
+ sport->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(sport->ipg_clk)) {
+ ret = PTR_ERR(sport->ipg_clk);
+ dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
return ret;
}
+ sport->per_clk = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(sport->per_clk))
+ sport->per_clk = NULL;
- ret = clk_prepare_enable(sport->clk);
+ ret = clk_prepare_enable(sport->ipg_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable uart ipg clk: %d\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(sport->per_clk);
if (ret) {
+ clk_disable_unprepare(sport->ipg_clk);
dev_err(&pdev->dev, "failed to enable uart clk: %d\n", ret);
return ret;
}
-
- sport->port.uartclk = clk_get_rate(sport->clk);
+ if (sport->per_clk)
+ sport->port.uartclk = clk_get_rate(sport->per_clk);
+ else
+ sport->port.uartclk = clk_get_rate(sport->ipg_clk);
lpuart_ports[sport->port.line] = sport;
platform_set_drvdata(pdev, &sport->port);
- if (sport->lpuart32)
+ if (sport->lpuart32) {
lpuart_reg.cons = LPUART32_CONSOLE;
- else
+ ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0,
+ DRIVER_NAME, sport);
+ } else {
lpuart_reg.cons = LPUART_CONSOLE;
+ ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0,
+ DRIVER_NAME, sport);
+ }
+
+ if (ret)
+ goto failed_irq_request;
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
ret = uart_add_one_port(&lpuart_reg, &sport->port);
- if (ret) {
- clk_disable_unprepare(sport->clk);
- return ret;
- }
+ if (ret)
+ goto failed_attach_port;
sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
if (!sport->dma_tx_chan)
- dev_info(sport->port.dev, "DMA tx channel request failed, "
- "operating without tx DMA\n");
+ dev_info(sport->port.dev, "NO DMA tx channel, run at cpu mode\n");
sport->dma_rx_chan = dma_request_slave_channel(sport->port.dev, "rx");
if (!sport->dma_rx_chan)
- dev_info(sport->port.dev, "DMA rx channel request failed, "
- "operating without rx DMA\n");
+ dev_info(sport->port.dev, "NO DMA rx channel, run at cpu mode\n");
- if (of_property_read_bool(np, "linux,rs485-enabled-at-boot-time")) {
+ if (!sport->lpuart32 &&
+ of_property_read_bool(np, "linux,rs485-enabled-at-boot-time")) {
sport->port.rs485.flags |= SER_RS485_ENABLED;
sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
writeb(UARTMODEM_TXRTSE, sport->port.membase + UARTMODEM);
}
return 0;
+
+failed_attach_port:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+failed_irq_request:
+ clk_disable_unprepare(sport->per_clk);
+ clk_disable_unprepare(sport->ipg_clk);
+ return ret;
}
static int lpuart_remove(struct platform_device *pdev)
@@ -1993,114 +2330,371 @@ static int lpuart_remove(struct platform_device *pdev)
uart_remove_one_port(&lpuart_reg, &sport->port);
- clk_disable_unprepare(sport->clk);
-
if (sport->dma_tx_chan)
dma_release_channel(sport->dma_tx_chan);
if (sport->dma_rx_chan)
dma_release_channel(sport->dma_rx_chan);
+ clk_disable_unprepare(sport->per_clk);
+ clk_disable_unprepare(sport->ipg_clk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
return 0;
}
#ifdef CONFIG_PM_SLEEP
-static int lpuart_suspend(struct device *dev)
+static int lpuart_runtime_suspend(struct device *dev)
{
- struct lpuart_port *sport = dev_get_drvdata(dev);
- unsigned long temp;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lpuart_port *sport = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(sport->per_clk);
+ clk_disable_unprepare(sport->ipg_clk);
+
+ return 0;
+};
+
+static int lpuart_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lpuart_port *sport = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(sport->ipg_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(sport->per_clk);
+ if (ret) {
+ clk_disable_unprepare(sport->ipg_clk);
+ return ret;
+ }
+
+ return 0;
+};
+
+static void serial_lpuart_enable_wakeup(struct lpuart_port *sport, bool on)
+{
+ unsigned int val;
if (sport->lpuart32) {
- /* disable Rx/Tx and interrupts */
- temp = lpuart32_read(sport->port.membase + UARTCTRL);
- temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE);
- lpuart32_write(temp, sport->port.membase + UARTCTRL);
+ val = lpuart32_read(sport->port.membase + UARTCTRL);
+ if (on)
+ val |= (UARTCTRL_RIE | UARTCTRL_ILIE);
+ else
+ val &= ~(UARTCTRL_RIE | UARTCTRL_ILIE);
+ lpuart32_write(val, sport->port.membase + UARTCTRL);
} else {
- /* disable Rx/Tx and interrupts */
- temp = readb(sport->port.membase + UARTCR2);
- temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE);
- writeb(temp, sport->port.membase + UARTCR2);
+ val = readb(sport->port.membase + UARTCR2);
+ if (on)
+ val |= UARTCR2_RIE;
+ else
+ val &= ~UARTCR2_RIE;
+ writeb(val, sport->port.membase + UARTCR2);
+ }
+}
+
+static bool lpuart_uport_is_active(struct lpuart_port *sport)
+{
+ struct tty_port *port = &sport->port.state->port;
+ struct tty_struct *tty;
+ struct device *tty_dev;
+ int may_wake = 0;
+
+ tty = tty_port_tty_get(port);
+ if (tty) {
+ tty_dev = tty->dev;
+ may_wake = device_may_wakeup(tty_dev);
+ tty_kref_put(tty);
+ }
+
+ if ((tty_port_initialized(port) && may_wake) ||
+ (!console_suspend_enabled && uart_console(&sport->port)))
+ return true;
+
+ return false;
+}
+
+static int lpuart_suspend_noirq(struct device *dev)
+{
+ struct lpuart_port *sport = dev_get_drvdata(dev);
+
+ if (lpuart_uport_is_active(sport))
+ serial_lpuart_enable_wakeup(sport, !!sport->port.irq_wake);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int lpuart_resume_noirq(struct device *dev)
+{
+ struct lpuart_port *sport = dev_get_drvdata(dev);
+ unsigned int val;
+
+ pinctrl_pm_select_default_state(dev);
+
+ if (lpuart_uport_is_active(sport)) {
+ serial_lpuart_enable_wakeup(sport, false);
+
+ /* clear the wakeup flags */
+ if (sport->lpuart32) {
+ val = lpuart32_read(sport->port.membase + UARTSTAT);
+ lpuart32_write(val, sport->port.membase + UARTSTAT);
+ }
}
+ return 0;
+}
+
+static int lpuart_suspend(struct device *dev)
+{
+ struct lpuart_port *sport = dev_get_drvdata(dev);
+ unsigned long temp;
+ unsigned long flags;
+
uart_suspend_port(&lpuart_reg, &sport->port);
- if (sport->lpuart_dma_rx_use) {
- /*
- * EDMA driver during suspend will forcefully release any
- * non-idle DMA channels. If port wakeup is enabled or if port
- * is console port or 'no_console_suspend' is set the Rx DMA
- * cannot resume as as expected, hence gracefully release the
- * Rx DMA path before suspend and start Rx DMA path on resume.
- */
- if (sport->port.irq_wake) {
- del_timer_sync(&sport->lpuart_timer);
+ if (lpuart_uport_is_active(sport)) {
+ spin_lock_irqsave(&sport->port.lock, flags);
+ if (sport->lpuart32) {
+ temp = lpuart32_read(sport->port.membase + UARTCTRL);
+ temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE);
+ lpuart32_write(temp, sport->port.membase + UARTCTRL);
+ } else {
+ temp = readb(sport->port.membase + UARTCR2);
+ temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE);
+ writeb(temp, sport->port.membase + UARTCR2);
+ }
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ if (sport->lpuart_dma_rx_use) {
+ spin_lock_irqsave(&sport->port.lock, flags);
+ lpuart_dma_stop(sport, false);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ dmaengine_terminate_all(sport->dma_rx_chan);
+ if (!sport->dma_eeop)
+ del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
}
- /* Disable Rx DMA to use UART port as wakeup source */
- writeb(readb(sport->port.membase + UARTCR5) & ~UARTCR5_RDMAS,
- sport->port.membase + UARTCR5);
- }
+ if (sport->lpuart_dma_tx_use) {
+ spin_lock_irqsave(&sport->port.lock, flags);
+ if (sport->lpuart32) {
+ temp = lpuart32_read(sport->port.membase + UARTBAUD);
+ temp &= ~UARTBAUD_TDMAE;
+ lpuart32_write(temp, sport->port.membase + UARTBAUD);
+ } else {
+ temp = readb(sport->port.membase + UARTCR5);
+ temp &= ~UARTCR5_TDMAS;
+ writeb(temp, sport->port.membase + UARTCR5);
+ }
+ spin_unlock_irqrestore(&sport->port.lock, flags);
- if (sport->lpuart_dma_tx_use) {
- sport->dma_tx_in_progress = false;
- dmaengine_terminate_all(sport->dma_tx_chan);
+ sport->dma_tx_in_progress = false;
+ dmaengine_terminate_all(sport->dma_tx_chan);
+ }
+ } else if (pm_runtime_active(sport->port.dev)) {
+ clk_disable_unprepare(sport->per_clk);
+ clk_disable_unprepare(sport->ipg_clk);
+ pm_runtime_disable(sport->port.dev);
+ pm_runtime_set_suspended(sport->port.dev);
}
- if (sport->port.suspended && !sport->port.irq_wake)
- clk_disable_unprepare(sport->clk);
-
return 0;
}
-static int lpuart_resume(struct device *dev)
+static void lpuart_console_fixup(struct lpuart_port *sport)
+{
+ struct tty_port *port = &sport->port.state->port;
+ struct uart_port *uport = &sport->port;
+ struct device_node *np = sport->port.dev->of_node;
+ struct ktermios termios;
+
+ if (!sport->lpuart32 || !np)
+ return;
+
+ /* When i.MX7ULP enters VLLS mode, the lpuart module is powered off and
+ * all registers are lost, whether or not the port is a wakeup source.
+ * For the console port this means the baud rate setting is lost and the
+ * log becomes garbled if the console is enabled as a wakeup source. To
+ * avoid this, either do not enable the uart port as a wakeup source in
+ * VLLS mode, or restore the console settings here.
+ */
+ if (of_device_is_compatible(np, "fsl,imx7ulp-lpuart") &&
+ lpuart_uport_is_active(sport) && console_suspend_enabled &&
+ uart_console(&sport->port)) {
+
+ mutex_lock(&port->mutex);
+ memset(&termios, 0, sizeof(struct ktermios));
+ termios.c_cflag = uport->cons->cflag;
+ if (port->tty && termios.c_cflag == 0)
+ termios = port->tty->termios;
+ uport->ops->set_termios(uport, &termios, NULL);
+ mutex_unlock(&port->mutex);
+ }
+}
+
+static inline void lpuart32_resume_init(struct lpuart_port *sport)
{
- struct lpuart_port *sport = dev_get_drvdata(dev);
unsigned long temp;
+ unsigned long flags;
- if (sport->port.suspended && !sport->port.irq_wake)
- clk_prepare_enable(sport->clk);
+ spin_lock_irqsave(&sport->port.lock, flags);
+ lpuart32_setup_watermark(sport);
- if (sport->lpuart32) {
- lpuart32_setup_watermark(sport);
+ temp = lpuart32_read(sport->port.membase + UARTCTRL);
+ temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE |
+ UARTCTRL_TE | UARTCTRL_ILIE);
+
+ if (sport->dma_rx_chan)
+ temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE);
+
+ if (sport->dma_tx_chan)
+ temp &= ~(UARTCTRL_TIE | UARTCTRL_TE);
+
+ lpuart32_write(temp, sport->port.membase + UARTCTRL);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ if (sport->lpuart_dma_rx_use) {
+ if (!lpuart_dma_rx_request(&sport->port)) {
+ sport->lpuart_dma_rx_use = true;
+ if (!sport->dma_eeop)
+ setup_timer(&sport->lpuart_timer,
+ lpuart_timer_func,
+ (unsigned long)sport);
+ } else {
+ sport->lpuart_dma_rx_use = false;
+ }
+
+ spin_lock_irqsave(&sport->port.lock, flags);
temp = lpuart32_read(sport->port.membase + UARTCTRL);
- temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE |
- UARTCTRL_TE | UARTCTRL_ILIE);
+ temp |= (UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE);
+ temp |= UARTCTRL_IDLECFG << UARTCTRL_IDLECFG_OFF;
lpuart32_write(temp, sport->port.membase + UARTCTRL);
- } else {
- lpuart_setup_watermark(sport);
- temp = readb(sport->port.membase + UARTCR2);
- temp |= (UARTCR2_RIE | UARTCR2_TIE | UARTCR2_RE | UARTCR2_TE);
- writeb(temp, sport->port.membase + UARTCR2);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ }
+
+ if (sport->lpuart_dma_tx_use) {
+ if (!lpuart_dma_tx_request(&sport->port)) {
+ init_waitqueue_head(&sport->dma_wait);
+ spin_lock_irqsave(&sport->port.lock, flags);
+ temp = lpuart32_read(sport->port.membase + UARTBAUD);
+ temp |= UARTBAUD_TDMAE;
+ lpuart32_write(temp, sport->port.membase + UARTBAUD);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ } else {
+ sport->lpuart_dma_tx_use = false;
+ }
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ temp = lpuart32_read(sport->port.membase + UARTCTRL);
+ temp |= UARTCTRL_TE;
+ lpuart32_write(temp, sport->port.membase + UARTCTRL);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
+}
+
+static inline void lpuart_resume_init(struct lpuart_port *sport)
+{
+ unsigned char temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ lpuart_setup_watermark(sport);
+ temp = readb(sport->port.membase + UARTCR2);
+ temp |= (UARTCR2_RIE | UARTCR2_TIE | UARTCR2_RE | UARTCR2_TE);
+
+ if (sport->dma_rx_chan)
+ temp &= ~(UARTCR2_RIE | UARTCR2_RE);
+
+ if (sport->dma_tx_chan)
+ temp &= ~(UARTCR2_TIE | UARTCR2_TE);
+
+ writeb(temp, sport->port.membase + UARTCR2);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
if (sport->lpuart_dma_rx_use) {
- if (sport->port.irq_wake) {
- if (!lpuart_start_rx_dma(sport)) {
- sport->lpuart_dma_rx_use = true;
- rx_dma_timer_init(sport);
- } else {
- sport->lpuart_dma_rx_use = false;
- }
+ if (!lpuart_dma_rx_request(&sport->port)) {
+ sport->lpuart_dma_rx_use = true;
+ setup_timer(&sport->lpuart_timer,
+ lpuart_timer_func,
+ (unsigned long)sport);
+ } else {
+ sport->lpuart_dma_rx_use = false;
}
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ temp = readb(sport->port.membase + UARTCR2);
+ temp |= (UARTCR2_RIE | UARTCR2_RE);
+ writeb(temp, sport->port.membase + UARTCR2);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
- if (sport->dma_tx_chan && !lpuart_dma_tx_request(&sport->port)) {
+ if (sport->lpuart_dma_tx_use) {
+ if (!lpuart_dma_tx_request(&sport->port)) {
init_waitqueue_head(&sport->dma_wait);
- sport->lpuart_dma_tx_use = true;
- writeb(readb(sport->port.membase + UARTCR5) |
- UARTCR5_TDMAS, sport->port.membase + UARTCR5);
- } else {
- sport->lpuart_dma_tx_use = false;
+ spin_lock_irqsave(&sport->port.lock, flags);
+ temp = readb(sport->port.membase + UARTCR5);
+ temp |= UARTCR5_TDMAS;
+ writeb(temp, sport->port.membase + UARTCR5);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ } else {
+ sport->lpuart_dma_tx_use = false;
+ }
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ temp = readb(sport->port.membase + UARTCR2);
+ temp |= UARTCR2_TE;
+ writeb(temp, sport->port.membase + UARTCR2);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
+}
+
+static int lpuart_resume(struct device *dev)
+{
+ struct lpuart_port *sport = dev_get_drvdata(dev);
+ int ret;
+ if (lpuart_uport_is_active(sport)) {
+ if (sport->lpuart32)
+ lpuart32_resume_init(sport);
+ else
+ lpuart_resume_init(sport);
+ } else if (pm_runtime_active(sport->port.dev)) {
+ ret = clk_prepare_enable(sport->ipg_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(sport->per_clk);
+ if (ret) {
+ clk_disable_unprepare(sport->ipg_clk);
+ return ret;
+ }
+ pm_runtime_set_active(sport->port.dev);
+ pm_runtime_enable(sport->port.dev);
+ }
+
+ lpuart_console_fixup(sport);
uart_resume_port(&lpuart_reg, &sport->port);
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(lpuart_pm_ops, lpuart_suspend, lpuart_resume);
+static const struct dev_pm_ops lpuart_pm_ops = {
+ SET_RUNTIME_PM_OPS(lpuart_runtime_suspend,
+ lpuart_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(lpuart_suspend_noirq,
+ lpuart_resume_noirq)
+ SET_SYSTEM_SLEEP_PM_OPS(lpuart_suspend, lpuart_resume)
+};
+#define SERIAL_LPUART_PM_OPS (&lpuart_pm_ops)
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define SERIAL_LPUART_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
static struct platform_driver lpuart_driver = {
.probe = lpuart_probe,
@@ -2108,7 +2702,7 @@ static struct platform_driver lpuart_driver = {
.driver = {
.name = "fsl-lpuart",
.of_match_table = lpuart_dt_ids,
- .pm = &lpuart_pm_ops,
+ .pm = SERIAL_LPUART_PM_OPS,
},
};