Diffstat (limited to 'drivers/tty/serial/imx.c')
-rw-r--r--  drivers/tty/serial/imx.c | 683
1 file changed, 374 insertions, 309 deletions
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 325c38c9b451..8de9ad8d01ca 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -34,6 +34,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/rational.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -41,6 +42,7 @@
#include <linux/dma-mapping.h>
#include <asm/irq.h>
+#include <linux/busfreq-imx.h>
#include <linux/platform_data/serial-imx.h>
#include <linux/platform_data/dma-imx.h>
@@ -185,6 +187,8 @@
#define DRIVER_NAME "IMX-uart"
#define UART_NR 8
+#define IMX_RXBD_NUM 20
+#define IMX_MODULE_MAX_CLK_RATE 80000000
/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
enum imx_uart_type {
@@ -200,6 +204,24 @@ struct imx_uart_data {
enum imx_uart_type devtype;
};
+struct imx_dma_bufinfo {
+ bool filled;
+ unsigned int rx_bytes;
+};
+
+struct imx_dma_rxbuf {
+ unsigned int periods;
+ unsigned int period_len;
+ unsigned int buf_len;
+
+ void *buf;
+ dma_addr_t dmaaddr;
+ unsigned int cur_idx;
+ unsigned int last_completed_idx;
+ dma_cookie_t cookie;
+ struct imx_dma_bufinfo buf_info[IMX_RXBD_NUM];
+};
+
struct imx_port {
struct uart_port port;
struct timer_list timer;
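The imx_dma_rxbuf ring above replaces the old scatterlist/circ_buf pair: the DMA callback advances cur_idx each time one of the IMX_RXBD_NUM periods completes, while last_completed_idx trails behind, naming the last buffer already pushed to the tty layer. The stand-alone sketch below is an illustration only — the index names mirror the struct, everything else is hypothetical; note the driver initialises last_completed_idx to -1, which is the same value modulo IMX_RXBD_NUM as the initialiser used here.

#include <assert.h>
#include <stdbool.h>

#define IMX_RXBD_NUM 20

static bool filled[IMX_RXBD_NUM];
static unsigned int cur_idx;                    /* producer: DMA callback */
static unsigned int last_completed_idx = IMX_RXBD_NUM - 1; /* consumer tail */

/* producer side: one DMA period has completed */
static void period_done(void)
{
        filled[cur_idx] = true;
        cur_idx = (cur_idx + 1) % IMX_RXBD_NUM;
        assert(cur_idx != last_completed_idx);  /* "overwrite!" in the driver */
}

/* consumer side: drain every filled buffer between the two indices */
static void drain(void)
{
        while ((last_completed_idx + 1) % IMX_RXBD_NUM != cur_idx) {
                last_completed_idx = (last_completed_idx + 1) % IMX_RXBD_NUM;
                filled[last_completed_idx] = false;     /* pushed to the tty */
        }
}

int main(void)
{
        for (int i = 0; i < 10; i++)
                period_done();
        drain();                                /* consumes buffers 0..9 */
        return 0;
}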
@@ -221,16 +243,16 @@ struct imx_port {
unsigned int dma_is_rxing:1;
unsigned int dma_is_txing:1;
struct dma_chan *dma_chan_rx, *dma_chan_tx;
- struct scatterlist rx_sgl, tx_sgl[2];
- void *rx_buf;
- struct circ_buf rx_ring;
- unsigned int rx_periods;
- dma_cookie_t rx_cookie;
+ struct scatterlist tx_sgl[2];
+ struct imx_dma_rxbuf rx_buf;
unsigned int tx_bytes;
unsigned int dma_tx_nents;
+ struct work_struct tsk_dma_tx;
wait_queue_head_t dma_wait;
unsigned int saved_reg[10];
bool context_saved;
+#define DMA_TX_IS_WORKING 1
+ unsigned long flags;
};
struct imx_port_ucrs {
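DMA_TX_IS_WORKING in the new flags word acts as a one-bit mutex around TX submission: dma_tx_work() claims it with test_and_set_bit(), and dma_tx_callback() (or the error path) releases it with clear_bit() followed by smp_mb__after_atomic(). A minimal userspace analogue of that claim/release protocol, using a C11 atomic_flag in place of the kernel bit ops:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag tx_is_working = ATOMIC_FLAG_INIT;

/* analogue of dma_tx_work(): only one submitter may be in flight */
static bool try_start_tx(void)
{
        if (atomic_flag_test_and_set(&tx_is_working))
                return false;   /* a transfer is already queued */
        /* ... map the sg list, prep the descriptor, submit ... */
        return true;
}

/* analogue of dma_tx_callback() / the err_out path */
static void finish_tx(void)
{
        /* plays the role of clear_bit + smp_mb__after_atomic */
        atomic_flag_clear(&tx_is_working);
}

int main(void)
{
        printf("first start: %d\n", try_start_tx());    /* 1 */
        printf("reentry:     %d\n", try_start_tx());    /* 0: gated out */
        finish_tx();
        printf("after done:  %d\n", try_start_tx());    /* 1 */
        return 0;
}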
@@ -405,12 +427,14 @@ static void imx_stop_rx(struct uart_port *port)
}
}
- temp = readl(sport->port.membase + UCR2);
- writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);
-
- /* disable the `Receiver Ready Interrrupt` */
+ /* disable the Receiver Ready and Overrun interrupts */
temp = readl(sport->port.membase + UCR1);
writel(temp & ~UCR1_RRDYEN, sport->port.membase + UCR1);
+ temp = readl(sport->port.membase + UCR4);
+ writel(temp & ~UCR4_OREN, sport->port.membase + UCR4);
+
+ temp = readl(sport->port.membase + UCR2);
+ writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);
}
/*
@@ -425,7 +449,6 @@ static void imx_enable_ms(struct uart_port *port)
mctrl_gpio_enable_ms(sport->gpios);
}
-static void imx_dma_tx(struct imx_port *sport);
static inline void imx_transmit_buffer(struct imx_port *sport)
{
struct circ_buf *xmit = &sport->port.state->xmit;
@@ -456,7 +479,7 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
writel(temp, sport->port.membase + UCR1);
} else {
writel(temp, sport->port.membase + UCR1);
- imx_dma_tx(sport);
+ schedule_work(&sport->tsk_dma_tx);
}
}
@@ -482,95 +505,99 @@ static void dma_tx_callback(void *data)
struct scatterlist *sgl = &sport->tx_sgl[0];
struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long flags;
- unsigned long temp;
+ /* update the stat */
spin_lock_irqsave(&sport->port.lock, flags);
+ /* the user may have called .flush() before this callback ran */
+ if (!sport->dma_is_txing) {
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ return;
+ }
+ sport->dma_is_txing = 0;
- dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
-
- temp = readl(sport->port.membase + UCR1);
- temp &= ~UCR1_TDMAEN;
- writel(temp, sport->port.membase + UCR1);
-
- /* update the stat */
xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
sport->port.icount.tx += sport->tx_bytes;
+ spin_unlock_irqrestore(&sport->port.lock, flags);
- dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
+ dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
- sport->dma_is_txing = 0;
+ dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ clear_bit(DMA_TX_IS_WORKING, &sport->flags);
+ smp_mb__after_atomic();
+ uart_write_wakeup(&sport->port);
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&sport->port);
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
+ schedule_work(&sport->tsk_dma_tx);
if (waitqueue_active(&sport->dma_wait)) {
wake_up(&sport->dma_wait);
dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
return;
}
-
- spin_lock_irqsave(&sport->port.lock, flags);
- if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
- imx_dma_tx(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
}
-static void imx_dma_tx(struct imx_port *sport)
+static void dma_tx_work(struct work_struct *w)
{
+ struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_tx);
struct circ_buf *xmit = &sport->port.state->xmit;
struct scatterlist *sgl = sport->tx_sgl;
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan = sport->dma_chan_tx;
struct device *dev = sport->port.dev;
+ unsigned long flags;
unsigned long temp;
int ret;
- if (sport->dma_is_txing)
+ if (test_and_set_bit(DMA_TX_IS_WORKING, &sport->flags))
return;
+ spin_lock_irqsave(&sport->port.lock, flags);
sport->tx_bytes = uart_circ_chars_pending(xmit);
- if (xmit->tail < xmit->head || xmit->head == 0) {
- sport->dma_tx_nents = 1;
- sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
- } else {
- sport->dma_tx_nents = 2;
- sg_init_table(sgl, 2);
- sg_set_buf(sgl, xmit->buf + xmit->tail,
- UART_XMIT_SIZE - xmit->tail);
- sg_set_buf(sgl + 1, xmit->buf, xmit->head);
- }
-
- ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
- if (ret == 0) {
- dev_err(dev, "DMA mapping error for TX.\n");
- return;
- }
- desc = dmaengine_prep_slave_sg(chan, sgl, ret,
- DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
- if (!desc) {
- dma_unmap_sg(dev, sgl, sport->dma_tx_nents,
- DMA_TO_DEVICE);
- dev_err(dev, "We cannot prepare for the TX slave dma!\n");
- return;
- }
- desc->callback = dma_tx_callback;
- desc->callback_param = sport;
+ if (sport->tx_bytes > 0) {
+ if (xmit->tail > xmit->head && xmit->head > 0) {
+ sport->dma_tx_nents = 2;
+ sg_init_table(sgl, 2);
+ sg_set_buf(sgl, xmit->buf + xmit->tail,
+ UART_XMIT_SIZE - xmit->tail);
+ sg_set_buf(sgl + 1, xmit->buf, xmit->head);
+ } else {
+ sport->dma_tx_nents = 1;
+ sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
+ }
+ spin_unlock_irqrestore(&sport->port.lock, flags);
- dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
- uart_circ_chars_pending(xmit));
+ ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+ if (ret == 0) {
+ dev_err(dev, "DMA mapping error for TX.\n");
+ goto err_out;
+ }
+ desc = dmaengine_prep_slave_sg(chan, sgl, ret,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "We cannot prepare for the TX slave dma!\n");
+ goto err_out;
+ }
+ desc->callback = dma_tx_callback;
+ desc->callback_param = sport;
- temp = readl(sport->port.membase + UCR1);
- temp |= UCR1_TDMAEN;
- writel(temp, sport->port.membase + UCR1);
+ dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
+ uart_circ_chars_pending(xmit));
+ /* fire it */
+ sport->dma_is_txing = 1;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
- /* fire it */
- sport->dma_is_txing = 1;
- dmaengine_submit(desc);
- dma_async_issue_pending(chan);
- return;
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_TDMAEN;
+ writel(temp, sport->port.membase + UCR1);
+ return;
+ }
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+err_out:
+ clear_bit(DMA_TX_IS_WORKING, &sport->flags);
+ smp_mb__after_atomic();
}
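dma_tx_work() maps the pending bytes of the UART circular transmit buffer into one scatterlist entry when they are contiguous, or two when they wrap past UART_XMIT_SIZE. A self-contained sketch of that split, using plain offset/length pairs instead of struct scatterlist:

#include <stdio.h>

#define UART_XMIT_SIZE 4096     /* power of two, as in the kernel */

struct seg { unsigned int off, len; };

/* split pending [tail, head) into at most two contiguous segments */
static int xmit_segments(unsigned int head, unsigned int tail,
                         struct seg seg[2])
{
        unsigned int pending = (head - tail) & (UART_XMIT_SIZE - 1);

        if (!pending)
                return 0;
        if (tail < head || head == 0) {         /* contiguous */
                seg[0].off = tail;
                seg[0].len = pending;
                return 1;
        }
        seg[0].off = tail;                      /* tail .. end of buffer */
        seg[0].len = UART_XMIT_SIZE - tail;
        seg[1].off = 0;                         /* wrapped part */
        seg[1].len = head;
        return 2;
}

int main(void)
{
        struct seg s[2];
        int n = xmit_segments(100, 4000, s);    /* wrapped case */

        for (int i = 0; i < n; i++)
                printf("seg %d: off=%u len=%u\n", i, s[i].off, s[i].len);
        return 0;
}

The hunk above tests the inverted condition (xmit->tail > xmit->head && xmit->head > 0) for the two-entry case, but it selects the same pair of segments.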
/*
@@ -615,7 +642,7 @@ static void imx_start_tx(struct uart_port *port)
if (!uart_circ_empty(&port->state->xmit) &&
!uart_tx_stopped(port))
- imx_dma_tx(sport);
+ schedule_work(&sport->tsk_dma_tx);
return;
}
}
@@ -718,56 +745,22 @@ out:
return IRQ_HANDLED;
}
-static void clear_rx_errors(struct imx_port *sport);
-static int start_rx_dma(struct imx_port *sport);
-/*
- * If the RXFIFO is filled with some data, and then we
- * arise a DMA operation to receive them.
- */
-static void imx_dma_rxint(struct imx_port *sport)
-{
- unsigned long temp;
- unsigned long flags;
-
- spin_lock_irqsave(&sport->port.lock, flags);
-
- temp = readl(sport->port.membase + USR2);
- if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
- sport->dma_is_rxing = 1;
-
- /* disable the receiver ready and aging timer interrupts */
- temp = readl(sport->port.membase + UCR1);
- temp &= ~(UCR1_RRDYEN);
- writel(temp, sport->port.membase + UCR1);
-
- temp = readl(sport->port.membase + UCR2);
- temp &= ~(UCR2_ATEN);
- writel(temp, sport->port.membase + UCR2);
-
- /* disable the rx errors interrupts */
- temp = readl(sport->port.membase + UCR4);
- temp &= ~UCR4_OREN;
- writel(temp, sport->port.membase + UCR4);
-
- /* tell the DMA to receive the data. */
- start_rx_dma(sport);
- }
-
- spin_unlock_irqrestore(&sport->port.lock, flags);
-}
-
/*
* We have a modem side uart, so the meanings of RTS and CTS are inverted.
*/
static unsigned int imx_get_hwmctrl(struct imx_port *sport)
{
- unsigned int tmp = TIOCM_DSR;
- unsigned usr1 = readl(sport->port.membase + USR1);
- unsigned usr2 = readl(sport->port.membase + USR2);
+ unsigned int tmp = TIOCM_DSR | TIOCM_CAR;
+ unsigned int usr1 = readl(sport->port.membase + USR1);
+ unsigned int usr2 = readl(sport->port.membase + USR2);
+ unsigned int ucr2 = readl(sport->port.membase + UCR2);
if (usr1 & USR1_RTSS)
tmp |= TIOCM_CTS;
+ if (ucr2 & UCR2_CTS)
+ tmp |= TIOCM_RTS;
+
/* in DCE mode DCDIN is always 0 */
if (!(usr2 & USR2_DCDIN))
tmp |= TIOCM_CAR;
@@ -776,6 +769,9 @@ static unsigned int imx_get_hwmctrl(struct imx_port *sport)
if (!(readl(sport->port.membase + USR2) & USR2_RIIN))
tmp |= TIOCM_RI;
+ if (readl(sport->port.membase + uts_reg(sport)) & UTS_LOOP)
+ tmp |= TIOCM_LOOP;
+
return tmp;
}
@@ -811,26 +807,22 @@ static irqreturn_t imx_int(int irq, void *dev_id)
struct imx_port *sport = dev_id;
unsigned int sts;
unsigned int sts2;
- irqreturn_t ret = IRQ_NONE;
sts = readl(sport->port.membase + USR1);
sts2 = readl(sport->port.membase + USR2);
- if (sts & (USR1_RRDY | USR1_AGTIM)) {
- if (sport->dma_is_enabled)
- imx_dma_rxint(sport);
- else
- imx_rxint(irq, dev_id);
- ret = IRQ_HANDLED;
+ if ((sts & USR1_RRDY || sts & USR1_AGTIM) &&
+ !sport->dma_is_enabled) {
+ if (sts & USR1_AGTIM)
+ writel(USR1_AGTIM, sport->port.membase + USR1);
+ imx_rxint(irq, dev_id);
}
if ((sts & USR1_TRDY &&
readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN) ||
(sts2 & USR2_TXDC &&
- readl(sport->port.membase + UCR4) & UCR4_TCEN)) {
+ readl(sport->port.membase + UCR4) & UCR4_TCEN))
imx_txint(irq, dev_id);
- ret = IRQ_HANDLED;
- }
if (sts & USR1_DTRD) {
unsigned long flags;
@@ -841,27 +833,20 @@ static irqreturn_t imx_int(int irq, void *dev_id)
spin_lock_irqsave(&sport->port.lock, flags);
imx_mctrl_check(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
-
- ret = IRQ_HANDLED;
}
- if (sts & USR1_RTSD) {
+ if (sts & USR1_RTSD)
imx_rtsint(irq, dev_id);
- ret = IRQ_HANDLED;
- }
- if (sts & USR1_AWAKE) {
+ if (sts & USR1_AWAKE)
writel(USR1_AWAKE, sport->port.membase + USR1);
- ret = IRQ_HANDLED;
- }
if (sts2 & USR2_ORE) {
sport->port.icount.overrun++;
writel(USR2_ORE, sport->port.membase + USR2);
- ret = IRQ_HANDLED;
}
- return ret;
+ return IRQ_HANDLED;
}
/*
@@ -881,6 +866,9 @@ static unsigned int imx_tx_empty(struct uart_port *port)
return ret;
}
+/*
+ * We have a modem side uart, so the meanings of RTS and CTS are inverted.
+ */
static unsigned int imx_get_mctrl(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
@@ -937,6 +925,97 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
spin_unlock_irqrestore(&sport->port.lock, flags);
}
+#define TXTL 2 /* reset default */
+#define RXTL 1 /* For console port */
+#define RXTL_UART 16 /* For uart */
+
+static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
+{
+ unsigned int val;
+ unsigned int rx_fifo_trig;
+
+ if (uart_console(&sport->port))
+ rx_fifo_trig = RXTL;
+ else
+ rx_fifo_trig = RXTL_UART;
+
+ /* set receiver / transmitter trigger level */
+ val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
+ val |= TXTL << UFCR_TXTL_SHF | rx_fifo_trig;
+ writel(val, sport->port.membase + UFCR);
+ return 0;
+}
+
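imx_setup_ufcr() keeps RXTL = 1 on the console so the RRDY interrupt fires per character, and RXTL_UART = 16 elsewhere to match the 16-byte src_maxburst programmed for RX DMA below. A worked composition of the register value; UFCR_TXTL_SHF = 10 is this driver's usual field offset, assumed here rather than taken from the hunk:

#include <stdio.h>
#include <stdint.h>

#define UFCR_TXTL_SHF 10        /* assumed TX trigger level field offset */

int main(void)
{
        uint32_t preserved = 0x0200;    /* e.g. RFDIV bits read back from UFCR */
        uint32_t val = preserved | (2u << UFCR_TXTL_SHF) | 16u; /* TXTL=2, RXTL=16 */

        printf("UFCR = 0x%04x\n", (unsigned)val);       /* 0x0a10 */
        return 0;
}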
+#define RX_BUF_SIZE (PAGE_SIZE)
+static void dma_rx_push_data(struct imx_port *sport, struct tty_struct *tty,
+ unsigned int start, unsigned int end)
+{
+ unsigned int i;
+ struct tty_port *port = &sport->port.state->port;
+
+ for (i = start; i < end; i++) {
+ if (sport->rx_buf.buf_info[i].filled) {
+ tty_insert_flip_string(port, sport->rx_buf.buf +
+ (i * RX_BUF_SIZE), sport->rx_buf.buf_info[i].rx_bytes);
+ tty_flip_buffer_push(port);
+ sport->rx_buf.buf_info[i].filled = false;
+ sport->rx_buf.last_completed_idx++;
+ sport->rx_buf.last_completed_idx %= IMX_RXBD_NUM;
+ sport->port.icount.rx += sport->rx_buf.buf_info[i].rx_bytes;
+ }
+ }
+}
+
+static void dma_rx_work(struct imx_port *sport)
+{
+ struct tty_struct *tty = sport->port.state->port.tty;
+ unsigned int cur_idx = sport->rx_buf.cur_idx;
+
+ if (sport->rx_buf.last_completed_idx < cur_idx) {
+ dma_rx_push_data(sport, tty, sport->rx_buf.last_completed_idx + 1, cur_idx);
+ } else if (sport->rx_buf.last_completed_idx == (IMX_RXBD_NUM - 1)) {
+ dma_rx_push_data(sport, tty, 0, cur_idx);
+ } else {
+ dma_rx_push_data(sport, tty, sport->rx_buf.last_completed_idx + 1,
+ IMX_RXBD_NUM);
+ dma_rx_push_data(sport, tty, 0, cur_idx);
+ }
+}
+
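dma_rx_work() above needs three branches because last_completed_idx names the last buffer already pushed, so the half-open drain range may wrap around the ring. Worked indices with IMX_RXBD_NUM = 20 (end index exclusive):

/*
 *  last_completed_idx = 10, cur_idx = 14 -> push [11, 14)              (in order)
 *  last_completed_idx = 19, cur_idx = 4  -> push [0, 4)                (tail at end)
 *  last_completed_idx = 17, cur_idx = 3  -> push [18, 20), then [0, 3) (wrapped)
 */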
+static void imx_rx_dma_done(struct imx_port *sport)
+{
+ sport->dma_is_rxing = 0;
+
+ /* Is the shutdown waiting for us? */
+ if (waitqueue_active(&sport->dma_wait))
+ wake_up(&sport->dma_wait);
+}
+
+static void clear_rx_errors(struct imx_port *sport)
+{
+ unsigned int status_usr1, status_usr2;
+
+ status_usr1 = readl(sport->port.membase + USR1);
+ status_usr2 = readl(sport->port.membase + USR2);
+
+ if (status_usr2 & USR2_BRCD) {
+ sport->port.icount.brk++;
+ writel(USR2_BRCD, sport->port.membase + USR2);
+ } else if (status_usr1 & USR1_FRAMERR) {
+ sport->port.icount.frame++;
+ writel(USR1_FRAMERR, sport->port.membase + USR1);
+ } else if (status_usr1 & USR1_PARITYERR) {
+ sport->port.icount.parity++;
+ writel(USR1_PARITYERR, sport->port.membase + USR1);
+ }
+
+ if (status_usr2 & USR2_ORE) {
+ sport->port.icount.overrun++;
+ writel(USR2_ORE, sport->port.membase + USR2);
+ }
+
+}
+
/*
* This is our per-port timeout handler, for checking the
* modem status signals.
@@ -955,188 +1034,103 @@ static void imx_timeout(unsigned long data)
}
}
-#define RX_BUF_SIZE (PAGE_SIZE)
-
/*
- * There are two kinds of RX DMA interrupts(such as in the MX6Q):
+ * There are three kinds of RX DMA interrupts (such as on the MX6Q):
* [1] the RX DMA buffer is full.
- * [2] the aging timer expires
+ * [2] the aging timer expires (after waiting for 8 byte periods)
+ * [3] the idle condition is detected (when UCR4_IDDMAEN is enabled).
*
- * Condition [2] is triggered when a character has been sitting in the FIFO
- * for at least 8 byte durations.
+ * Condition [2] triggers when a character has been sitting in the FIFO,
+ * while [3] waits for 32 idle frames once the RX line is idle and the
+ * RxFIFO is empty.
*/
static void dma_rx_callback(void *data)
{
struct imx_port *sport = data;
struct dma_chan *chan = sport->dma_chan_rx;
- struct scatterlist *sgl = &sport->rx_sgl;
- struct tty_port *port = &sport->port.state->port;
+ struct tty_struct *tty = sport->port.state->port.tty;
struct dma_tx_state state;
- struct circ_buf *rx_ring = &sport->rx_ring;
enum dma_status status;
- unsigned int w_bytes = 0;
- unsigned int r_bytes;
- unsigned int bd_size;
+ unsigned int count;
- status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
+ /* If we have finished reading, we will not accept any more data. */
+ if (tty->closing) {
+ imx_rx_dma_done(sport);
+ return;
+ }
+ status = dmaengine_tx_status(chan, sport->rx_buf.cookie, &state);
if (status == DMA_ERROR) {
dev_err(sport->port.dev, "DMA transaction error.\n");
clear_rx_errors(sport);
return;
}
- if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
-
- /*
- * The state-residue variable represents the empty space
- * relative to the entire buffer. Taking this in consideration
- * the head is always calculated base on the buffer total
- * length - DMA transaction residue. The UART script from the
- * SDMA firmware will jump to the next buffer descriptor,
- * once a DMA transaction if finalized (IMX53 RM - A.4.1.2.4).
- * Taking this in consideration the tail is always at the
- * beginning of the buffer descriptor that contains the head.
- */
-
- /* Calculate the head */
- rx_ring->head = sg_dma_len(sgl) - state.residue;
-
- /* Calculate the tail. */
- bd_size = sg_dma_len(sgl) / sport->rx_periods;
- rx_ring->tail = ((rx_ring->head-1) / bd_size) * bd_size;
-
- if (rx_ring->head <= sg_dma_len(sgl) &&
- rx_ring->head > rx_ring->tail) {
+ count = RX_BUF_SIZE - state.residue;
+ sport->rx_buf.buf_info[sport->rx_buf.cur_idx].filled = true;
+ sport->rx_buf.buf_info[sport->rx_buf.cur_idx].rx_bytes = count;
+ sport->rx_buf.cur_idx++;
+ sport->rx_buf.cur_idx %= IMX_RXBD_NUM;
+ dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
- /* Move data from tail to head */
- r_bytes = rx_ring->head - rx_ring->tail;
+ if (sport->rx_buf.cur_idx == sport->rx_buf.last_completed_idx)
+ dev_err(sport->port.dev, "overwrite!\n");
- /* CPU claims ownership of RX DMA buffer */
- dma_sync_sg_for_cpu(sport->port.dev, sgl, 1,
- DMA_FROM_DEVICE);
-
- w_bytes = tty_insert_flip_string(port,
- sport->rx_buf + rx_ring->tail, r_bytes);
-
- /* UART retrieves ownership of RX DMA buffer */
- dma_sync_sg_for_device(sport->port.dev, sgl, 1,
- DMA_FROM_DEVICE);
-
- if (w_bytes != r_bytes)
- sport->port.icount.buf_overrun++;
-
- sport->port.icount.rx += w_bytes;
- } else {
- WARN_ON(rx_ring->head > sg_dma_len(sgl));
- WARN_ON(rx_ring->head <= rx_ring->tail);
- }
- }
-
- if (w_bytes) {
- tty_flip_buffer_push(port);
- dev_dbg(sport->port.dev, "We get %d bytes.\n", w_bytes);
- }
+ if (count)
+ dma_rx_work(sport);
}
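Every cyclic period is RX_BUF_SIZE long, so the bytes received in the period that just completed fall out of the residue reported by dmaengine_tx_status(); the callback above assumes the engine reports the unused remainder of the current period, as the i.MX SDMA driver does for cyclic transfers. The arithmetic, as a trivial stand-alone sketch:

#include <stdio.h>

#define RX_BUF_SIZE 4096u       /* PAGE_SIZE on most ARM platforms */

int main(void)
{
        /* residue for the current period: an idle-condition interrupt
         * after 300 bytes leaves 3796 bytes of the period unused */
        unsigned int residue = 3796;
        unsigned int count = RX_BUF_SIZE - residue;

        printf("received %u bytes in this period\n", count);    /* 300 */
        return 0;
}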
-/* RX DMA buffer periods */
-#define RX_DMA_PERIODS 4
-
static int start_rx_dma(struct imx_port *sport)
{
- struct scatterlist *sgl = &sport->rx_sgl;
struct dma_chan *chan = sport->dma_chan_rx;
- struct device *dev = sport->port.dev;
struct dma_async_tx_descriptor *desc;
- int ret;
-
- sport->rx_ring.head = 0;
- sport->rx_ring.tail = 0;
- sport->rx_periods = RX_DMA_PERIODS;
- sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
- ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
- if (ret == 0) {
- dev_err(dev, "DMA mapping error for RX.\n");
- return -EINVAL;
- }
-
- desc = dmaengine_prep_dma_cyclic(chan, sg_dma_address(sgl),
- sg_dma_len(sgl), sg_dma_len(sgl) / sport->rx_periods,
+ sport->rx_buf.periods = IMX_RXBD_NUM;
+ sport->rx_buf.period_len = RX_BUF_SIZE;
+ sport->rx_buf.buf_len = IMX_RXBD_NUM * RX_BUF_SIZE;
+ sport->rx_buf.cur_idx = 0;
+ sport->rx_buf.last_completed_idx = -1;
+ desc = dmaengine_prep_dma_cyclic(chan, sport->rx_buf.dmaaddr,
+ sport->rx_buf.buf_len, sport->rx_buf.period_len,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
-
if (!desc) {
- dma_unmap_sg(dev, sgl, 1, DMA_FROM_DEVICE);
- dev_err(dev, "We cannot prepare for the RX slave dma!\n");
+ dev_err(sport->port.dev, "Prepare for the RX slave dma failed!\n");
return -EINVAL;
}
+
desc->callback = dma_rx_callback;
desc->callback_param = sport;
- dev_dbg(dev, "RX: prepare for the DMA.\n");
- sport->rx_cookie = dmaengine_submit(desc);
+ dev_dbg(sport->port.dev, "RX: prepare for the DMA.\n");
+ sport->rx_buf.cookie = dmaengine_submit(desc);
dma_async_issue_pending(chan);
- return 0;
-}
-
-static void clear_rx_errors(struct imx_port *sport)
-{
- unsigned int status_usr1, status_usr2;
-
- status_usr1 = readl(sport->port.membase + USR1);
- status_usr2 = readl(sport->port.membase + USR2);
-
- if (status_usr2 & USR2_BRCD) {
- sport->port.icount.brk++;
- writel(USR2_BRCD, sport->port.membase + USR2);
- } else if (status_usr1 & USR1_FRAMERR) {
- sport->port.icount.frame++;
- writel(USR1_FRAMERR, sport->port.membase + USR1);
- } else if (status_usr1 & USR1_PARITYERR) {
- sport->port.icount.parity++;
- writel(USR1_PARITYERR, sport->port.membase + USR1);
- }
-
- if (status_usr2 & USR2_ORE) {
- sport->port.icount.overrun++;
- writel(USR2_ORE, sport->port.membase + USR2);
- }
-
-}
-
-#define TXTL_DEFAULT 2 /* reset default */
-#define RXTL_DEFAULT 1 /* reset default */
-#define TXTL_DMA 8 /* DMA burst setting */
-#define RXTL_DMA 9 /* DMA burst setting */
-static void imx_setup_ufcr(struct imx_port *sport,
- unsigned char txwl, unsigned char rxwl)
-{
- unsigned int val;
-
- /* set receiver / transmitter trigger level */
- val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
- val |= txwl << UFCR_TXTL_SHF | rxwl;
- writel(val, sport->port.membase + UFCR);
+ sport->dma_is_rxing = 1;
+ return 0;
}
static void imx_uart_dma_exit(struct imx_port *sport)
{
if (sport->dma_chan_rx) {
- dmaengine_terminate_sync(sport->dma_chan_rx);
dma_release_channel(sport->dma_chan_rx);
sport->dma_chan_rx = NULL;
- sport->rx_cookie = -EINVAL;
- kfree(sport->rx_buf);
- sport->rx_buf = NULL;
+
+ if (sport->rx_buf.buf) {
+ dma_free_coherent(sport->port.dev, IMX_RXBD_NUM * RX_BUF_SIZE,
+ (void *)sport->rx_buf.buf,
+ sport->rx_buf.dmaaddr);
+ sport->rx_buf.buf = NULL;
+ }
}
if (sport->dma_chan_tx) {
- dmaengine_terminate_sync(sport->dma_chan_tx);
dma_release_channel(sport->dma_chan_tx);
sport->dma_chan_tx = NULL;
}
+ if (sport->dma_is_inited)
+ release_bus_freq(BUS_FREQ_HIGH);
+
sport->dma_is_inited = 0;
}
@@ -1144,7 +1138,7 @@ static int imx_uart_dma_init(struct imx_port *sport)
{
struct dma_slave_config slave_config = {};
struct device *dev = sport->port.dev;
- int ret;
+ int ret, i;
/* Prepare for RX : */
sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
@@ -1157,20 +1151,25 @@ static int imx_uart_dma_init(struct imx_port *sport)
slave_config.direction = DMA_DEV_TO_MEM;
slave_config.src_addr = sport->port.mapbase + URXD0;
slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- /* one byte less than the watermark level to enable the aging timer */
- slave_config.src_maxburst = RXTL_DMA - 1;
+ slave_config.src_maxburst = RXTL_UART;
ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
if (ret) {
dev_err(dev, "error in RX dma configuration.\n");
goto err;
}
- sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!sport->rx_buf) {
+ sport->rx_buf.buf = dma_alloc_coherent(sport->port.dev, IMX_RXBD_NUM * RX_BUF_SIZE,
+ &sport->rx_buf.dmaaddr, GFP_KERNEL);
+ if (!sport->rx_buf.buf) {
+ dev_err(dev, "cannot alloc DMA buffer.\n");
ret = -ENOMEM;
goto err;
}
- sport->rx_ring.buf = sport->rx_buf;
+
+ for (i = 0; i < IMX_RXBD_NUM; i++) {
+ sport->rx_buf.buf_info[i].rx_bytes = 0;
+ sport->rx_buf.buf_info[i].filled = false;
+ }
/* Prepare for TX : */
sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
@@ -1183,7 +1182,7 @@ static int imx_uart_dma_init(struct imx_port *sport)
slave_config.direction = DMA_MEM_TO_DEV;
slave_config.dst_addr = sport->port.mapbase + URTX0;
slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- slave_config.dst_maxburst = TXTL_DMA;
+ slave_config.dst_maxburst = TXTL;
ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
if (ret) {
dev_err(dev, "error in TX dma configuration.");
@@ -1191,6 +1190,7 @@ static int imx_uart_dma_init(struct imx_port *sport)
}
sport->dma_is_inited = 1;
+ request_bus_freq(BUS_FREQ_HIGH);
return 0;
err:
@@ -1203,17 +1203,19 @@ static void imx_enable_dma(struct imx_port *sport)
unsigned long temp;
init_waitqueue_head(&sport->dma_wait);
+ sport->flags = 0;
/* set UCR1 */
temp = readl(sport->port.membase + UCR1);
- temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN;
+ temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN |
+ /* wait for 32 idle frames for IDDMA interrupt */
+ UCR1_ICD_REG(3);
writel(temp, sport->port.membase + UCR1);
- temp = readl(sport->port.membase + UCR2);
- temp |= UCR2_ATEN;
- writel(temp, sport->port.membase + UCR2);
-
- imx_setup_ufcr(sport, TXTL_DMA, RXTL_DMA);
+ /* set UCR4 */
+ temp = readl(sport->port.membase + UCR4);
+ temp |= UCR4_IDDMAEN;
+ writel(temp, sport->port.membase + UCR4);
sport->dma_is_enabled = 1;
}
@@ -1229,10 +1231,13 @@ static void imx_disable_dma(struct imx_port *sport)
/* clear UCR2 */
temp = readl(sport->port.membase + UCR2);
- temp &= ~(UCR2_CTSC | UCR2_CTS | UCR2_ATEN);
+ temp &= ~(UCR2_CTSC | UCR2_CTS);
writel(temp, sport->port.membase + UCR2);
- imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
+ /* clear UCR4 */
+ temp = readl(sport->port.membase + UCR4);
+ temp &= ~UCR4_IDDMAEN;
+ writel(temp, sport->port.membase + UCR4);
sport->dma_is_enabled = 0;
}
@@ -1243,9 +1248,17 @@ static void imx_disable_dma(struct imx_port *sport)
static int imx_startup(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
+ struct tty_port *tty_port = &sport->port.state->port;
int retval, i;
unsigned long flags, temp;
+ /* some modems may need a reset */
+ if (!tty_port_suspended(tty_port)) {
+ retval = device_reset(sport->port.dev);
+ if (retval && retval != -ENOENT)
+ return retval;
+ }
+
retval = clk_prepare_enable(sport->clk_per);
if (retval)
return retval;
@@ -1255,7 +1268,7 @@ static int imx_startup(struct uart_port *port)
return retval;
}
- imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
+ imx_setup_ufcr(sport, 0);
/* disable the DREN bit (Data Ready interrupt enable) before
* requesting IRQs
@@ -1268,11 +1281,6 @@ static int imx_startup(struct uart_port *port)
writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
- /* Can we enable the DMA support? */
- if (!uart_console(port) && !sport->dma_is_inited)
- imx_uart_dma_init(sport);
-
- spin_lock_irqsave(&sport->port.lock, flags);
/* Reset fifo's and state machines */
i = 100;
@@ -1283,18 +1291,28 @@ static int imx_startup(struct uart_port *port)
while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
udelay(1);
+ /* Can we enable the DMA support? */
+ if (is_imx6q_uart(sport) && !uart_console(port)
+ && !sport->dma_is_inited)
+ imx_uart_dma_init(sport);
+
+ if (sport->dma_is_inited)
+ INIT_WORK(&sport->tsk_dma_tx, dma_tx_work);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
/*
* Finally, clear and enable interrupts
*/
- writel(USR1_RTSD | USR1_DTRD, sport->port.membase + USR1);
+ writel(USR1_RTSD, sport->port.membase + USR1);
writel(USR2_ORE, sport->port.membase + USR2);
- if (sport->dma_is_inited && !sport->dma_is_enabled)
- imx_enable_dma(sport);
-
temp = readl(sport->port.membase + UCR1);
- temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
-
+ if (!sport->dma_is_inited)
+ temp |= UCR1_RRDYEN;
+ if (sport->have_rtscts)
+ temp |= UCR1_RTSDEN;
+ temp |= UCR1_UARTEN;
writel(temp, sport->port.membase + UCR1);
temp = readl(sport->port.membase + UCR4);
@@ -1341,10 +1359,20 @@ static void imx_shutdown(struct uart_port *port)
unsigned long flags;
if (sport->dma_is_enabled) {
- sport->dma_is_rxing = 0;
- sport->dma_is_txing = 0;
- dmaengine_terminate_sync(sport->dma_chan_tx);
- dmaengine_terminate_sync(sport->dma_chan_rx);
+ int ret;
+
+ /* We have to wait for the DMA to finish. */
+ ret = wait_event_interruptible_timeout(sport->dma_wait,
+ !sport->dma_is_rxing && !sport->dma_is_txing,
+ msecs_to_jiffies(1));
+ if (ret <= 0) {
+ sport->dma_is_rxing = 0;
+ sport->dma_is_txing = 0;
+ dmaengine_terminate_all(sport->dma_chan_tx);
+ dmaengine_terminate_all(sport->dma_chan_rx);
+ }
+
+ cancel_work_sync(&sport->tsk_dma_tx);
spin_lock_irqsave(&sport->port.lock, flags);
imx_stop_tx(port);
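imx_shutdown() now gives in-flight DMA a short grace period on dma_wait before terminating the channels; dma_tx_callback() and imx_rx_dma_done() wake that queue once their direction goes idle. A hedged userspace analogue of that wait-then-force shape, with a pthread condition variable standing in for the wait queue (the 1 ms budget mirrors msecs_to_jiffies(1)):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t dma_wait = PTHREAD_COND_INITIALIZER;
static bool rxing = true, txing = true;

/* analogue of imx_shutdown(): wait briefly, then force-terminate */
static void shutdown_dma(void)
{
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_nsec += 1000000;            /* ~1 ms grace period */
        if (deadline.tv_nsec >= 1000000000) {
                deadline.tv_sec++;
                deadline.tv_nsec -= 1000000000;
        }

        pthread_mutex_lock(&lock);
        while (rxing || txing) {
                if (pthread_cond_timedwait(&dma_wait, &lock, &deadline)) {
                        /* plays the role of dmaengine_terminate_all() */
                        rxing = txing = false;
                        puts("timed out: terminated channels");
                        break;
                }
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        shutdown_dma();
        return 0;
}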
@@ -1400,7 +1428,9 @@ static void imx_flush_buffer(struct uart_port *port)
temp = readl(sport->port.membase + UCR1);
temp &= ~UCR1_TDMAEN;
writel(temp, sport->port.membase + UCR1);
- sport->dma_is_txing = false;
+ sport->dma_is_txing = 0;
+ clear_bit(DMA_TX_IS_WORKING, &sport->flags);
+ smp_mb__after_atomic();
}
/*
@@ -1433,10 +1463,9 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
{
struct imx_port *sport = (struct imx_port *)port;
unsigned long flags;
- unsigned long ucr2, old_ucr1, old_ucr2;
- unsigned int baud, quot;
+ unsigned long ucr2, old_ucr1, old_txrxen, baud, quot;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
- unsigned long div, ufcr;
+ unsigned int div, ufcr;
unsigned long num, denom;
uint64_t tdiv64;
@@ -1484,7 +1513,6 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
imx_port_rts_active(sport, &ucr2);
}
-
if (termios->c_cflag & CSTOPB)
ucr2 |= UCR2_STPB;
if (termios->c_cflag & PARENB) {
@@ -1544,10 +1572,10 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
barrier();
/* then, disable everything */
- old_ucr2 = readl(sport->port.membase + UCR2);
- writel(old_ucr2 & ~(UCR2_TXEN | UCR2_RXEN),
+ old_txrxen = readl(sport->port.membase + UCR2);
+ writel(old_txrxen & ~(UCR2_TXEN | UCR2_RXEN),
sport->port.membase + UCR2);
- old_ucr2 &= (UCR2_TXEN | UCR2_RXEN | UCR2_ATEN);
+ old_txrxen &= (UCR2_TXEN | UCR2_RXEN);
/* custom-baudrate handling */
div = sport->port.uartclk / (baud * 16);
@@ -1586,11 +1614,21 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
writel(old_ucr1, sport->port.membase + UCR1);
/* set the parity, stop bits and data size */
- writel(ucr2 | old_ucr2, sport->port.membase + UCR2);
+ writel(ucr2 | old_txrxen, sport->port.membase + UCR2);
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
imx_enable_ms(&sport->port);
+ if (sport->dma_is_inited && !sport->dma_is_enabled) {
+ imx_enable_dma(sport);
+ start_rx_dma(sport);
+ }
+
+ if (!sport->dma_is_enabled) {
+ ucr2 = readl(sport->port.membase + UCR2);
+ writel(ucr2 | UCR2_ATEN, sport->port.membase + UCR2);
+ }
+
spin_unlock_irqrestore(&sport->port.lock, flags);
}
@@ -1656,7 +1694,7 @@ static int imx_poll_init(struct uart_port *port)
if (retval)
clk_disable_unprepare(sport->clk_ipg);
- imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
+ imx_setup_ufcr(sport, 0);
spin_lock_irqsave(&sport->port.lock, flags);
@@ -1931,7 +1969,7 @@ imx_console_setup(struct console *co, char *options)
else
imx_console_get_options(sport, &baud, &parity, &bits);
- imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
+ imx_setup_ufcr(sport, 0);
retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
@@ -2130,6 +2168,14 @@ static int serial_imx_probe(struct platform_device *pdev)
}
sport->port.uartclk = clk_get_rate(sport->clk_per);
+ if (sport->port.uartclk > IMX_MODULE_MAX_CLK_RATE) {
+ ret = clk_set_rate(sport->clk_per, IMX_MODULE_MAX_CLK_RATE);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk_set_rate() failed\n");
+ return ret;
+ }
+ }
+ sport->port.uartclk = clk_get_rate(sport->clk_per);
/* For register access, we only need to enable the ipg clock. */
ret = clk_prepare_enable(sport->clk_ipg);
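serial_imx_probe() caps the per clock at IMX_MODULE_MAX_CLK_RATE and then re-reads the rate, because clk_set_rate() may settle on whatever nearby rate the clock tree can actually produce. A sketch of that clamp-and-requery pattern against a hypothetical divider clock (the 480 MHz parent is an assumption for illustration only):

#include <stdio.h>

#define IMX_MODULE_MAX_CLK_RATE 80000000u

/* stub: a divider-based clock can only hit parent/div rates */
static unsigned long clk_rate = 100000000;      /* e.g. 100 MHz initially */

static int clk_set_rate_stub(unsigned long target)
{
        unsigned long parent = 480000000;       /* assumed PLL parent rate */
        unsigned long div = (parent + target - 1) / target;

        clk_rate = parent / div;                /* 480 MHz / 6 = 80 MHz here */
        return 0;
}

int main(void)
{
        if (clk_rate > IMX_MODULE_MAX_CLK_RATE)
                clk_set_rate_stub(IMX_MODULE_MAX_CLK_RATE);
        printf("uartclk = %lu\n", clk_rate);    /* re-query after clamping */
        return 0;
}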
@@ -2230,9 +2276,12 @@ static int serial_imx_remove(struct platform_device *pdev)
static void serial_imx_restore_context(struct imx_port *sport)
{
+ unsigned long flags = 0;
+
if (!sport->context_saved)
return;
+ spin_lock_irqsave(&sport->port.lock, flags);
writel(sport->saved_reg[4], sport->port.membase + UFCR);
writel(sport->saved_reg[5], sport->port.membase + UESC);
writel(sport->saved_reg[6], sport->port.membase + UTIM);
@@ -2244,11 +2293,15 @@ static void serial_imx_restore_context(struct imx_port *sport)
writel(sport->saved_reg[2], sport->port.membase + UCR3);
writel(sport->saved_reg[3], sport->port.membase + UCR4);
sport->context_saved = false;
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
static void serial_imx_save_context(struct imx_port *sport)
{
+ unsigned long flags = 0;
+
/* Save necessary regs */
+ spin_lock_irqsave(&sport->port.lock, flags);
sport->saved_reg[0] = readl(sport->port.membase + UCR1);
sport->saved_reg[1] = readl(sport->port.membase + UCR2);
sport->saved_reg[2] = readl(sport->port.membase + UCR3);
@@ -2260,12 +2313,20 @@ static void serial_imx_save_context(struct imx_port *sport)
sport->saved_reg[8] = readl(sport->port.membase + UBMR);
sport->saved_reg[9] = readl(sport->port.membase + IMX21_UTS);
sport->context_saved = true;
+
+ if (uart_console(&sport->port) && sport->port.sysrq)
+ sport->saved_reg[0] |= UCR1_RRDYEN;
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
{
unsigned int val;
+ val = readl(sport->port.membase + USR1);
+ if (val & (USR1_AWAKE | USR1_RTSD))
+ writel(USR1_AWAKE | USR1_RTSD, sport->port.membase + USR1);
+
val = readl(sport->port.membase + UCR3);
if (on)
val |= UCR3_AWAKEN;
@@ -2293,10 +2354,15 @@ static int imx_serial_port_suspend_noirq(struct device *dev)
if (ret)
return ret;
+ /* enable wakeup from i.MX UART */
+ serial_imx_enable_wakeup(sport, true);
+
serial_imx_save_context(sport);
clk_disable(sport->clk_ipg);
+ pinctrl_pm_select_sleep_state(dev);
+
return 0;
}
@@ -2306,12 +2372,17 @@ static int imx_serial_port_resume_noirq(struct device *dev)
struct imx_port *sport = platform_get_drvdata(pdev);
int ret;
+ pinctrl_pm_select_default_state(dev);
+
ret = clk_enable(sport->clk_ipg);
if (ret)
return ret;
serial_imx_restore_context(sport);
+ /* disable wakeup from i.MX UART */
+ serial_imx_enable_wakeup(sport, false);
+
clk_disable(sport->clk_ipg);
return 0;
@@ -2322,9 +2393,6 @@ static int imx_serial_port_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct imx_port *sport = platform_get_drvdata(pdev);
- /* enable wakeup from i.MX UART */
- serial_imx_enable_wakeup(sport, true);
-
uart_suspend_port(&imx_reg, &sport->port);
/* Needed to enable clock in suspend_noirq */
@@ -2336,9 +2404,6 @@ static int imx_serial_port_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct imx_port *sport = platform_get_drvdata(pdev);
- /* disable wakeup from i.MX UART */
- serial_imx_enable_wakeup(sport, false);
-
uart_resume_port(&imx_reg, &sport->port);
clk_unprepare(sport->clk_ipg);