author    Laxman Dewangan <ldewangan@nvidia.com>    2010-08-19 17:28:49 +0530
committer Yu-Huan Hsu <yhsu@nvidia.com>             2010-08-27 11:48:17 -0700
commit    d1df35d58810ad9019be68b7522ea730281bef91 (patch)
tree      d240a6b96bfd86cd0a06c0acd2adeda1efc7dfb2
parent    72c8f3a64b53539d1c209cc32d1b33d9c7e855d0 (diff)
[arm/tegra] dma: Continuous double buffer repeat transfer.
To get higher performance on UART receive, the DMA must support a continuous double-buffer transfer mode that repeats over the client's buffer. The DMA keeps filling the same buffer and notifies the client when the half-buffer and full-buffer transfers complete. Also added support to start and stop the DMA without enqueueing/dequeueing requests.

Bug 725085

Change-Id: I994af55d5e5b2e7f17b889aaa00ca57942bebac8
Reviewed-on: http://git-master/r/4630
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
Tested-by: Laxman Dewangan <ldewangan@nvidia.com>
Tested-by: Rakesh Goyal <rgoyal@nvidia.com>
Reviewed-by: Yu-Huan Hsu <yhsu@nvidia.com>
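For context, a minimal sketch (not part of this patch) of how a client such as the UART driver might use the new repeat-transfer mode. It assumes the driver's existing tegra_dma_enqueue_req() entry point; dest_addr and size are fields of the driver's struct tegra_dma_req, while the helper names and wiring here are illustrative assumptions:

    #include <mach/dma.h>

    /* Fires at every half-buffer and full-buffer boundary while the
     * repeat request stays in flight; drain the half that just filled. */
    static void uart_rx_threshold(struct tegra_dma_req *req)
    {
        /* ... copy data out of the client buffer ... */
    }

    static int uart_rx_setup(struct tegra_dma_channel *ch,
                             struct tegra_dma_req *req,
                             dma_addr_t buf_phys, int buf_size)
    {
        req->is_repeat_req = true;          /* keep refilling this buffer */
        req->threshold = uart_rx_threshold;
        req->dest_addr = buf_phys;          /* DMA writes into the buffer */
        req->size = buf_size;
        return tegra_dma_enqueue_req(ch, req);
    }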
-rw-r--r--  arch/arm/mach-tegra/dma.c               | 184
-rw-r--r--  arch/arm/mach-tegra/include/mach/dma.h  |   9
2 files changed, 152 insertions(+), 41 deletions(-)
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index feaa7fdb46bf..7eb0bb758b67 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -223,6 +223,45 @@ static unsigned int dma_active_count(struct tegra_dma_channel *ch,
return bytes_transferred;
}
+static unsigned int get_channel_status(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req, bool is_stop_dma)
+{
+ static DEFINE_SPINLOCK(global_dma_bit_access_lock);
+ void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+ unsigned long g_irq_flags;
+ unsigned int status;
+
+ if (is_stop_dma) {
+ /* STOP the DMA and get the transfer count.
+ * Getting the transfer count is tricky:
+ * - Globally disable DMA on all channels
+ * - Read the channel's status register to know the number of pending
+ * bytes to be transferred
+ * - Stop the DMA channel
+ * - Globally re-enable DMA to resume other transfers
+ */
+ spin_lock_irqsave(&global_dma_bit_access_lock, g_irq_flags);
+ writel(0, addr + APB_DMA_GEN);
+ status = readl(ch->addr + APB_DMA_CHAN_STA);
+ tegra_dma_stop(ch);
+ writel(GEN_ENABLE, addr + APB_DMA_GEN);
+ spin_unlock_irqrestore(&global_dma_bit_access_lock, g_irq_flags);
+
+ if (status & STA_ISE_EOC) {
+ pr_err("Got Dma Int here clearing");
+ writel(status, ch->addr + APB_DMA_CHAN_STA);
+ }
+ req->status = TEGRA_DMA_REQ_STOPPED;
+ } else {
+ status = readl(ch->addr + APB_DMA_CHAN_STA);
+ if (status & STA_ISE_EOC) {
+ pr_err("Got Dma Int here clearing");
+ writel(status, ch->addr + APB_DMA_CHAN_STA);
+ }
+ }
+ return status;
+}
+
unsigned int tegra_dma_transferred_req(struct tegra_dma_channel *ch,
struct tegra_dma_req *req)
{
@@ -246,16 +285,82 @@ unsigned int tegra_dma_transferred_req(struct tegra_dma_channel *ch,
}
EXPORT_SYMBOL(tegra_dma_transferred_req);
+int tegra_dma_get_transfer_count(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req, bool is_stop_dma)
+{
+ unsigned int status;
+ unsigned long irq_flags;
+ int bytes_transferred = 0;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+ if (list_entry(ch->list.next, struct tegra_dma_req, node) != req) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ pr_err("The dma request is not running\n");
+ return -1;
+ }
+
+ if (req->status != TEGRA_DMA_REQ_INFLIGHT) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ pr_err("The dma request is not running\n");
+ return -1;
+ }
+ status = get_channel_status(ch, req, is_stop_dma);
+ bytes_transferred = dma_active_count(ch, req, status);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return bytes_transferred;
+}
+EXPORT_SYMBOL(tegra_dma_get_transfer_count);
+
+int tegra_dma_start_dma(struct tegra_dma_channel *ch, struct tegra_dma_req *req)
+{
+ unsigned int csr;
+ unsigned int status;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+ if (list_entry(ch->list.next, struct tegra_dma_req, node) != req) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ pr_err("The DMA request is not at the head of the list\n");
+ return -1;
+ }
+
+ if (req->status != TEGRA_DMA_REQ_STOPPED) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ pr_err("Dma state is not in STOPPED\n");
+ return -1;
+ }
+
+ /* clear interrupt if already there */
+ status = readl(ch->addr + APB_DMA_CHAN_STA);
+ if (status & STA_ISE_EOC)
+ writel(status, ch->addr + APB_DMA_CHAN_STA);
+
+ csr = ch->csr;
+ csr |= CSR_IE_EOC;
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+ ch->csr = csr;
+
+ req->bytes_transferred = 0;
+ req->status = TEGRA_DMA_REQ_INFLIGHT;
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_EMPTY;
+
+ csr |= CSR_ENB;
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dma_start_dma);
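A hedged sketch of pairing the two new entry points above: stop the in-flight repeat request, read how many bytes it transferred, then re-arm the same request. Both functions require the request to be at the head of the channel's list; paused_rx_bytes() is a hypothetical helper, not part of this patch:

    static int paused_rx_bytes(struct tegra_dma_channel *ch,
                               struct tegra_dma_req *req)
    {
        int bytes;

        /* true => stop the channel; req moves to TEGRA_DMA_REQ_STOPPED */
        bytes = tegra_dma_get_transfer_count(ch, req, true);
        if (bytes < 0)
            return bytes;               /* request was not in flight */

        /* ... consume 'bytes' from the client buffer here ... */

        /* Restart the same request; bytes_transferred and buffer_status
         * are reset before the channel is re-enabled. */
        if (tegra_dma_start_dma(ch, req) < 0)
            return -1;

        return bytes;
    }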
+
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
struct tegra_dma_req *_req)
{
- static DEFINE_SPINLOCK(enable_lock);
struct tegra_dma_req *req = NULL;
int found = 0;
unsigned int status;
unsigned long irq_flags;
int stop = 0;
- void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
spin_lock_irqsave(&ch->lock, irq_flags);
@@ -277,22 +382,7 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
if (!stop)
goto skip_status;
- /* STOP the DMA and get the transfer count.
- * Getting the transfer count is tricky.
- * - Globally disable DMA on all channels
- * - Read the channel's status register to know the number of pending
- * bytes to be transfered.
- * - Stop the dma channel
- * - Globally re-enable DMA to resume other transfers
- */
-
- spin_lock(&enable_lock);
- writel(0, addr + APB_DMA_GEN);
- status = readl(ch->addr + APB_DMA_CHAN_STA);
- tegra_dma_stop(ch);
- writel(GEN_ENABLE, addr + APB_DMA_GEN);
- spin_unlock(&enable_lock);
-
+ status = get_channel_status(ch, req, true);
req->bytes_transferred = dma_active_count(ch, req, status);
if (!list_empty(&ch->list)) {
@@ -665,13 +755,18 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
}
/* Load the next request into the hardware, if available
* */
- if (!list_is_last(&req->node, &ch->list)) {
- next_req = list_entry(req->node.next,
- typeof(*next_req), node);
- tegra_dma_update_hw_partial(ch, next_req);
+ if (!req->is_repeat_req) {
+ if (!list_is_last(&req->node, &ch->list)) {
+ next_req = list_entry(req->node.next,
+ typeof(*next_req), node);
+ tegra_dma_update_hw_partial(ch, next_req);
+ }
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
+ req->status = TEGRA_DMA_REQ_SUCCESS;
+ } else {
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
+ req->status = TEGRA_DMA_REQ_INFLIGHT;
}
- req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
- req->status = TEGRA_DMA_REQ_SUCCESS;
/* DMA lock is NOT held when callback is called */
spin_unlock_irqrestore(&ch->lock, irq_flags);
if (likely(req->threshold))
@@ -692,26 +787,33 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
req->bytes_transferred = bytes_transferred;
req->status = TEGRA_DMA_REQ_SUCCESS;
- if (list_is_last(&req->node, &ch->list)) {
- tegra_dma_stop(ch);
- } else {
- /* It may be possible that req came after
- * half dma complete so it need to start
- * immediately */
- next_req = list_entry(req->node.next,
- typeof(*next_req), node);
- if (next_req->status !=
- TEGRA_DMA_REQ_INFLIGHT) {
+ if (!req->is_repeat_req) {
+ if (list_is_last(&req->node, &ch->list)) {
tegra_dma_stop(ch);
- tegra_dma_update_hw(ch, next_req);
+ } else {
+ /* It is possible that this req arrived after the
+ * half-buffer completion, so it needs to start
+ * immediately */
+ next_req = list_entry(req->node.next,
+ typeof(*next_req), node);
+ if (next_req->status !=
+ TEGRA_DMA_REQ_INFLIGHT) {
+ tegra_dma_stop(ch);
+ tegra_dma_update_hw(ch, next_req);
+ }
}
- }
-
- list_del(&req->node);
+ list_del(&req->node);
- /* DMA lock is NOT held when callbak is called */
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- req->complete(req);
+ /* DMA lock is NOT held when callback is called */
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ req->complete(req);
+ } else {
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_EMPTY;
+ req->status = TEGRA_DMA_REQ_INFLIGHT;
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ if (likely(req->threshold))
+ req->threshold(req);
+ }
return;
} else {
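To summarize the repeat path through handle_continuous_dma() above: with is_repeat_req set, both the half-buffer and full-buffer interrupts invoke threshold() and leave the request in flight, so complete() is never called and the hardware keeps wrapping over the same buffer. buffer_status tells the client which half just filled. A sketch of a matching handler; consume_bytes() and the use of ->dev are illustrative assumptions:

    static void consume_bytes(void *buf, int off, int len); /* client-side */

    static void rx_repeat_threshold(struct tegra_dma_req *req)
    {
        void *buf = req->dev;   /* client buffer stashed in ->dev */

        if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
            consume_bytes(buf, 0, req->size / 2);  /* first half filled */
        else    /* _BUF_STATUS_EMPTY again: DMA wrapped past the end */
            consume_bytes(buf, req->size / 2, req->size / 2);
    }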
diff --git a/arch/arm/mach-tegra/include/mach/dma.h b/arch/arm/mach-tegra/include/mach/dma.h
index a088b000ac48..a381b2ff893a 100644
--- a/arch/arm/mach-tegra/include/mach/dma.h
+++ b/arch/arm/mach-tegra/include/mach/dma.h
@@ -66,6 +66,7 @@ enum tegra_dma_req_error {
TEGRA_DMA_REQ_SUCCESS = 0,
TEGRA_DMA_REQ_ERROR_ABORTED,
TEGRA_DMA_REQ_INFLIGHT,
+ TEGRA_DMA_REQ_STOPPED,
};
enum tegra_dma_req_buff_status {
@@ -129,6 +130,9 @@ struct tegra_dma_req {
/* DMA completion tracking information */
int buffer_status;
+ /* Repeat the same buffer provided in this request */
+ bool is_repeat_req;
+
/* Client specific data */
void *dev;
};
@@ -143,6 +147,11 @@ void tegra_dma_flush(struct tegra_dma_channel *ch);
unsigned int tegra_dma_transferred_req(struct tegra_dma_channel *ch,
struct tegra_dma_req *req);
+int tegra_dma_get_transfer_count(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *_req, bool is_stop_dma);
+int tegra_dma_start_dma(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *_req);
+
bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
struct tegra_dma_req *req);
bool tegra_dma_is_empty(struct tegra_dma_channel *ch);
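For reference, the request-status lifecycle these declarations imply for a repeat request, inferred from the dma.c changes above:

    /*
     * is_repeat_req = true:
     *
     *   tegra_dma_enqueue_req()                  -> TEGRA_DMA_REQ_INFLIGHT
     *   half-buffer IRQ: threshold() called,        stays INFLIGHT
     *   full-buffer IRQ: threshold() called,        stays INFLIGHT
     *   tegra_dma_get_transfer_count(ch, req, true) -> TEGRA_DMA_REQ_STOPPED
     *   tegra_dma_start_dma(ch, req)             -> TEGRA_DMA_REQ_INFLIGHT
     */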