Diffstat (limited to 'arch/arm/mach-tegra/dma.c')
-rw-r--r--	arch/arm/mach-tegra/dma.c	71
1 file changed, 68 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index 35499916e2bd..374d428b8779 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -34,6 +34,8 @@
#include <mach/iomap.h>
#include <mach/clk.h>
+#include "apbio.h"
+
#define APB_DMA_GEN 0x000
#define GEN_ENABLE (1<<31)
@@ -355,6 +357,68 @@ static unsigned int dma_active_count(struct tegra_dma_channel *ch,
return bytes_transferred;
}
+static unsigned int get_channel_status(struct tegra_dma_channel *ch,
+			struct tegra_dma_req *req, bool is_stop_dma)
+{
+	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+	unsigned int status;
+
+	if (is_stop_dma) {
+		/*
+		 * STOP the DMA and get the transfer count.
+		 * Getting the transfer count is tricky:
+		 *  - Globally disable DMA on all channels.
+		 *  - Read the channel's status register to know the number
+		 *    of pending bytes to be transferred.
+		 *  - Stop the DMA channel.
+		 *  - Globally re-enable DMA to resume other transfers.
+		 */
+		spin_lock(&enable_lock);
+		writel(0, addr + APB_DMA_GEN);
+		udelay(20);
+		status = readl(ch->addr + APB_DMA_CHAN_STA);
+		tegra_dma_stop(ch);
+		writel(GEN_ENABLE, addr + APB_DMA_GEN);
+		spin_unlock(&enable_lock);
+		if (status & STA_ISE_EOC) {
+			pr_err("Got Dma Int here clearing");
+			writel(status, ch->addr + APB_DMA_CHAN_STA);
+		}
+		req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
+	} else {
+		status = readl(ch->addr + APB_DMA_CHAN_STA);
+	}
+	return status;
+}
+
+/* should be called with the channel lock held */
+static unsigned int dma_active_count(struct tegra_dma_channel *ch,
+			struct tegra_dma_req *req, unsigned int status)
+{
+	unsigned int to_transfer;
+	unsigned int req_transfer_count;
+	unsigned int bytes_transferred;
+
+	to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
+	req_transfer_count = ch->req_transfer_count + 1;
+	bytes_transferred = req_transfer_count;
+	if (status & STA_BUSY)
+		bytes_transferred -= to_transfer;
+	/*
+	 * In continuous transfer mode, the DMA engine only tracks the count
+	 * for the current half of the buffer.  If the first half has already
+	 * completed, add its count to the total.
+	 */
+	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS) {
+		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
+			bytes_transferred += req_transfer_count;
+		if (status & STA_ISE_EOC)
+			bytes_transferred += req_transfer_count;
+	}
+	bytes_transferred *= 4;
+	return bytes_transferred;
+}
+
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
struct tegra_dma_req *_req)
{
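
The arithmetic in the new dma_active_count() above is easy to misread, particularly the continuous-mode adjustment where each completed half-buffer contributes a full request count. The standalone sketch below mirrors the same calculation with plain integers so it can be compiled and checked on its own; the helper name active_bytes(), its parameters, and the example values are illustrative only and are not part of this patch.

#include <stdbool.h>
#include <stdio.h>

/*
 * Mirror of the dma_active_count() arithmetic: raw_count is the STA_COUNT
 * field of the status register, req_count is ch->req_transfer_count; both
 * are in 32-bit words and are encoded by the hardware/driver as N-1.
 */
static unsigned int active_bytes(unsigned int raw_count,
				 unsigned int req_count,
				 bool busy, bool half_full, bool eoc,
				 bool continuous)
{
	unsigned int to_transfer = raw_count + 1;	/* words still pending */
	unsigned int per_req = req_count + 1;		/* words per request */
	unsigned int done = per_req;

	if (busy)
		done -= to_transfer;
	if (continuous) {
		/* each completed half-buffer adds a full request count */
		if (half_full)
			done += per_req;
		if (eoc)
			done += per_req;
	}
	return done * 4;				/* words -> bytes */
}

int main(void)
{
	/* 256-word request, 100 words still pending, first half complete */
	printf("%u bytes transferred\n",
	       active_bytes(99, 255, true, true, false, true));
	return 0;
}
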
@@ -382,7 +446,7 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	}
	if (!stop)
-		goto skip_status;
+		goto skip_stop_dma;
	status = get_channel_status(ch, req, true);
	req->bytes_transferred = dma_active_count(ch, req, status);
@@ -394,7 +458,8 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}
-skip_status:
+
+skip_stop_dma:
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
	spin_unlock_irqrestore(&ch->lock, irq_flags);
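
For reference, a hypothetical caller of the reworked dequeue path might look like the sketch below. Only tegra_dma_dequeue_req(), req->status, and req->bytes_transferred are taken from this file; the function name my_abort_transfer(), the <mach/dma.h> include, and the way ch and req were set up are assumptions, not part of this patch.

#include <linux/kernel.h>
#include <mach/dma.h>

/* Hypothetical caller; not part of this patch. */
static void my_abort_transfer(struct tegra_dma_channel *ch,
			      struct tegra_dma_req *req)
{
	tegra_dma_dequeue_req(ch, req);

	/*
	 * On return, req->status is -TEGRA_DMA_REQ_ERROR_ABORTED and
	 * req->bytes_transferred holds the byte count computed by
	 * dma_active_count() at the moment the channel was stopped.
	 */
	pr_info("transfer aborted after %u bytes\n", req->bytes_transferred);
}
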
@@ -557,7 +622,7 @@ struct tegra_dma_channel *tegra_dma_allocate_channel(int mode,
	va_list args;
	dma_isr_handler isr_handler = NULL;
-	if (WARN_ON(!tegra_dma_initialized))
+	if (!tegra_dma_initialized)
		return NULL;
	mutex_lock(&tegra_dma_lock);
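
With the WARN_ON() dropped in the hunk above, tegra_dma_allocate_channel() now returns NULL silently when the DMA core has not been initialized, so callers have to check the result themselves. A minimal sketch follows, assuming the usual (mode, name-format, ...) signature and the TEGRA_DMA_MODE_ONESHOT flag from <mach/dma.h>; the wrapper name and channel name are illustrative only.

#include <mach/dma.h>

/* Illustrative wrapper; not part of this patch. */
static struct tegra_dma_channel *my_request_channel(void)
{
	struct tegra_dma_channel *ch;

	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT, "my_dev");
	if (!ch)	/* DMA core not ready or no free channel */
		return NULL;

	return ch;
}
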