author    Laxman Dewangan <ldewangan@nvidia.com>    2010-09-06 18:07:47 +0530
committer Bharat Nihalani <bnihalani@nvidia.com>    2010-09-16 04:03:17 -0700
commit    94469e8891c5a00c2f298c8d9c016857d948e694 (patch)
tree      f9db558a76c48695e91b0de351e7d0829f5be528 /arch
parent    95a256fdaa601b17ae13d4749cf846acc389fd75 (diff)
[arm/tegra] dma and serial: Adding pointer checks
Add valid pointer checks before accessing the pointers that are passed in when the public APIs are called. Also reset the pointers to NULL once the allocated handles are freed.

(cherry picked from commit 0954407534a757b316bc35a0232968feed23243a)

Change-Id: Ib8b99f0556fb9a98c74ba8911a00879451fad9e5
Reviewed-on: http://git-master/r/6578
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
Tested-by: Laxman Dewangan <ldewangan@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
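For reference, the guard added at the top of each exported DMA entry point follows the pattern sketched below. The wrapper function and the forward declaration are illustrative only (the real channel type comes from the Tegra DMA header); IS_ERR_OR_NULL() and BUG() are the kernel helpers the patch actually uses.

#include <linux/err.h>
#include <linux/bug.h>

struct tegra_dma_channel;	/* in the tree this comes from the Tegra DMA header */

/* Illustrative entry point; the real checks are added to the existing
 * tegra_dma_* APIs shown in the diff below. */
void tegra_dma_example_api(struct tegra_dma_channel *ch)
{
	/* Reject NULL or error-encoded handles before any dereference. */
	if (IS_ERR_OR_NULL(ch))
		BUG();

	/* ch is known to be a usable pointer from here on. */
}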
Diffstat (limited to 'arch')
-rw-r--r--    arch/arm/mach-tegra/dma.c    43
1 file changed, 43 insertions, 0 deletions
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index ad9f3a05a68b..a741cb9b3b2a 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -152,6 +152,12 @@ void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
struct tegra_dma_req *req;
+ if (IS_ERR_OR_NULL(ch))
+ BUG();
+
+ if (tegra_dma_is_empty(ch))
+ return;
+
req = list_entry(ch->list.next, typeof(*req), node);
tegra_dma_dequeue_req(ch, req);
@@ -163,6 +169,9 @@ void tegra_dma_stop(struct tegra_dma_channel *ch)
unsigned int csr;
unsigned int status;
+ if (IS_ERR_OR_NULL(ch))
+ BUG();
+
csr = ch->csr;
csr &= ~CSR_IE_EOC;
writel(csr, ch->addr + APB_DMA_CHAN_CSR);
@@ -179,6 +188,9 @@ int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
unsigned long irq_flags;
+ if (IS_ERR_OR_NULL(ch))
+ BUG();
+
spin_lock_irqsave(&ch->lock, irq_flags);
while (!list_empty(&ch->list))
list_del(ch->list.next);
@@ -271,6 +283,9 @@ unsigned int tegra_dma_transferred_req(struct tegra_dma_channel *ch,
unsigned int bytes_transferred;
unsigned int status;
+ if (IS_ERR_OR_NULL(ch))
+ BUG();
+
spin_lock_irqsave(&ch->lock, irq_flags);
if (list_entry(ch->list.next, struct tegra_dma_req, node)!=req) {
@@ -294,6 +309,9 @@ int tegra_dma_get_transfer_count(struct tegra_dma_channel *ch,
unsigned long irq_flags;
int bytes_transferred = 0;
+ if (IS_ERR_OR_NULL(ch))
+ BUG();
+
spin_lock_irqsave(&ch->lock, irq_flags);
if (list_entry(ch->list.next, struct tegra_dma_req, node) != req) {
@@ -320,6 +338,12 @@ int tegra_dma_start_dma(struct tegra_dma_channel *ch, struct tegra_dma_req *req)
unsigned int status;
unsigned long irq_flags;
+ if (IS_ERR_OR_NULL(ch))
+ BUG();
+
+ if (IS_ERR_OR_NULL(req))
+ BUG();
+
spin_lock_irqsave(&ch->lock, irq_flags);
if (list_entry(ch->list.next, struct tegra_dma_req, node)!=req) {
@@ -364,6 +388,9 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
unsigned long irq_flags;
int stop = 0;
+ if (IS_ERR_OR_NULL(ch))
+ BUG();
+
spin_lock_irqsave(&ch->lock, irq_flags);
if (list_entry(ch->list.next, struct tegra_dma_req, node)==_req)
@@ -410,6 +437,9 @@ bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
unsigned long irq_flags;
bool is_empty;
+ if (IS_ERR_OR_NULL(ch))
+ BUG();
+
spin_lock_irqsave(&ch->lock, irq_flags);
if (list_empty(&ch->list))
is_empty = true;
@@ -426,6 +456,9 @@ bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
unsigned long irq_flags;
struct tegra_dma_req *req;
+ if (IS_ERR_OR_NULL(ch))
+ BUG();
+
spin_lock_irqsave(&ch->lock, irq_flags);
list_for_each_entry(req, &ch->list, node) {
if (req == _req) {
@@ -444,6 +477,12 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
unsigned long irq_flags;
int start_dma = 0;
+ if (IS_ERR_OR_NULL(ch))
+ BUG();
+
+ if (IS_ERR_OR_NULL(req))
+ BUG();
+
if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
req->source_addr & 0x3 || req->dest_addr & 0x3) {
pr_err("Invalid DMA request for channel %d\n", ch->id);
@@ -500,6 +539,10 @@ EXPORT_SYMBOL(tegra_dma_allocate_channel);
void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
unsigned long irq_flags;
+
+ if (IS_ERR_OR_NULL(ch))
+ BUG();
+
if (ch->mode & TEGRA_DMA_SHARED)
return;
tegra_dma_cancel(ch);
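The second half of the commit message, resetting pointers to NULL once the allocated handles are freed, applies to the serial-driver side of the change, which lives outside arch/ and is therefore not shown in this diff. A rough sketch of that pattern, with hypothetical structure and field names, would look like:

/* Hypothetical caller-side state; the actual change is in the Tegra
 * serial driver and is not part of this diff. */
struct uart_dma_state {
	struct tegra_dma_channel *dma_chan;
};

static void uart_release_dma(struct uart_dma_state *s)
{
	if (IS_ERR_OR_NULL(s->dma_chan))
		return;

	tegra_dma_free_channel(s->dma_chan);
	/* Reset the handle so later calls see NULL instead of a stale pointer. */
	s->dma_chan = NULL;
}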