author    Thomas Roell <troell@nvidia.com>    2010-10-13 15:23:13 -0600
committer Niket Sirsi <nsirsi@nvidia.com>     2010-11-06 10:05:14 -0800
commit    c970c075df97de1e48fa80461e2e52e7446a666f (patch)
tree      f89b24a0c76c9af5afa044cb6934b7e2b4833266
parent    af5735282098803982b27cc85988f3c2c2968329 (diff)
[ARM/Tegra] nvmap fix for crash in spin_lock() call
Gate access to h->carveout.co_heap by (h->alloc && !h->heap_pgalloc).

Bug 743185

(cherry picked from commit daba1dc0a3e050a2739293904cad9e65eb27a559)

Change-Id: I5014fb40074e79e4db293767e3ef512ea35401d8
Reviewed-on: http://git-master/r/10148
Reviewed-by: Kirill Artamonov <kartamonov@nvidia.com>
Tested-by: Kirill Artamonov <kartamonov@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
 drivers/video/tegra/nvmap.c | 30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)
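Every hunk in the diff below applies the same guard, so here is a minimal sketch of the pattern for readers skimming the change. It is a simplified userspace stand-in (a pthread mutex instead of the kernel spinlock, a per-heap mapcount instead of the per-block BLOCK(...)->mapcount, and toy struct definitions), not the driver's actual types or code:

/*
 * Simplified userspace stand-in for the guard this patch adds around every
 * carveout-heap access: a pthread mutex replaces the kernel spinlock and a
 * per-heap mapcount replaces the per-block one.  Illustration only.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct co_heap {
        pthread_mutex_t lock;           /* stands in for co_heap->lock */
        int mapcount;                   /* stands in for the per-block mapcount */
};

struct nvmap_handle {
        bool alloc;                     /* handle has backing memory */
        bool heap_pgalloc;              /* backed by the page allocator, not a carveout */
        struct {
                struct co_heap *co_heap;    /* valid only for carveout allocations */
        } carveout;
};

/*
 * Before the fix the heap was locked whenever !h->heap_pgalloc, but an
 * unallocated handle also has heap_pgalloc == false while carveout.co_heap
 * is still NULL, so spin_lock() dereferenced an invalid pointer.  Gating on
 * h->alloc as well means the heap is only touched when the pointer is valid.
 */
static void handle_map(struct nvmap_handle *h)
{
        if (h->alloc && !h->heap_pgalloc) {
                pthread_mutex_lock(&h->carveout.co_heap->lock);
                h->carveout.co_heap->mapcount++;
                pthread_mutex_unlock(&h->carveout.co_heap->lock);
        }
}

int main(void)
{
        struct nvmap_handle unallocated = { 0 };    /* no backing heap yet */

        handle_map(&unallocated);   /* safe: the guard skips the NULL heap */
        printf("guard skipped the carveout heap for an unallocated handle\n");
        return 0;
}

The design point, reflected in each hunk that follows, is that the carveout fields of a handle are only meaningful once the handle is actually allocated from a carveout heap (h->alloc set, h->heap_pgalloc clear); checking !h->heap_pgalloc alone let the driver take a spinlock through an invalid co_heap pointer on unallocated handles.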
diff --git a/drivers/video/tegra/nvmap.c b/drivers/video/tegra/nvmap.c
index 0804239471e1..6ae55b0d4b8c 100644
--- a/drivers/video/tegra/nvmap.c
+++ b/drivers/video/tegra/nvmap.c
@@ -1445,7 +1445,7 @@ static void nvmap_vma_close(struct vm_area_struct *vma) {
if (priv && !atomic_dec_return(&priv->ref)) {
struct nvmap_handle *h = priv->h;
if (h) {
- if (!h->alloc && !h->heap_pgalloc) {
+ if (h->alloc && !h->heap_pgalloc) {
spin_lock(&h->carveout.co_heap->lock);
BLOCK(h->carveout.co_heap, h->carveout.block_idx)->mapcount--;
spin_unlock(&h->carveout.co_heap->lock);
@@ -1644,7 +1644,7 @@ static int _nvmap_handle_unpin(struct nvmap_handle *h)
_nvmap_insert_mru_vma(h);
ret=1;
}
- if (!h->heap_pgalloc) {
+ if (h->alloc && !h->heap_pgalloc) {
spin_lock(&h->carveout.co_heap->lock);
BLOCK(h->carveout.co_heap, h->carveout.block_idx)->align
&= ~NVMAP_BLOCK_ALIGN_PINNED;
@@ -2654,7 +2654,7 @@ static int _nvmap_do_cache_maint(struct nvmap_handle *h,
prot = _nvmap_flag_to_pgprot(h->flags, pgprot_kernel);
- if (!h->heap_pgalloc) {
+ if (h->alloc && !h->heap_pgalloc) {
spin_lock(&h->carveout.co_heap->lock);
BLOCK(h->carveout.co_heap, h->carveout.block_idx)->mapcount++;
spin_unlock(&h->carveout.co_heap->lock);
@@ -2695,7 +2695,7 @@ static int _nvmap_do_cache_maint(struct nvmap_handle *h,
if (page) put_page(page);
}
- if (!h->heap_pgalloc) {
+ if (h->alloc && !h->heap_pgalloc) {
spin_lock(&h->carveout.co_heap->lock);
BLOCK(h->carveout.co_heap, h->carveout.block_idx)->mapcount--;
spin_unlock(&h->carveout.co_heap->lock);
@@ -2827,7 +2827,7 @@ static ssize_t _nvmap_do_rw_handle(struct nvmap_handle *h, int is_read,
count = 1;
}
- if (!h->heap_pgalloc) {
+ if (h->alloc && !h->heap_pgalloc) {
spin_lock(&h->carveout.co_heap->lock);
BLOCK(h->carveout.co_heap, h->carveout.block_idx)->mapcount++;
spin_unlock(&h->carveout.co_heap->lock);
@@ -2853,7 +2853,7 @@ static ssize_t _nvmap_do_rw_handle(struct nvmap_handle *h, int is_read,
h_offs += h_stride;
}
- if (!h->heap_pgalloc) {
+ if (h->alloc && !h->heap_pgalloc) {
spin_lock(&h->carveout.co_heap->lock);
BLOCK(h->carveout.co_heap, h->carveout.block_idx)->mapcount--;
spin_unlock(&h->carveout.co_heap->lock);
@@ -3427,7 +3427,7 @@ NvError NvRmMemMap(NvRmMemHandle hMem, NvU32 Offset, NvU32 Size,
h->kern_map = vm_map_ram(h->pgalloc.pages,
h->size>>PAGE_SHIFT, -1, prot);
} else {
- if (!h->heap_pgalloc) {
+ if (h->alloc && !h->heap_pgalloc) {
spin_lock(&h->carveout.co_heap->lock);
BLOCK(h->carveout.co_heap, h->carveout.block_idx)->mapcount++;
spin_unlock(&h->carveout.co_heap->lock);
@@ -3826,9 +3826,11 @@ struct nvmap_handle *nvmap_alloc(
size_t mapaddr = h->carveout.base;
size_t mapsize = h->size;
- spin_lock(&h->carveout.co_heap->lock);
- BLOCK(h->carveout.co_heap, h->carveout.block_idx)->mapcount++;
- spin_unlock(&h->carveout.co_heap->lock);
+ if (h->alloc && !h->heap_pgalloc) {
+ spin_lock(&h->carveout.co_heap->lock);
+ BLOCK(h->carveout.co_heap, h->carveout.block_idx)->mapcount++;
+ spin_unlock(&h->carveout.co_heap->lock);
+ }
mapsize += (mapaddr & ~PAGE_MASK);
mapaddr &= PAGE_MASK;
@@ -3858,9 +3860,11 @@ void nvmap_free(struct nvmap_handle *h, void *map)
} else {
unsigned long addr = (unsigned long)map;
- spin_lock(&h->carveout.co_heap->lock);
- BLOCK(h->carveout.co_heap, h->carveout.block_idx)->mapcount--;
- spin_unlock(&h->carveout.co_heap->lock);
+ if (h->alloc && !h->heap_pgalloc) {
+ spin_lock(&h->carveout.co_heap->lock);
+ BLOCK(h->carveout.co_heap, h->carveout.block_idx)->mapcount--;
+ spin_unlock(&h->carveout.co_heap->lock);
+ }
addr &= PAGE_MASK;
iounmap((void *)addr);