author     Maneet Singh <mmaneetsingh@nvidia.com>    2014-09-11 20:12:33 -0700
committer  Winnie Hsu <whsu@nvidia.com>              2014-09-26 10:24:59 -0700
commit     6a3fc6f05ccfb0c18751efd3a3aae783ba82607a (patch)
tree       144d1c9205f772489fc2cf0cec8739556cdd4b17
parent     729cdd9d85f7fe4ab63a20ec18526e3f1c550e74 (diff)
video: tegra: nvmap: fix use-after-free race condition
Increment the nvmap_handle ref count in the utility function
nvmap_get_id_from_dmabuf_fd() before the function releases its reference to
the dma buffer. This is required to avoid race conditions in nvmap code where
the nvmap_handle returned by this function could be freed concurrently while
the caller is still using it. As a side effect of this change, every caller
of the utility function must decrement the nvmap_handle ref count after it is
done using the returned nvmap_handle.

Bug 1553082

Change-Id: Iffc2e5819f8b493d5ed95a9d0c422ccd52438965
Signed-off-by: Maneet Singh <mmaneetsingh@nvidia.com>
Reviewed-on: http://git-master/r/498135
(cherry picked from commit afddea745cc4f4a824be501ecbbb50f55e7e6f04)
Reviewed-on: http://git-master/r/538986
GVS: Gerrit_Virtual_Submit
Reviewed-by: Winnie Hsu <whsu@nvidia.com>
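To make the new calling contract concrete, the sketch below shows how a caller
of nvmap_get_id_from_dmabuf_fd() is expected to balance the reference the
lookup now takes. The helper example_use_handle() is hypothetical and its
error handling is simplified; only nvmap_get_id_from_dmabuf_fd(),
nvmap_handle_put(), IS_ERR() and PTR_ERR() come from the actual code.

	/* Hypothetical caller sketch (not part of the patch): the lookup takes
	 * its own reference on the nvmap_handle before dropping the dma_buf
	 * reference, and the caller must drop that handle reference when done.
	 */
	static int example_use_handle(struct nvmap_client *client, int fd)
	{
		struct nvmap_handle *handle;

		handle = nvmap_get_id_from_dmabuf_fd(client, fd);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		/* ... use the handle; it cannot be freed concurrently here ... */

		/* Required by the new contract: drop the reference taken above. */
		nvmap_handle_put(handle);
		return 0;
	}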
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_dmabuf.c |  8
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_handle.c |  7
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_ioctl.c  | 69
3 files changed, 52 insertions(+), 32 deletions(-)
diff --git a/drivers/video/tegra/nvmap/nvmap_dmabuf.c b/drivers/video/tegra/nvmap/nvmap_dmabuf.c
index 6f47c498c33d..e0ade759a340 100644
--- a/drivers/video/tegra/nvmap/nvmap_dmabuf.c
+++ b/drivers/video/tegra/nvmap/nvmap_dmabuf.c
@@ -684,6 +684,11 @@ struct dma_buf *__nvmap_dmabuf_export_from_ref(struct nvmap_handle_ref *ref)
/*
* Returns the nvmap handle ID associated with the passed dma_buf's fd. This
* does not affect the ref count of the dma_buf.
+ * NOTE: Callers of this utility function must invoke nvmap_handle_put after
+ * using the returned nvmap_handle. Call to nvmap_handle_get is required in
+ * this utility function to avoid race conditions in code where nvmap_handle
+ * returned by this function is freed concurrently while the caller is still
+ * using it.
*/
struct nvmap_handle *nvmap_get_id_from_dmabuf_fd(struct nvmap_client *client,
int fd)
@@ -698,6 +703,8 @@ struct nvmap_handle *nvmap_get_id_from_dmabuf_fd(struct nvmap_client *client,
if (dmabuf->ops == &nvmap_dma_buf_ops) {
info = dmabuf->priv;
handle = info->handle;
+ if (!nvmap_handle_get(handle))
+ handle = ERR_PTR(-EINVAL);
}
dma_buf_put(dmabuf);
return handle;
@@ -719,6 +726,7 @@ int nvmap_ioctl_share_dmabuf(struct file *filp, void __user *arg)
return -EINVAL;
op.fd = nvmap_get_dmabuf_fd(client, handle);
+ nvmap_handle_put(handle);
if (op.fd < 0)
return op.fd;
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index dda4cb66ad48..4281dafa578e 100644
--- a/drivers/video/tegra/nvmap/nvmap_handle.c
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -440,7 +440,11 @@ EXPORT_SYMBOL(nvmap_free_handle);
void nvmap_free_handle_user_id(struct nvmap_client *client,
unsigned long user_id)
{
- nvmap_free_handle(client, unmarshal_user_handle(user_id));
+ struct nvmap_handle *handle = unmarshal_user_handle(user_id);
+ if (handle) {
+ nvmap_free_handle(client, handle);
+ nvmap_handle_put(handle);
+ }
}
static void add_handle_ref(struct nvmap_client *client,
@@ -611,6 +615,7 @@ struct nvmap_handle_ref *nvmap_create_handle_from_fd(
if (IS_ERR(handle))
return ERR_CAST(handle);
ref = nvmap_duplicate_handle(client, handle, 1);
+ nvmap_handle_put(handle);
return ref;
}
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.c b/drivers/video/tegra/nvmap/nvmap_ioctl.c
index dc319107786a..a71322b76f23 100644
--- a/drivers/video/tegra/nvmap/nvmap_ioctl.c
+++ b/drivers/video/tegra/nvmap/nvmap_ioctl.c
@@ -47,6 +47,9 @@ static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
unsigned long sys_stride, unsigned long elem_size,
unsigned long count);
+/* NOTE: Callers of this utility function must invoke nvmap_handle_put after
+ * using the returned nvmap_handle.
+ */
struct nvmap_handle *unmarshal_user_handle(__u32 handle)
{
struct nvmap_handle *h;
@@ -104,8 +107,8 @@ int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
struct nvmap_handle *on_stack[16];
struct nvmap_handle **refs;
unsigned long __user *output = NULL;
- unsigned int i;
int err = 0;
+ u32 i, n_unmarshal_handles = 0;
#ifdef CONFIG_COMPAT
if (is32) {
@@ -148,6 +151,7 @@ int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
err = -EINVAL;
goto out;
}
+ n_unmarshal_handles++;
}
} else {
refs = on_stack;
@@ -157,6 +161,7 @@ int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
err = -EINVAL;
goto out;
}
+ n_unmarshal_handles++;
}
trace_nvmap_ioctl_pinop(filp->private_data, is_pin, op.count, refs);
@@ -219,6 +224,9 @@ int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
nvmap_unpin_ids(filp->private_data, op.count, refs);
out:
+ for (i = 0; i < n_unmarshal_handles; i++)
+ nvmap_handle_put(refs[i]);
+
if (refs != on_stack)
kfree(refs);
@@ -237,11 +245,6 @@ int nvmap_ioctl_getid(struct file *filp, void __user *arg)
if (!h)
return -EINVAL;
- h = nvmap_handle_get(h);
-
- if (!h)
- return -EPERM;
-
op.id = marshal_id(h);
nvmap_handle_put(h);
@@ -283,6 +286,7 @@ int nvmap_ioctl_getfd(struct file *filp, void __user *arg)
return -EINVAL;
op.fd = nvmap_get_dmabuf_fd(client, handle);
+ nvmap_handle_put(handle);
if (op.fd < 0)
return op.fd;
@@ -298,24 +302,27 @@ int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
struct nvmap_alloc_handle op;
struct nvmap_client *client = filp->private_data;
struct nvmap_handle *handle;
+ int err;
if (copy_from_user(&op, arg, sizeof(op)))
return -EFAULT;
- handle = unmarshal_user_handle(op.handle);
- if (!handle)
+ if (op.align & (op.align - 1))
return -EINVAL;
- if (op.align & (op.align - 1))
+ handle = unmarshal_user_handle(op.handle);
+ if (!handle)
return -EINVAL;
/* user-space handles are aligned to page boundaries, to prevent
* data leakage. */
op.align = max_t(size_t, op.align, PAGE_SIZE);
- return nvmap_alloc_handle(client, handle, op.heap_mask, op.align,
+ err = nvmap_alloc_handle(client, handle, op.heap_mask, op.align,
0, /* no kind */
op.flags & (~NVMAP_HANDLE_KIND_SPECIFIED));
+ nvmap_handle_put(handle);
+ return err;
}
int nvmap_ioctl_alloc_kind(struct file *filp, void __user *arg)
@@ -323,26 +330,29 @@ int nvmap_ioctl_alloc_kind(struct file *filp, void __user *arg)
struct nvmap_alloc_kind_handle op;
struct nvmap_client *client = filp->private_data;
struct nvmap_handle *handle;
+ int err;
if (copy_from_user(&op, arg, sizeof(op)))
return -EFAULT;
- handle = unmarshal_user_handle(op.handle);
- if (!handle)
+ if (op.align & (op.align - 1))
return -EINVAL;
- if (op.align & (op.align - 1))
+ handle = unmarshal_user_handle(op.handle);
+ if (!handle)
return -EINVAL;
/* user-space handles are aligned to page boundaries, to prevent
* data leakage. */
op.align = max_t(size_t, op.align, PAGE_SIZE);
- return nvmap_alloc_handle(client, handle,
+ err = nvmap_alloc_handle(client, handle,
op.heap_mask,
op.align,
op.kind,
op.flags);
+ nvmap_handle_put(handle);
+ return err;
}
int nvmap_create_fd(struct nvmap_client *client, struct nvmap_handle *h)
@@ -435,15 +445,9 @@ int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg, bool is32)
return -EFAULT;
h = unmarshal_user_handle(op.handle);
-
if (!h)
return -EINVAL;
- h = nvmap_handle_get(h);
-
- if (!h)
- return -EPERM;
-
if(!h->alloc) {
nvmap_handle_put(h);
return -EFAULT;
@@ -536,10 +540,6 @@ int nvmap_ioctl_get_param(struct file *filp, void __user *arg, bool is32)
if (!h)
return -EINVAL;
- h = nvmap_handle_get(h);
- if (!h)
- return -EINVAL;
-
nvmap_ref_lock(client);
ref = __nvmap_validate_locked(client, h);
if (IS_ERR_OR_NULL(ref)) {
@@ -592,13 +592,12 @@ int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
if (copy_from_user(&op, arg, sizeof(op)))
return -EFAULT;
- h = unmarshal_user_handle(op.handle);
- if (!h || !op.addr || !op.count || !op.elem_size)
+ if (!op.addr || !op.count || !op.elem_size)
return -EINVAL;
- h = nvmap_handle_get(h);
+ h = unmarshal_user_handle(op.handle);
if (!h)
- return -EPERM;
+ return -EINVAL;
trace_nvmap_ioctl_rw_handle(client, h, is_read, op.offset,
op.addr, op.hmem_stride,
@@ -635,11 +634,14 @@ static int __nvmap_cache_maint(struct nvmap_client *client,
unsigned long end;
int err = 0;
- handle = unmarshal_user_handle(op->handle);
- if (!handle || !op->addr || op->op < NVMAP_CACHE_OP_WB ||
+ if (!op->addr || op->op < NVMAP_CACHE_OP_WB ||
op->op > NVMAP_CACHE_OP_WB_INV)
return -EINVAL;
+ handle = unmarshal_user_handle(op->handle);
+ if (!handle)
+ return -EINVAL;
+
down_read(&current->mm->mmap_sem);
vma = find_vma(current->active_mm, (unsigned long)op->addr);
@@ -666,6 +668,7 @@ static int __nvmap_cache_maint(struct nvmap_client *client,
false);
out:
up_read(&current->mm->mmap_sem);
+ nvmap_handle_put(handle);
return err;
}
@@ -1127,7 +1130,8 @@ int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg,
u32 *offset_ptr;
u32 *size_ptr;
struct nvmap_handle **refs;
- int i, err = 0;
+ int err = 0;
+ u32 i, n_unmarshal_handles = 0;
if (copy_from_user(&op, arg, sizeof(op)))
return -EFAULT;
@@ -1169,6 +1173,7 @@ int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg,
err = -EINVAL;
goto free_mem;
}
+ n_unmarshal_handles++;
}
if (is_reserve_ioctl)
@@ -1179,6 +1184,8 @@ int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg,
op.op, op.nr);
free_mem:
+ for (i = 0; i < n_unmarshal_handles; i++)
+ nvmap_handle_put(refs[i]);
kfree(refs);
return err;
}