author     Rob Clark <rob@ti.com>  2012-01-18 18:33:02 -0600
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-02-08 14:14:12 -0800
commit     c5b1247bd1c3ab6722acfa95213be9a16bfb664c (patch)
tree       3ef9b02ee38d547cf00a7400186deee87e117214
parent     a9e8d70c1ac6c9ccf6852c91e082e28249564e6e (diff)
staging: drm/omap: fix locking issue
The create/free mmap offset code must be synchronized, yet only some callers of omap_gem_mmap_offset() held struct_mutex, leading to various crashes around drm_mm_insert_helper_range(). (In the free-object path, which is currently the only place we call drm_gem_free_mmap_offset(), struct_mutex is already held.)

Signed-off-by: Rob Clark <rob@ti.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--  drivers/staging/omapdrm/omap_gem.c  18
1 file changed, 14 insertions(+), 4 deletions(-)
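
The fix follows a common locking pattern: keep an internal helper that assumes the caller already holds struct_mutex, and expose a public wrapper that takes and releases the lock for callers that do not. The following is a minimal user-space sketch of that pattern, not the kernel code itself: pthreads stand in for struct_mutex, and the names mmap_offset_locked and cached_offset are illustrative assumptions.

/*
 * Sketch of the locked-helper / locking-wrapper pattern (assumes pthreads
 * in place of the kernel's struct_mutex; names are illustrative only).
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint64_t cached_offset;

/* Internal variant: the caller must already hold struct_mutex. */
static uint64_t mmap_offset_locked(void)
{
	if (!cached_offset)
		cached_offset = 0x1000;	/* pretend we allocated an offset */
	return cached_offset;
}

/* Public variant: safe for callers that do not hold struct_mutex. */
uint64_t mmap_offset_unlocked(void)
{
	uint64_t offset;

	pthread_mutex_lock(&struct_mutex);
	offset = mmap_offset_locked();
	pthread_mutex_unlock(&struct_mutex);

	return offset;
}

int main(void)
{
	printf("offset = 0x%llx\n",
	       (unsigned long long)mmap_offset_unlocked());
	return 0;
}

In the patch below, mmap_offset() plays the role of the lock-assuming helper (used by internal paths such as evict_entry(), which already run under struct_mutex), while the exported omap_gem_mmap_offset() becomes the locking wrapper.
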
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
index ae1ad357f7f2..b7d6f886c5cf 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/staging/omapdrm/omap_gem.c
@@ -116,6 +116,9 @@ struct omap_gem_object {
} *sync;
};
+static int get_pages(struct drm_gem_object *obj, struct page ***pages);
+static uint64_t mmap_offset(struct drm_gem_object *obj);
+
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
* not necessarily pinned in TILER all the time, and (b) when they are
* they are not necessarily page aligned, we reserve one or more small
@@ -149,7 +152,7 @@ static void evict_entry(struct drm_gem_object *obj,
{
if (obj->dev->dev_mapping) {
size_t size = PAGE_SIZE * usergart[fmt].height;
- loff_t off = omap_gem_mmap_offset(obj) +
+ loff_t off = mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
}
@@ -189,8 +192,6 @@ static inline bool is_shmem(struct drm_gem_object *obj)
return obj->filp != NULL;
}
-static int get_pages(struct drm_gem_object *obj, struct page ***pages);
-
static DEFINE_SPINLOCK(sync_lock);
/** ensure backing pages are allocated */
@@ -251,7 +252,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
}
/** get mmap offset */
-uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
+static uint64_t mmap_offset(struct drm_gem_object *obj)
{
if (!obj->map_list.map) {
/* Make it mmapable */
@@ -267,6 +268,15 @@ uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
}
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
+{
+ uint64_t offset;
+ mutex_lock(&obj->dev->struct_mutex);
+ offset = mmap_offset(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return offset;
+}
+
/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{