Diffstat (limited to 'drivers/char/mspec.c')

-rw-r--r--  drivers/char/mspec.c | 68 ++++++++++++++++++++++++++++++---------------
 1 file changed, 43 insertions(+), 25 deletions(-)
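In short: struct vma_data, the per-mapping bookkeeping structure, must now survive vma splits. The patch records the original (unsplit) range in vdata->vm_start/vm_end, shares one vma_data across all vmas split off from the original mapping via an atomic reference count, and adds a VMD_VMALLOCED flag so the close path frees the structure with the allocator that created it. Hedged user-space sketches of each piece follow the relevant hunks below.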
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 7ac30612068b..04ac155d3a07 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -38,6 +38,7 @@
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -66,7 +67,7 @@
/*
* Page types allocated by the device.
*/
-enum {
+enum mspec_page_type {
MSPEC_FETCHOP = 1,
MSPEC_CACHED,
MSPEC_UNCACHED
@@ -82,15 +83,25 @@ static int is_sn2;
* One of these structures is allocated when an mspec region is mmaped. The
* structure is pointed to by the vma->vm_private_data field in the vma struct.
* This structure is used to record the addresses of the mspec pages.
+ * This structure is shared by all vmas that are split off from the
+ * original vma by split_vma().
+ *
+ * The refcnt is incremented atomically because mm->mmap_sem does not
+ * protect it in the fork case, where multiple tasks share the vma_data.
*/
struct vma_data {
atomic_t refcnt; /* Number of vmas sharing the data. */
- spinlock_t lock; /* Serialize access to the vma. */
+ spinlock_t lock; /* Serialize access to this structure. */
int count; /* Number of pages allocated. */
- int type; /* Type of pages allocated. */
+ enum mspec_page_type type; /* Type of pages allocated. */
+ int flags; /* See VMD_xxx below. */
+ unsigned long vm_start; /* Original (unsplit) base. */
+ unsigned long vm_end; /* Original (unsplit) end. */
unsigned long maddr[0]; /* Array of MSPEC addresses. */
};
+#define VMD_VMALLOCED 0x1 /* vmalloc'd rather than kmalloc'd */
+
/* used on shub2 to clear FOP cache in the HUB */
static unsigned long scratch_page[MAX_NUMNODES];
#define SH2_AMO_CACHE_ENTRIES 4
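The comment above is the crux of the locking change: after fork(), two address spaces reference the same vma_data, and one task's mmap_sem does not serialize against the other's unmap, so a plain increment under vdata->lock would not be safe. A minimal user-space model of the open/close pairing, assuming C11 atomics as a stand-in for the kernel's atomic_t and free() for the kfree()/vfree() teardown:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct vma_data_model {
            atomic_int refcnt;        /* number of vmas sharing this data */
    };

    /* ->open(): a fork or a vma split adds one sharer. */
    static void model_open(struct vma_data_model *vdata)
    {
            atomic_fetch_add(&vdata->refcnt, 1);
    }

    /* ->close(): only the task dropping the last reference frees,
     * mirroring atomic_dec_and_test() in mspec_close(). */
    static void model_close(struct vma_data_model *vdata)
    {
            if (atomic_fetch_sub(&vdata->refcnt, 1) == 1)
                    free(vdata);      /* kfree()/vfree() stand-in */
    }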
@@ -128,8 +139,8 @@ mspec_zero_block(unsigned long addr, int len)
* mspec_open
*
* Called when a device mapping is created by a means other than mmap
- * (via fork, etc.). Increments the reference count on the underlying
- * mspec data so it is not freed prematurely.
+ * (via fork, a vma-splitting munmap, etc.).  Increments the reference
+ * count on the underlying mspec data so it is not freed prematurely.
*/
static void
mspec_open(struct vm_area_struct *vma)
@@ -144,43 +155,43 @@ mspec_open(struct vm_area_struct *vma)
* mspec_close
*
* Called when unmapping a device mapping. Frees all mspec pages
- * belonging to the vma.
+ * belonging to all the vmas sharing this vma_data structure.
*/
static void
mspec_close(struct vm_area_struct *vma)
{
struct vma_data *vdata;
- int i, pages, result, vdata_size;
+ int index, last_index;
+ unsigned long my_page;
vdata = vma->vm_private_data;
+
if (!atomic_dec_and_test(&vdata->refcnt))
return;
- pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
- for (i = 0; i < pages; i++) {
- if (vdata->maddr[i] == 0)
+ last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+ for (index = 0; index < last_index; index++) {
+ if (vdata->maddr[index] == 0)
continue;
/*
* Clear the page before sticking it back
* into the pool.
*/
- result = mspec_zero_block(vdata->maddr[i], PAGE_SIZE);
- if (!result)
- uncached_free_page(vdata->maddr[i]);
+ my_page = vdata->maddr[index];
+ vdata->maddr[index] = 0;
+ if (!mspec_zero_block(my_page, PAGE_SIZE))
+ uncached_free_page(my_page);
else
printk(KERN_WARNING "mspec_close(): "
- "failed to zero page %i\n",
- result);
+			"failed to zero page 0x%lx\n", my_page);
}
- if (vdata_size <= PAGE_SIZE)
- kfree(vdata);
- else
+ if (vdata->flags & VMD_VMALLOCED)
vfree(vdata);
+ else
+ kfree(vdata);
}
-
/*
* mspec_nopfn
*
@@ -194,7 +205,8 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
int index;
struct vma_data *vdata = vma->vm_private_data;
- index = (address - vma->vm_start) >> PAGE_SHIFT;
+ BUG_ON(address < vdata->vm_start || address >= vdata->vm_end);
+ index = (address - vdata->vm_start) >> PAGE_SHIFT;
maddr = (volatile unsigned long) vdata->maddr[index];
if (maddr == 0) {
maddr = uncached_alloc_page(numa_node_id());
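The one-line nopfn fix above is easy to miss: a fault must index the shared maddr[] array from the original base recorded in vdata, not from the faulting vma's own vm_start, which moves when the vma is split. A worked example with made-up addresses (PAGE_SHIFT of 14, i.e. 16KB pages as on SN2; all values illustrative):

    #include <stdio.h>

    #define PAGE_SHIFT 14   /* 16KB pages; illustrative */

    int main(void)
    {
            unsigned long orig_start  = 0x2000000000UL; /* vdata->vm_start */
            unsigned long split_start = 0x2000020000UL; /* vm_start of a vma
                                                           split off the end */
            unsigned long fault       = 0x2000028000UL; /* faulting address */

            /* Indexing from the split vma's base picks the wrong slot... */
            printf("from vma->vm_start:   %lu\n",
                   (fault - split_start) >> PAGE_SHIFT);   /* prints 2  */
            /* ...indexing from the recorded original base is stable. */
            printf("from vdata->vm_start: %lu\n",
                   (fault - orig_start) >> PAGE_SHIFT);    /* prints 10 */
            return 0;
    }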
@@ -236,10 +248,11 @@ static struct vm_operations_struct mspec_vm_ops = {
* underlying pages.
*/
static int
-mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
+mspec_mmap(struct file *file, struct vm_area_struct *vma,
+ enum mspec_page_type type)
{
struct vma_data *vdata;
- int pages, vdata_size;
+ int pages, vdata_size, flags = 0;
if (vma->vm_pgoff != 0)
return -EINVAL;
@@ -254,18 +267,23 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
if (vdata_size <= PAGE_SIZE)
vdata = kmalloc(vdata_size, GFP_KERNEL);
- else
+ else {
vdata = vmalloc(vdata_size);
+ flags = VMD_VMALLOCED;
+ }
if (!vdata)
return -ENOMEM;
memset(vdata, 0, vdata_size);
+ vdata->vm_start = vma->vm_start;
+ vdata->vm_end = vma->vm_end;
+ vdata->flags = flags;
vdata->type = type;
spin_lock_init(&vdata->lock);
vdata->refcnt = ATOMIC_INIT(1);
vma->vm_private_data = vdata;
- vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED | VM_PFNMAP);
+ vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP);
if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;
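One more subtlety: the old close path recomputed vdata_size from the vma to choose between kfree() and vfree(), which gives the wrong answer once the vma has shrunk through a split. Recording VMD_VMALLOCED at mmap time makes the free independent of later vma geometry. A user-space model of that pairing, assuming malloc()/free() as stand-ins for both kernel allocators and an illustrative PAGE_SIZE:

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE      16384   /* SN2 page size; illustrative */
    #define VMD_VMALLOCED  0x1

    struct vdata_model {
            int flags;              /* VMD_xxx */
            /* ... remainder of struct vma_data ... */
    };

    static struct vdata_model *alloc_vdata(size_t vdata_size)
    {
            struct vdata_model *vdata = malloc(vdata_size);

            if (!vdata)
                    return NULL;
            memset(vdata, 0, vdata_size);
            if (vdata_size > PAGE_SIZE)       /* the driver's vmalloc() path */
                    vdata->flags |= VMD_VMALLOCED;
            return vdata;
    }

    static void free_vdata(struct vdata_model *vdata)
    {
            /* The recorded flag, not the current mapping size, selects
             * the allocator: vfree() if VMD_VMALLOCED, else kfree(). */
            free(vdata);
    }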