author     Thomas Tuttle <ttuttle@google.com>              2008-06-05 22:46:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-06-06 11:29:11 -0700
commit     aae8679b0ebcaa92f99c1c3cb0cd651594a43915 (patch)
tree       26d761d3c4cca09b58538a576948a7ba720f7d5a
parent     d1ee2971f5bd8a16bc5ecfe1b00e14b4fe407c4f (diff)
pagemap: fix bug in add_to_pagemap, require aligned-length reads of /proc/pid/pagemap
Fix a bug in add_to_pagemap.  Previously, since pm->out was a char *,
put_user was only copying 1 byte of every PFN, resulting in the top 7
bytes of each PFN not being copied.  By requiring that reads be a
multiple of 8 bytes, I can make pm->out and pm->end u64*s instead of
char*s, which makes put_user work properly, and also simplifies the
logic in add_to_pagemap a bit.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Thomas Tuttle <ttuttle@google.com>
Cc: Matt Mackall <mpm@selenic.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
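For reference, here is a small stand-alone user-space sketch of the truncation the
message describes: put_user() sizes its store from the destination pointer's type,
so a char __user * destination receives only one byte of each 64-bit entry, while a
u64 __user * destination receives all eight.  The fake_put_user() macro below is a
hypothetical stand-in for the kernel's put_user(), kept only to mirror its
store-through-typed-pointer behaviour; it is not part of this patch.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for put_user(): store val through dst's own type. */
#define fake_put_user(val, dst) (*(dst) = (val), 0)

int main(void)
{
	uint64_t pfn = 0x0123456789abcdefULL;
	uint64_t slot;

	/* Buggy layout: the output pointer is a char *, so only one byte lands. */
	memset(&slot, 0xff, sizeof(slot));
	char *out_char = (char *)&slot;
	if (fake_put_user(pfn, out_char))
		return 1;
	printf("char * out: %016llx\n", (unsigned long long)slot);

	/* Fixed layout: a u64 * output pointer fills the whole 8-byte entry. */
	memset(&slot, 0xff, sizeof(slot));
	uint64_t *out_u64 = &slot;
	if (fake_put_user(pfn, out_u64))
		return 1;
	printf("u64  * out: %016llx\n", (unsigned long long)slot);

	return 0;
}

On a little-endian machine the first slot keeps seven 0xff filler bytes and only its
low byte changes, which is the "top 7 bytes of each PFN not being copied" effect.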
-rw-r--r--  fs/proc/task_mmu.c | 28
1 file changed, 9 insertions(+), 19 deletions(-)
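The other half of the change is visible to user space: with this patch, both the
file offset and the read length on /proc/pid/pagemap must be multiples of the
8-byte entry size or the read fails with -EINVAL.  Below is an illustrative
user-space sketch (not part of the patch) assuming the pagemap layout of one
64-bit entry per virtual page, so the entry for an address lives at offset
(vaddr / PAGE_SIZE) * 8.

#define _XOPEN_SOURCE 700
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Look up the pagemap entry for one page of our own address space. */
	uintptr_t vaddr = (uintptr_t)&vaddr;
	long page_size = sysconf(_SC_PAGESIZE);
	off_t offset = (off_t)(vaddr / page_size) * sizeof(uint64_t);
	uint64_t entry;

	int fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Offset and length are both multiples of 8, as the patch now requires. */
	if (pread(fd, &entry, sizeof(entry), offset) != (ssize_t)sizeof(entry)) {
		perror("pread");
		close(fd);
		return 1;
	}
	close(fd);

	printf("pagemap entry for %#lx: %#018llx\n",
	       (unsigned long)vaddr, (unsigned long long)entry);
	return 0;
}

An unaligned request, e.g. pread(fd, buf, 5, offset), would now be rejected
outright instead of returning a partially copied entry.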
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 88717c0f941b..17403629e330 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -496,7 +496,7 @@ const struct file_operations proc_clear_refs_operations = {
};
struct pagemapread {
- char __user *out, *end;
+ u64 __user *out, *end;
};
#define PM_ENTRY_BYTES sizeof(u64)
@@ -519,21 +519,11 @@ struct pagemapread {
static int add_to_pagemap(unsigned long addr, u64 pfn,
struct pagemapread *pm)
{
- /*
- * Make sure there's room in the buffer for an
- * entire entry. Otherwise, only copy part of
- * the pfn.
- */
- if (pm->out + PM_ENTRY_BYTES >= pm->end) {
- if (copy_to_user(pm->out, &pfn, pm->end - pm->out))
- return -EFAULT;
- pm->out = pm->end;
- return PM_END_OF_BUFFER;
- }
-
if (put_user(pfn, pm->out))
return -EFAULT;
- pm->out += PM_ENTRY_BYTES;
+ pm->out++;
+ if (pm->out >= pm->end)
+ return PM_END_OF_BUFFER;
return 0;
}
@@ -634,7 +624,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
ret = -EINVAL;
/* file position must be aligned */
- if (*ppos % PM_ENTRY_BYTES)
+ if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
goto out_task;
ret = 0;
@@ -664,8 +654,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
goto out_pages;
}
- pm.out = buf;
- pm.end = buf + count;
+ pm.out = (u64 *)buf;
+ pm.end = (u64 *)(buf + count);
if (!ptrace_may_attach(task)) {
ret = -EIO;
@@ -690,9 +680,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (ret == PM_END_OF_BUFFER)
ret = 0;
/* don't need mmap_sem for these, but this looks cleaner */
- *ppos += pm.out - buf;
+ *ppos += (char *)pm.out - buf;
if (!ret)
- ret = pm.out - buf;
+ ret = (char *)pm.out - buf;
}
out_pages: