Diffstat (limited to 'mm/nommu.c')
-rw-r--r--  mm/nommu.c  40
1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index ce74452c02d9..d1e076a487cb 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -57,6 +57,11 @@ DECLARE_RWSEM(nommu_vma_sem);
struct vm_operations_struct generic_file_vm_ops = {
};
+EXPORT_SYMBOL(vmalloc);
+EXPORT_SYMBOL(vfree);
+EXPORT_SYMBOL(vmalloc_to_page);
+EXPORT_SYMBOL(vmalloc_32);
+
/*
* Handle all mappings that got truncated by a "truncate()"
* system call.
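These exports make the nommu vmalloc family callable from loadable modules. A minimal, hypothetical module sketch (names are illustrative, not part of this patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

static void *demo_buf;

static int __init demo_init(void)
{
	demo_buf = vmalloc(64 * 1024);	/* resolvable from module code once exported */
	if (!demo_buf)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	vfree(demo_buf);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");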
@@ -142,6 +147,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
return(i);
}
+EXPORT_SYMBOL(get_user_pages);
+
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
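Exporting get_user_pages() likewise opens it up to modules. A hedged caller sketch using the 8-argument calling convention of this era (helper name and variables are hypothetical):

#include <linux/mm.h>
#include <linux/sched.h>

/* pin a single user page for write access; uaddr is a user virtual address */
static struct page *demo_pin_page(unsigned long uaddr)
{
	struct page *page;
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1,
			     1 /* write */, 0 /* force */, &page, NULL);
	up_read(&current->mm->mmap_sem);

	return (ret == 1) ? page : NULL;	/* caller drops the ref with put_page() */
}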
@@ -150,8 +157,7 @@ void vfree(void *addr)
kfree(addr);
}
-void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask,
- pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
/*
* kmalloc doesn't like __GFP_HIGHMEM for some reason
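The prototype switches to the new gfp_t typedef for allocation flags, replacing the old "unsigned int __nocast" spelling. The hunk cuts off before the body; judging by the comment, the nommu fallback is presumably little more than handing the request to kmalloc with __GFP_HIGHMEM masked off. A sketch, not the verbatim source:

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/* no page tables on nommu: serve the request from the slab allocator,
	 * stripping __GFP_HIGHMEM since kmalloc cannot honour it */
	return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
}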
@@ -852,7 +858,7 @@ unsigned long do_mmap_pgoff(struct file *file,
error_getting_vma:
up_write(&nommu_vma_sem);
kfree(vml);
- printk("Allocation of vml for %lu byte allocation from process %d failed\n",
+ printk("Allocation of vma for %lu byte allocation from process %d failed\n",
len, current->pid);
show_free_areas();
return -ENOMEM;
@@ -909,7 +915,7 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next)
if ((*parent)->vma->vm_start == addr &&
- (*parent)->vma->vm_end == end)
+ ((len == 0) || ((*parent)->vma->vm_end == end)))
goto found;
printk("munmap of non-mmaped memory by process %d (%s): %p\n",
@@ -925,6 +931,8 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
realalloc -= kobjsize(vml);
askedalloc -= sizeof(*vml);
kfree(vml);
+
+ update_hiwater_vm(mm);
mm->total_vm -= len >> PAGE_SHIFT;
#ifdef DEBUG
@@ -1041,7 +1049,8 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
EXPORT_SYMBOL(find_vma);
-struct page * follow_page(struct mm_struct *mm, unsigned long addr, int write)
+struct page *follow_page(struct mm_struct *mm, unsigned long address,
+ unsigned int foll_flags)
{
return NULL;
}
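follow_page() now takes FOLL_* bits instead of a bare write flag; on nommu the stub still returns NULL unconditionally. A hedged sketch of the new calling convention (helper name is hypothetical):

/* look up the page backing 'address', asking for write access and a reference;
 * callers of this era typically hold mm->mmap_sem */
static struct page *demo_lookup_page(struct mm_struct *mm, unsigned long address)
{
	struct page *page;

	page = follow_page(mm, address, FOLL_WRITE | FOLL_GET);
	if (!page)
		return NULL;	/* always the case on nommu, per the stub above */

	/* caller must eventually put_page() to balance FOLL_GET */
	return page;
}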
@@ -1054,7 +1063,8 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
unsigned long to, unsigned long size, pgprot_t prot)
{
- return -EPERM;
+ vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
+ return 0;
}
void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
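With remap_pfn_range() now reporting success on nommu (it records the physical base in vm_start instead of building page tables), the usual character-driver mmap pattern carries over unchanged. A hypothetical handler (driver name is illustrative):

static int demo_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* vm_pgoff carries the page frame number of the device memory to map */
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}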
@@ -1071,18 +1081,6 @@ void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}
-void update_mem_hiwater(struct task_struct *tsk)
-{
- unsigned long rss = get_mm_counter(tsk->mm, rss);
-
- if (likely(tsk->mm)) {
- if (tsk->mm->hiwater_rss < rss)
- tsk->mm->hiwater_rss = rss;
- if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
- tsk->mm->hiwater_vm = tsk->mm->total_vm;
- }
-}
-
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen,
int even_cows)
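The open-coded hiwater tracking removed here is presumably what the new update_hiwater_vm() helper, called from the do_munmap hunk above, factors out into common code. Roughly, inferred from the removed lines (a sketch, not the verbatim definition):

#define update_hiwater_vm(mm)	do {				\
		if ((mm)->hiwater_vm < (mm)->total_vm)		\
			(mm)->hiwater_vm = (mm)->total_vm;	\
	} while (0)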
@@ -1167,7 +1165,11 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
leave 3% of the size of this process for other processes */
allowed -= current->mm->total_vm / 32;
- if (atomic_read(&vm_committed_space) < allowed)
+ /*
+ * cast `allowed' as a signed long because vm_committed_space
+ * sometimes has a negative value
+ */
+ if (atomic_read(&vm_committed_space) < (long)allowed)
return 0;
vm_unacct_memory(pages);
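The cast matters because atomic_read() yields a plain int while 'allowed' is an unsigned long: without it, the usual arithmetic conversions make the comparison unsigned, so a slightly negative vm_committed_space compares as a huge value and the check wrongly fails. A standalone illustration of the pitfall:

#include <stdio.h>

int main(void)
{
	int committed = -8;		/* vm_committed_space can dip below zero */
	unsigned long allowed = 1000;

	/* 'committed' is converted to unsigned long here, so -8 compares as huge */
	printf("unsigned compare: %d\n", committed < allowed);		/* prints 0 */

	/* casting 'allowed' to long keeps the comparison signed */
	printf("signed compare:   %d\n", committed < (long)allowed);	/* prints 1 */
	return 0;
}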