Diffstat (limited to 'mm')
-rw-r--r--  mm/fadvise.c        |  2
-rw-r--r--  mm/filemap.c        | 11
-rw-r--r--  mm/filemap_xip.c    |  2
-rw-r--r--  mm/memory.c         | 32
-rw-r--r--  mm/mempolicy.c      |  2
-rw-r--r--  mm/mmap.c           | 10
-rw-r--r--  mm/nommu.c          | 12
-rw-r--r--  mm/page-writeback.c | 89
-rw-r--r--  mm/page_alloc.c     | 93
-rw-r--r--  mm/readahead.c      |  4
-rw-r--r--  mm/shmem.c          | 20
-rw-r--r--  mm/slab.c           | 88
-rw-r--r--  mm/swapfile.c       |  4
-rw-r--r--  mm/tiny-shmem.c     |  4
-rw-r--r--  mm/truncate.c       |  4
15 files changed, 279 insertions, 98 deletions
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 168c78a121bb..0df4c899e979 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -38,7 +38,7 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 	if (!file)
 		return -EBADF;
 
-	if (S_ISFIFO(file->f_dentry->d_inode->i_mode)) {
+	if (S_ISFIFO(file->f_path.dentry->d_inode->i_mode)) {
 		ret = -ESPIPE;
 		goto out;
 	}
diff --git a/mm/filemap.c b/mm/filemap.c
index af7e2f5caea9..8332c77b1bd1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1181,8 +1181,6 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 		if (pos < size) {
 			retval = generic_file_direct_IO(READ, iocb,
 						iov, pos, nr_segs);
-			if (retval > 0 && !is_sync_kiocb(iocb))
-				retval = -EIOCBQUEUED;
 			if (retval > 0)
 				*ppos = pos + retval;
 		}
@@ -2047,15 +2045,14 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	 * Sync the fs metadata but not the minor inode changes and
 	 * of course not the data as we did direct DMA for the IO.
 	 * i_mutex is held, which protects generic_osync_inode() from
-	 * livelocking.
+	 * livelocking.  AIO O_DIRECT ops attempt to sync metadata here.
 	 */
-	if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+	if ((written >= 0 || written == -EIOCBQUEUED) &&
+	    ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
 		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
 		if (err < 0)
 			written = err;
 	}
-	if (written == count && !is_sync_kiocb(iocb))
-		written = -EIOCBQUEUED;
 	return written;
 }
 EXPORT_SYMBOL(generic_file_direct_write);
@@ -2269,7 +2266,7 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
 	if (count == 0)
 		goto out;
 
-	err = remove_suid(file->f_dentry);
+	err = remove_suid(file->f_path.dentry);
 	if (err)
 		goto out;
 
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index b4fd0d7c9bfb..8d667617f558 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -379,7 +379,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	if (count == 0)
 		goto out_backing;
 
-	ret = remove_suid(filp->f_dentry);
+	ret = remove_suid(filp->f_path.dentry);
 	if (ret)
 		goto out_backing;
 
diff --git a/mm/memory.c b/mm/memory.c
index 4198df0dff1c..bf6100236e62 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1110,23 +1110,29 @@ static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
 	pte_t *pte;
 	spinlock_t *ptl;
+	int err = 0;
 
 	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
-		return -ENOMEM;
+		return -EAGAIN;
 	arch_enter_lazy_mmu_mode();
 	do {
 		struct page *page = ZERO_PAGE(addr);
 		pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
+
+		if (unlikely(!pte_none(*pte))) {
+			err = -EEXIST;
+			pte++;
+			break;
+		}
 		page_cache_get(page);
 		page_add_file_rmap(page);
 		inc_mm_counter(mm, file_rss);
-		BUG_ON(!pte_none(*pte));
 		set_pte_at(mm, addr, pte, zero_pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
-	return 0;
+	return err;
 }
 
 static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -1134,16 +1140,18 @@ static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
 {
 	pmd_t *pmd;
 	unsigned long next;
+	int err;
 
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		return -ENOMEM;
+		return -EAGAIN;
 	do {
 		next = pmd_addr_end(addr, end);
-		if (zeromap_pte_range(mm, pmd, addr, next, prot))
-			return -ENOMEM;
+		err = zeromap_pte_range(mm, pmd, addr, next, prot);
+		if (err)
+			break;
 	} while (pmd++, addr = next, addr != end);
-	return 0;
+	return err;
 }
 
 static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
@@ -1151,16 +1159,18 @@ static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 {
 	pud_t *pud;
 	unsigned long next;
+	int err;
 
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		return -ENOMEM;
+		return -EAGAIN;
 	do {
 		next = pud_addr_end(addr, end);
-		if (zeromap_pmd_range(mm, pud, addr, next, prot))
-			return -ENOMEM;
+		err = zeromap_pmd_range(mm, pud, addr, next, prot);
+		if (err)
+			break;
 	} while (pud++, addr = next, addr != end);
-	return 0;
+	return err;
 }
 
 int zeromap_page_range(struct vm_area_struct *vma,
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b917d6fdc1bb..da9463946556 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1857,7 +1857,7 @@ int show_numa_map(struct seq_file *m, void *v)
 
 	if (file) {
 		seq_printf(m, " file=");
-		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n\t= ");
+		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
 		seq_printf(m, " heap");
 	} else if (vma->vm_start <= mm->start_stack &&
diff --git a/mm/mmap.c b/mm/mmap.c
index 7be110e98d4c..9717337293c3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -188,7 +188,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
 		struct file *file, struct address_space *mapping)
 {
 	if (vma->vm_flags & VM_DENYWRITE)
-		atomic_inc(&file->f_dentry->d_inode->i_writecount);
+		atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
 	if (vma->vm_flags & VM_SHARED)
 		mapping->i_mmap_writable--;
 
@@ -399,7 +399,7 @@ static inline void __vma_link_file(struct vm_area_struct *vma)
 		struct address_space *mapping = file->f_mapping;
 
 		if (vma->vm_flags & VM_DENYWRITE)
-			atomic_dec(&file->f_dentry->d_inode->i_writecount);
+			atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
 		if (vma->vm_flags & VM_SHARED)
 			mapping->i_mmap_writable++;
 
@@ -907,7 +907,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	 *  mounted, in which case we dont add PROT_EXEC.)
 	 */
 	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
-		if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)))
+		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
 			prot |= PROT_EXEC;
 
 	if (!len)
@@ -960,7 +960,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 			return -EAGAIN;
 	}
 
-	inode = file ? file->f_dentry->d_inode : NULL;
+	inode = file ? file->f_path.dentry->d_inode : NULL;
 
 	if (file) {
 		switch (flags & MAP_TYPE) {
@@ -989,7 +989,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 		case MAP_PRIVATE:
 			if (!(file->f_mode & FMODE_READ))
 				return -EACCES;
-			if (file->f_vfsmnt->mnt_flags & MNT_NOEXEC) {
+			if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
 				if (vm_flags & VM_EXEC)
 					return -EPERM;
 				vm_flags &= ~VM_MAYEXEC;
diff --git a/mm/nommu.c b/mm/nommu.c
index af874569d0f1..23fb033e596d 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -523,7 +523,7 @@ static int validate_mmap_request(struct file *file,
 	 */
 	mapping = file->f_mapping;
 	if (!mapping)
-		mapping = file->f_dentry->d_inode->i_mapping;
+		mapping = file->f_path.dentry->d_inode->i_mapping;
 
 	capabilities = 0;
 	if (mapping && mapping->backing_dev_info)
@@ -532,7 +532,7 @@ static int validate_mmap_request(struct file *file,
 	if (!capabilities) {
 		/* no explicit capabilities set, so assume some
 		 * defaults */
-		switch (file->f_dentry->d_inode->i_mode & S_IFMT) {
+		switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
 		case S_IFREG:
 		case S_IFBLK:
 			capabilities = BDI_CAP_MAP_COPY;
@@ -563,11 +563,11 @@ static int validate_mmap_request(struct file *file,
 		    !(file->f_mode & FMODE_WRITE))
 			return -EACCES;
 
-		if (IS_APPEND(file->f_dentry->d_inode) &&
+		if (IS_APPEND(file->f_path.dentry->d_inode) &&
 		    (file->f_mode & FMODE_WRITE))
 			return -EACCES;
 
-		if (locks_verify_locked(file->f_dentry->d_inode))
+		if (locks_verify_locked(file->f_path.dentry->d_inode))
 			return -EAGAIN;
 
 		if (!(capabilities & BDI_CAP_MAP_DIRECT))
@@ -598,7 +598,7 @@ static int validate_mmap_request(struct file *file,
 
 		/* handle executable mappings and implied executable
 		 * mappings */
-		if (file->f_vfsmnt->mnt_flags & MNT_NOEXEC) {
+		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
 			if (prot & PROT_EXEC)
 				return -EPERM;
 		}
@@ -833,7 +833,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 			continue;
 
 		/* search for overlapping mappings on the same file */
-		if (vma->vm_file->f_dentry->d_inode != file->f_dentry->d_inode)
+		if (vma->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode)
 			continue;
 
 		if (vma->vm_pgoff >= pgoff + pglen)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 8d9b19f239c3..237107c1b084 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -21,6 +21,7 @@
 #include <linux/writeback.h>
 #include <linux/init.h>
 #include <linux/backing-dev.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/blkdev.h>
 #include <linux/mpage.h>
 #include <linux/rmap.h>
@@ -761,23 +762,24 @@ int __set_page_dirty_nobuffers(struct page *page)
 		struct address_space *mapping = page_mapping(page);
 		struct address_space *mapping2;
 
-		if (mapping) {
-			write_lock_irq(&mapping->tree_lock);
-			mapping2 = page_mapping(page);
-			if (mapping2) { /* Race with truncate? */
-				BUG_ON(mapping2 != mapping);
-				if (mapping_cap_account_dirty(mapping))
-					__inc_zone_page_state(page,
-								NR_FILE_DIRTY);
-				radix_tree_tag_set(&mapping->page_tree,
-					page_index(page), PAGECACHE_TAG_DIRTY);
-			}
-			write_unlock_irq(&mapping->tree_lock);
-			if (mapping->host) {
-				/* !PageAnon && !swapper_space */
-				__mark_inode_dirty(mapping->host,
-							I_DIRTY_PAGES);
+		if (!mapping)
+			return 1;
+
+		write_lock_irq(&mapping->tree_lock);
+		mapping2 = page_mapping(page);
+		if (mapping2) { /* Race with truncate? */
+			BUG_ON(mapping2 != mapping);
+			if (mapping_cap_account_dirty(mapping)) {
+				__inc_zone_page_state(page, NR_FILE_DIRTY);
+				task_io_account_write(PAGE_CACHE_SIZE);
 			}
+			radix_tree_tag_set(&mapping->page_tree,
+				page_index(page), PAGECACHE_TAG_DIRTY);
+		}
+		write_unlock_irq(&mapping->tree_lock);
+		if (mapping->host) {
+			/* !PageAnon && !swapper_space */
+			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 		}
 		return 1;
 	}
@@ -851,27 +853,26 @@ int test_clear_page_dirty(struct page *page)
 	struct address_space *mapping = page_mapping(page);
 	unsigned long flags;
 
-	if (mapping) {
-		write_lock_irqsave(&mapping->tree_lock, flags);
-		if (TestClearPageDirty(page)) {
-			radix_tree_tag_clear(&mapping->page_tree,
-						page_index(page),
-						PAGECACHE_TAG_DIRTY);
-			write_unlock_irqrestore(&mapping->tree_lock, flags);
-			/*
-			 * We can continue to use `mapping' here because the
-			 * page is locked, which pins the address_space
-			 */
-			if (mapping_cap_account_dirty(mapping)) {
-				page_mkclean(page);
-				dec_zone_page_state(page, NR_FILE_DIRTY);
-			}
-			return 1;
-		}
+	if (!mapping)
+		return TestClearPageDirty(page);
+
+	write_lock_irqsave(&mapping->tree_lock, flags);
+	if (TestClearPageDirty(page)) {
+		radix_tree_tag_clear(&mapping->page_tree,
+				page_index(page), PAGECACHE_TAG_DIRTY);
 		write_unlock_irqrestore(&mapping->tree_lock, flags);
-		return 0;
+		/*
+		 * We can continue to use `mapping' here because the
+		 * page is locked, which pins the address_space
+		 */
+		if (mapping_cap_account_dirty(mapping)) {
+			page_mkclean(page);
+			dec_zone_page_state(page, NR_FILE_DIRTY);
+		}
+		return 1;
 	}
-	return TestClearPageDirty(page);
+	write_unlock_irqrestore(&mapping->tree_lock, flags);
+	return 0;
 }
 EXPORT_SYMBOL(test_clear_page_dirty);
 
@@ -893,17 +894,17 @@ int clear_page_dirty_for_io(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
-	if (mapping) {
-		if (TestClearPageDirty(page)) {
-			if (mapping_cap_account_dirty(mapping)) {
-				page_mkclean(page);
-				dec_zone_page_state(page, NR_FILE_DIRTY);
-			}
-			return 1;
+	if (!mapping)
+		return TestClearPageDirty(page);
+
+	if (TestClearPageDirty(page)) {
+		if (mapping_cap_account_dirty(mapping)) {
+			page_mkclean(page);
+			dec_zone_page_state(page, NR_FILE_DIRTY);
 		}
-		return 0;
+		return 1;
 	}
-	return TestClearPageDirty(page);
+	return 0;
 }
 EXPORT_SYMBOL(clear_page_dirty_for_io);
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cace22b3ac25..e6b17b2989e0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -40,6 +40,7 @@
 #include <linux/sort.h>
 #include <linux/pfn.h>
 #include <linux/backing-dev.h>
+#include <linux/fault-inject.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -892,6 +893,91 @@ failed:
 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
 
+#ifdef CONFIG_FAIL_PAGE_ALLOC
+
+static struct fail_page_alloc_attr {
+	struct fault_attr attr;
+
+	u32 ignore_gfp_highmem;
+	u32 ignore_gfp_wait;
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+	struct dentry *ignore_gfp_highmem_file;
+	struct dentry *ignore_gfp_wait_file;
+
+#endif	/* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+} fail_page_alloc = {
+	.attr = FAULT_ATTR_INITIALIZER,
+	.ignore_gfp_wait = 1,
+	.ignore_gfp_highmem = 1,
+};
+
+static int __init setup_fail_page_alloc(char *str)
+{
+	return setup_fault_attr(&fail_page_alloc.attr, str);
+}
+__setup("fail_page_alloc=", setup_fail_page_alloc);
+
+static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+{
+	if (gfp_mask & __GFP_NOFAIL)
+		return 0;
+	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
+		return 0;
+	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
+		return 0;
+
+	return should_fail(&fail_page_alloc.attr, 1 << order);
+}
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init fail_page_alloc_debugfs(void)
+{
+	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	struct dentry *dir;
+	int err;
+
+	err = init_fault_attr_dentries(&fail_page_alloc.attr,
+				       "fail_page_alloc");
+	if (err)
+		return err;
+	dir = fail_page_alloc.attr.dentries.dir;
+
+	fail_page_alloc.ignore_gfp_wait_file =
+		debugfs_create_bool("ignore-gfp-wait", mode, dir,
+				      &fail_page_alloc.ignore_gfp_wait);
+
+	fail_page_alloc.ignore_gfp_highmem_file =
+		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
+				      &fail_page_alloc.ignore_gfp_highmem);
+
+	if (!fail_page_alloc.ignore_gfp_wait_file ||
+	    !fail_page_alloc.ignore_gfp_highmem_file) {
+		err = -ENOMEM;
+		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
+		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
+		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
+	}
+
+	return err;
+}
+
+late_initcall(fail_page_alloc_debugfs);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+#else /* CONFIG_FAIL_PAGE_ALLOC */
+
+static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+{
+	return 0;
+}
+
+#endif /* CONFIG_FAIL_PAGE_ALLOC */
+
 /*
  * Return 1 if free pages are above 'mark'. This takes into account the order
  * of the allocation.
@@ -1136,6 +1222,9 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
 
 	might_sleep_if(wait);
 
+	if (should_fail_alloc_page(gfp_mask, order))
+		return NULL;
+
 restart:
 	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */
 
@@ -3244,7 +3333,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 	if (numentries > max)
 		numentries = max;
 
-	log2qty = long_log2(numentries);
+	log2qty = ilog2(numentries);
 
 	do {
 		size = bucketsize << log2qty;
@@ -3266,7 +3355,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 	printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
 	       tablename,
 	       (1U << log2qty),
-	       long_log2(size) - PAGE_SHIFT,
+	       ilog2(size) - PAGE_SHIFT,
 	       size);
 
 	if (_hash_shift)
diff --git a/mm/readahead.c b/mm/readahead.c
index a386f2b6b335..0f539e8e827a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/pagevec.h>
 
 void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
@@ -151,6 +152,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 			put_pages_list(pages);
 			break;
 		}
+		task_io_account_read(PAGE_CACHE_SIZE);
 	}
 	pagevec_lru_add(&lru_pvec);
 	return ret;
@@ -450,7 +452,7 @@ static int make_ahead_window(struct address_space *mapping, struct file *filp,
  *
  * Note that @filp is purely used for passing on to the ->readpage[s]()
  * handler: it may refer to a different file from @mapping (so we may not use
- * @filp->f_mapping or @filp->f_dentry->d_inode here).
+ * @filp->f_mapping or @filp->f_path.dentry->d_inode here).
  * Also, @ra may not be equal to &@filp->f_ra.
  *
  */
diff --git a/mm/shmem.c b/mm/shmem.c
index c820b4f77b8d..4bb28d218eb5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1225,7 +1225,7 @@ failed:
 struct page *shmem_nopage(struct vm_area_struct *vma,
			unsigned long address, int *type)
 {
-	struct inode *inode = vma->vm_file->f_dentry->d_inode;
+	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 	struct page *page = NULL;
 	unsigned long idx;
 	int error;
@@ -1248,7 +1248,7 @@ static int shmem_populate(struct vm_area_struct *vma, unsigned long addr,
 			  unsigned long len, pgprot_t prot,
 			  unsigned long pgoff, int nonblock)
 {
-	struct inode *inode = vma->vm_file->f_dentry->d_inode;
+	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 	struct mm_struct *mm = vma->vm_mm;
 	enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
 	unsigned long size;
@@ -1293,14 +1293,14 @@ static int shmem_populate(struct vm_area_struct *vma,
 #ifdef CONFIG_NUMA
 int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 {
-	struct inode *i = vma->vm_file->f_dentry->d_inode;
+	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
 	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
 }
 
 struct mempolicy *
 shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
 {
-	struct inode *i = vma->vm_file->f_dentry->d_inode;
+	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
 	unsigned long idx;
 
 	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
@@ -1310,7 +1310,7 @@ shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
 
 int shmem_lock(struct file *file, int lock, struct user_struct *user)
 {
-	struct inode *inode = file->f_dentry->d_inode;
+	struct inode *inode = file->f_path.dentry->d_inode;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	int retval = -ENOMEM;
 
@@ -1422,7 +1422,7 @@ shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsig
 static ssize_t
 shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
-	struct inode	*inode = file->f_dentry->d_inode;
+	struct inode	*inode = file->f_path.dentry->d_inode;
 	loff_t		pos;
 	unsigned long	written;
 	ssize_t		err;
@@ -1442,7 +1442,7 @@ shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t
 	if (err || !count)
 		goto out;
 
-	err = remove_suid(file->f_dentry);
+	err = remove_suid(file->f_path.dentry);
 	if (err)
 		goto out;
 
@@ -1524,7 +1524,7 @@ out:
 
 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
 {
-	struct inode *inode = filp->f_dentry->d_inode;
+	struct inode *inode = filp->f_path.dentry->d_inode;
 	struct address_space *mapping = inode->i_mapping;
 	unsigned long index, offset;
 
@@ -2493,8 +2493,8 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 	d_instantiate(dentry, inode);
 	inode->i_size = size;
 	inode->i_nlink = 0;	/* It is unlinked */
-	file->f_vfsmnt = mntget(shm_mnt);
-	file->f_dentry = dentry;
+	file->f_path.mnt = mntget(shm_mnt);
+	file->f_path.dentry = dentry;
 	file->f_mapping = inode->i_mapping;
 	file->f_op = &shmem_file_operations;
 	file->f_mode = FMODE_WRITE | FMODE_READ;
diff --git a/mm/slab.c b/mm/slab.c
index 068cb4503c15..2c655532f5ef 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -107,6 +107,7 @@
 #include <linux/nodemask.h>
 #include <linux/mempolicy.h>
 #include <linux/mutex.h>
+#include <linux/fault-inject.h>
 #include <linux/rtmutex.h>
 
 #include <asm/cacheflush.h>
@@ -945,7 +946,8 @@ static void __devinit start_cpu_timer(int cpu)
 	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
 		INIT_DELAYED_WORK(reap_work, cache_reap);
-		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
+		schedule_delayed_work_on(cpu, reap_work,
+					__round_jiffies_relative(HZ, cpu));
 	}
 }
 
@@ -3088,12 +3090,89 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
+#ifdef CONFIG_FAILSLAB
+
+static struct failslab_attr {
+
+	struct fault_attr attr;
+
+	u32 ignore_gfp_wait;
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+	struct dentry *ignore_gfp_wait_file;
+#endif
+
+} failslab = {
+	.attr = FAULT_ATTR_INITIALIZER,
+	.ignore_gfp_wait = 1,
+};
+
+static int __init setup_failslab(char *str)
+{
+	return setup_fault_attr(&failslab.attr, str);
+}
+__setup("failslab=", setup_failslab);
+
+static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
+{
+	if (cachep == &cache_cache)
+		return 0;
+	if (flags & __GFP_NOFAIL)
+		return 0;
+	if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
+		return 0;
+
+	return should_fail(&failslab.attr, obj_size(cachep));
+}
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init failslab_debugfs(void)
+{
+	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	struct dentry *dir;
+	int err;
+
+	err = init_fault_attr_dentries(&failslab.attr, "failslab");
+	if (err)
+		return err;
+	dir = failslab.attr.dentries.dir;
+
+	failslab.ignore_gfp_wait_file =
+		debugfs_create_bool("ignore-gfp-wait", mode, dir,
+				      &failslab.ignore_gfp_wait);
+
+	if (!failslab.ignore_gfp_wait_file) {
+		err = -ENOMEM;
+		debugfs_remove(failslab.ignore_gfp_wait_file);
+		cleanup_fault_attr_dentries(&failslab.attr);
+	}
+
+	return err;
+}
+
+late_initcall(failslab_debugfs);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+#else /* CONFIG_FAILSLAB */
+
+static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
+{
+	return 0;
+}
+
+#endif /* CONFIG_FAILSLAB */
+
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *objp;
 	struct array_cache *ac;
 
 	check_irq_off();
+
+	if (should_failslab(cachep, flags))
+		return NULL;
+
 	ac = cpu_cache_get(cachep);
 	if (likely(ac->avail)) {
 		STATS_INC_ALLOCHIT(cachep);
@@ -3182,7 +3261,7 @@ retry:
 	for (z = zonelist->zones; *z && !obj; z++) {
 		nid = zone_to_nid(*z);
 
-		if (cpuset_zone_allowed(*z, flags) &&
+		if (cpuset_zone_allowed(*z, flags | __GFP_HARDWALL) &&
 			cache->nodelists[nid] &&
 			cache->nodelists[nid]->free_objects)
 				obj = ____cache_alloc_node(cache,
@@ -3928,7 +4007,7 @@ static void cache_reap(struct work_struct *unused)
 	if (!mutex_trylock(&cache_chain_mutex)) {
 		/* Give up. Setup the next iteration. */
 		schedule_delayed_work(&__get_cpu_var(reap_work),
-				      REAPTIMEOUT_CPUC);
+				      round_jiffies_relative(REAPTIMEOUT_CPUC));
 		return;
 	}
 
@@ -3974,7 +4053,8 @@ next:
 	next_reap_node();
 	refresh_cpu_vm_stats(smp_processor_id());
 	/* Set up the next iteration */
-	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
+	schedule_delayed_work(&__get_cpu_var(reap_work),
+		round_jiffies_relative(REAPTIMEOUT_CPUC));
 }
 
 #ifdef CONFIG_PROC_FS
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c5431072f422..b9fc0e5de6d5 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1357,10 +1357,10 @@ static int swap_show(struct seq_file *swap, void *v)
 	}
 
 	file = ptr->swap_file;
-	len = seq_path(swap, file->f_vfsmnt, file->f_dentry, " \t\n\\");
+	len = seq_path(swap, file->f_path.mnt, file->f_path.dentry, " \t\n\\");
 	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
 		       len < 40 ? 40 - len : 1, " ",
-		       S_ISBLK(file->f_dentry->d_inode->i_mode) ?
+		       S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
 				"partition" : "file\t",
 		       ptr->pages << (PAGE_SHIFT - 10),
 		       ptr->inuse_pages << (PAGE_SHIFT - 10),
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index 5f2cbf0f153c..c7f6e1914bc4 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -79,8 +79,8 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 	d_instantiate(dentry, inode);
 	inode->i_nlink = 0;	/* It is unlinked */
 
-	file->f_vfsmnt = mntget(shm_mnt);
-	file->f_dentry = dentry;
+	file->f_path.mnt = mntget(shm_mnt);
+	file->f_path.dentry = dentry;
 	file->f_mapping = inode->i_mapping;
 	file->f_op = &ramfs_file_operations;
 	file->f_mode = FMODE_WRITE | FMODE_READ;
diff --git a/mm/truncate.c b/mm/truncate.c
index e07b1e682c38..9bfb8e853860 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/buffer_head.h>	/* grr. try_to_release_page,
 				   do_invalidatepage */
@@ -69,7 +70,8 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 	if (PagePrivate(page))
 		do_invalidatepage(page, 0);
 
-	clear_page_dirty(page);
+	if (test_clear_page_dirty(page))
+		task_io_account_cancelled_write(PAGE_CACHE_SIZE);
 	ClearPageUptodate(page);
 	ClearPageMappedToDisk(page);
 	remove_from_page_cache(page);