| author | Otavio Salvador <otavio@ossystems.com.br> | 2018-10-02 10:26:02 -0300 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2018-10-02 10:26:02 -0300 |
| commit | 5f3fecbc0715a70437501e1d85e74726c4f561be | |
| tree | f10a85be5659f9b2f8f3ca4aff41d8fd3aa01b37 /mm | |
| parent | ded0ac8f40f9561d8ab2f8ef6c9ff8dc435f2b41 | |
| parent | c98b0c5db65001d10d7f0af026801b9c193c369e | |
Merge pull request #38 from gibsson/4.9-2.3.x-imx
Merge v4.9.130 into 4.9-2.3.x imx
Diffstat (limited to 'mm')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | mm/debug.c | 4 |
| -rw-r--r-- | mm/shmem.c | 2 |
| -rw-r--r-- | mm/vmacache.c | 38 |
| -rw-r--r-- | mm/vmscan.c | 3 |
4 files changed, 6 insertions, 41 deletions
diff --git a/mm/debug.c b/mm/debug.c
index 9feb699c5d25..bebe48aece6d 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -95,7 +95,7 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-        pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+        pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
 #ifdef CONFIG_MMU
                 "get_unmapped_area %p\n"
 #endif
@@ -125,7 +125,7 @@ void dump_mm(const struct mm_struct *mm)
 #endif
                 "def_flags: %#lx(%pGv)\n",
 
-                mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+                mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
 #ifdef CONFIG_MMU
                 mm->get_unmapped_area,
 #endif
diff --git a/mm/shmem.c b/mm/shmem.c
index 42ca5df2c0e3..4b5cca167baf 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2160,6 +2160,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                         mpol_shared_policy_init(&info->policy, NULL);
                         break;
                 }
+
+                lockdep_annotate_inode_mutex_key(inode);
         } else
                 shmem_free_inode(sb);
         return inode;
diff --git a/mm/vmacache.c b/mm/vmacache.c
index 035fdeb35b43..c9ca3dd46b97 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -6,44 +6,6 @@
 #include <linux/vmacache.h>
 
 /*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
-        struct task_struct *g, *p;
-
-        count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
-        /*
-         * Single threaded tasks need not iterate the entire
-         * list of process. We can avoid the flushing as well
-         * since the mm's seqnum was increased and don't have
-         * to worry about other threads' seqnum. Current's
-         * flush will occur upon the next lookup.
-         */
-        if (atomic_read(&mm->mm_users) == 1)
-                return;
-
-        rcu_read_lock();
-        for_each_process_thread(g, p) {
-                /*
-                 * Only flush the vmacache pointers as the
-                 * mm seqnum is already set and curr's will
-                 * be set upon invalidation when the next
-                 * lookup is done.
-                 */
-                if (mm == p->mm)
-                        vmacache_flush(p);
-        }
-        rcu_read_unlock();
-}
-
-/*
  * This task may be accessing a foreign mm via (for example)
  * get_user_pages()->find_vma(). The vmacache is task-local and this
  * task's vmacache pertains to a different mm (ie, its own). There is
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f03ca5ab86b1..4e5846b8b5eb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3123,6 +3123,7 @@ static bool zone_balanced(struct zone *zone, int order, int classzone_idx)
          */
         clear_bit(PGDAT_CONGESTED, &zone->zone_pgdat->flags);
         clear_bit(PGDAT_DIRTY, &zone->zone_pgdat->flags);
+        clear_bit(PGDAT_WRITEBACK, &zone->zone_pgdat->flags);
 
         return true;
 }
@@ -3300,7 +3301,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
          * If we're getting trouble reclaiming, start doing writepage
          * even in laptop mode.
          */
-        if (sc.priority < DEF_PRIORITY - 2 || !pgdat_reclaimable(pgdat))
+        if (sc.priority < DEF_PRIORITY - 2)
                 sc.may_writepage = 1;
 
         /* Call soft limit reclaim before calling shrink_node.  */
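For context on the mm/vmacache.c hunk: vmacache_flush_all() can be deleted outright because each task's VMA cache is already validated against a per-mm sequence number on lookup, so bumping that seqnum invalidates every task's cache lazily, with no task-list walk at all. The mm/debug.c hunk (format specifier %d to %llu) indicates the counter was widened to 64 bits in the same series, which removes the overflow case the full flush existed to handle. Below is a minimal userspace sketch of this lazy, seqnum-based invalidation pattern; the names (struct mm, struct task_cache, mm_invalidate, cache_lookup) are illustrative stand-ins, not the kernel's actual API.

```c
/*
 * Sketch of seqnum-based cache invalidation, assuming a per-task
 * cache validated against a per-mm sequence number (as the kernel's
 * vmacache does). Not kernel code; all names are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CACHE_SLOTS 4

struct mm {                  /* stand-in for struct mm_struct */
        uint64_t seqnum;     /* bumped whenever the VMA set changes */
};

struct task_cache {          /* stand-in for the per-task vmacache */
        uint64_t seqnum;     /* mm->seqnum this cache was filled at */
        void *slot[CACHE_SLOTS];
};

/* Writer side: "flush" every task's cache in O(1), no task walk. */
static void mm_invalidate(struct mm *mm)
{
        mm->seqnum++;
}

/* Reader side: detect staleness and flush lazily on the next lookup. */
static void *cache_lookup(struct task_cache *tc, struct mm *mm, int idx)
{
        if (tc->seqnum != mm->seqnum) {       /* stale: drop and resync */
                memset(tc->slot, 0, sizeof(tc->slot));
                tc->seqnum = mm->seqnum;
        }
        return tc->slot[idx % CACHE_SLOTS];
}

int main(void)
{
        struct mm mm = { .seqnum = 1 };
        struct task_cache tc = { .seqnum = 1, .slot = { (void *)0x1 } };

        printf("before invalidate: %p\n", cache_lookup(&tc, &mm, 0)); /* hit */
        mm_invalidate(&mm);                       /* replaces the full flush */
        printf("after invalidate:  %p\n", cache_lookup(&tc, &mm, 0)); /* NULL */
        return 0;
}
```

With a 64-bit counter the seqnum cannot realistically wrap, so the eager walk over every thread (and its rcu_read_lock()/for_each_process_thread() cost) buys nothing the lazy check does not already provide.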