diff options
author | Terje Bergstrom <tbergstrom@nvidia.com> | 2013-12-31 13:57:55 +0200 |
---|---|---|
committer | Tom Cherry <tcherry@nvidia.com> | 2014-01-22 16:02:41 -0800 |
commit | c0dcb6d1228081286d77cd9d2d955196a53fee34 (patch) | |
tree | 8293945b61d27d6bda39b1b827e7425519cac7ad | |
parent | d985815d1f996e7f43eacef69b728e9c404882a6 (diff) |
mm: Add TLB flush all threshold
With arm64 the range passed to flush_tlb_kernel_range() can be huge.
Add a tunable that caps how large an area is flushed by range. Beyond
that threshold the whole TLB is flushed instead.
Set the threshold to 512MB.
Bug 1432908
Change-Id: I685fc2c3ffaad8979bfa8fde27e8c2948a2104cc
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/351026
Reviewed-by: Juha Tukkinen <jtukkinen@nvidia.com>
-rw-r--r-- | kernel/sysctl.c | 8 | ||||
-rw-r--r-- | mm/vmalloc.c | 14 |
2 files changed, 20 insertions, 2 deletions
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6d8d4a44dd57..2ba77228ff97 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -96,6 +96,7 @@ /* External variables not in a header file. */ extern int sysctl_lazy_vfree_pages; +extern int sysctl_lazy_vfree_tlb_flush_all_threshold; extern int sysctl_overcommit_memory; extern int sysctl_overcommit_ratio; extern int max_threads; @@ -1078,6 +1079,13 @@ static struct ctl_table vm_table[] = { .proc_handler = proc_dointvec, }, { + .procname = "lazy_vfree_tlb_flush_all_threshold", + .data = &sysctl_lazy_vfree_tlb_flush_all_threshold, + .maxlen = sizeof(sysctl_lazy_vfree_tlb_flush_all_threshold), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { .procname = "overcommit_memory", .data = &sysctl_overcommit_memory, .maxlen = sizeof(sysctl_overcommit_memory), diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ce2f94918a13..666a6085ce83 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -555,6 +555,12 @@ static void vmap_debug_free_range(unsigned long start, unsigned long end) int sysctl_lazy_vfree_pages = 32UL * 1024 * 1024 / PAGE_SIZE; +/* + * lazy_vfree_tlb_flush_all_threshold is the maximum size of TLB flush by + * area. Beyond that the whole TLB will be flushed. + */ +int sysctl_lazy_vfree_tlb_flush_all_threshold = SZ_512M; + static unsigned long lazy_max_pages(void) { unsigned int log; @@ -629,8 +635,12 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, if (nr) atomic_sub(nr, &vmap_lazy_nr); - if (nr || force_flush) - flush_tlb_kernel_range(*start, *end); + if (nr || force_flush) { + if (*end - *start > sysctl_lazy_vfree_tlb_flush_all_threshold) + flush_tlb_all(); + else + flush_tlb_kernel_range(*start, *end); + } if (nr) { spin_lock(&vmap_area_lock); |