From 114276ac0a3beb9c391a410349bd770653e185ce Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Sun, 26 May 2013 17:32:13 +0300
Subject: mm, sched: Drop voluntary schedule from might_fault()

might_fault() is called from functions like copy_to_user() which most
callers expect to be very fast, like a couple of instructions.  So
functions like memcpy_toiovec() call them many times in a loop.

But might_fault() calls might_sleep(), and with CONFIG_PREEMPT_VOLUNTARY
this results in a function call.

Let's not do this - just call __might_sleep(), which produces a
diagnostic for sleep within atomic, but drop might_preempt().

Here's a test sending traffic between the VM and the host; the host is
built with CONFIG_PREEMPT_VOLUNTARY:

 before:
	incoming: 7122.77 Mb/s
	outgoing: 8480.37 Mb/s

 after:
	incoming: 8619.24 Mb/s
	outgoing: 9455.42 Mb/s

As a side effect, this fixes an issue pointed out by Ingo: might_fault()
might schedule differently depending on PROVE_LOCKING.  Now there is no
preemption point in either case, so it's consistent.

Signed-off-by: Michael S. Tsirkin
Signed-off-by: Peter Zijlstra
Cc: Linus Torvalds
Cc: Andrew Morton
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1369577426-26721-10-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar
---
 mm/memory.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index 6dc1882fbd72..c1f190f51f6f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4222,7 +4222,8 @@ void might_fault(void)
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return;
 
-	might_sleep();
+	__might_sleep(__FILE__, __LINE__, 0);
+
 	/*
 	 * it would be nicer only to annotate paths which are not under
 	 * pagefault_disable, however that requires a larger audit and
--
cgit v1.2.3
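For context, with CONFIG_PREEMPT_VOLUNTARY might_sleep() is both a debugging
check and a voluntary preemption point.  A simplified sketch of the
include/linux/kernel.h definitions of that era (not the verbatim source):

/* Simplified sketch -- the real definitions live in include/linux/kernel.h. */
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched() _cond_resched()	/* the voluntary preemption point */
#else
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
void __might_sleep(const char *file, int line, int preempt_offset);
# define might_sleep() \
	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
#else
# define might_sleep() do { might_resched(); } while (0)
#endif

Calling __might_sleep() directly keeps the sleep-within-atomic diagnostic but
drops might_resched() -- the preemption point the changelog refers to as
might_preempt() -- so copy_to_user() and friends no longer pay a function-call
cost per invocation under CONFIG_PREEMPT_VOLUNTARY.
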
From 662bbcb2747c2422cf98d3d97619509379eee466 Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Sun, 26 May 2013 17:32:23 +0300
Subject: mm, sched: Allow uaccess in atomic with pagefault_disable()

This changes might_fault() so that it does not trigger a false positive
diagnostic for e.g. the following sequence:

	spin_lock_irqsave()
	pagefault_disable()
	copy_to_user()
	pagefault_enable()
	spin_unlock_irqrestore()

In particular vhost wants to do this, to call socket ops from under a
lock.

There are 3 cases to consider:

 - CONFIG_PROVE_LOCKING - might_fault is non-inline so it's easy to move
   the in_atomic test there to fix up the false positive warning.

 - CONFIG_DEBUG_ATOMIC_SLEEP - might_fault is currently inline, but we are
   calling a non-inline __might_sleep anyway, so let's use the non-inline
   version of might_fault that does the right thing.

 - !CONFIG_DEBUG_ATOMIC_SLEEP && !CONFIG_PROVE_LOCKING - __might_sleep is
   a nop, so might_fault is a nop.  Make this explicit.

Signed-off-by: Michael S. Tsirkin
Signed-off-by: Peter Zijlstra
Cc: Linus Torvalds
Cc: Andrew Morton
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1369577426-26721-11-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar
---
 mm/memory.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index c1f190f51f6f..d7d54a114773 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4210,7 +4210,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
 	up_read(&mm->mmap_sem);
 }
 
-#ifdef CONFIG_PROVE_LOCKING
+#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
 void might_fault(void)
 {
 	/*
@@ -4222,14 +4222,17 @@ void might_fault(void)
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return;
 
-	__might_sleep(__FILE__, __LINE__, 0);
-
 	/*
 	 * it would be nicer only to annotate paths which are not under
 	 * pagefault_disable, however that requires a larger audit and
 	 * providing helpers like get_user_atomic.
 	 */
-	if (!in_atomic() && current->mm)
+	if (in_atomic())
+		return;
+
+	__might_sleep(__FILE__, __LINE__, 0);
+
+	if (current->mm)
 		might_lock_read(&current->mm->mmap_sem);
 }
 EXPORT_SYMBOL(might_fault);
--
cgit v1.2.3
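To make the intended use concrete, here is a minimal sketch of the vhost-style
pattern the changelog describes: copying to user space from under a spinlock
with page faults disabled.  The lock, function and buffer names are
illustrative only, not taken from the vhost code:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

/* Hypothetical lock protecting whatever state 'src' is built from. */
static DEFINE_SPINLOCK(example_lock);

static int push_status_to_user(void __user *dst, const void *src, size_t len)
{
	unsigned long flags;
	unsigned long left;

	spin_lock_irqsave(&example_lock, flags);
	pagefault_disable();
	/* With page faults disabled this cannot sleep; it returns the
	 * number of bytes it could not copy if the page is not present. */
	left = __copy_to_user(dst, src, len);
	pagefault_enable();
	spin_unlock_irqrestore(&example_lock, flags);

	return left ? -EFAULT : 0;	/* caller must handle the failure */
}

With this commit, the might_fault() annotation in the uaccess path sees
in_atomic() (pagefault_disable() raises the preempt count) and returns early
instead of emitting a false sleep-within-atomic warning.
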
From d6e932177090463e5c709e9e61bbd705a33a1609 Mon Sep 17 00:00:00 2001
From: Libin
Date: Wed, 3 Jul 2013 15:01:26 -0700
Subject: mm: use vma_pages() to replace (vm_end - vm_start) >> PAGE_SHIFT

The (vma->vm_end - vma->vm_start) >> PAGE_SHIFT operation is implemented
as an inline function, vma_pages(), in linux/mm.h, so use it.

Signed-off-by: Libin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index 95d0cce63583..a101bbcacfd7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2904,7 +2904,7 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
 			details->first_index, details->last_index) {
 
 		vba = vma->vm_pgoff;
-		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
+		vea = vba + vma_pages(vma) - 1;
 		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
 		zba = details->first_index;
 		if (zba < vba)
--
cgit v1.2.3

From e6c495a96ce02574e765d5140039a64c8d4e8c9e Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Wed, 3 Jul 2013 15:03:31 -0700
Subject: mm: fix the TLB range flushed when __tlb_remove_page() runs out of slots

zap_pte_range loops from @addr to @end.  In the middle, if it runs out of
batching slots, TLB entries need to be flushed for @start to @interim,
NOT @interim to @end.

Since the ARC port doesn't use page free batching, I can't test it myself,
but this seems like the right thing to do.

Observed this when working on a fix for the issue at thread:
http://www.spinics.net/lists/linux-arch/msg21736.html

Signed-off-by: Vineet Gupta
Cc: Mel Gorman
Cc: Hugh Dickins
Cc: Rik van Riel
Cc: David Rientjes
Cc: Peter Zijlstra
Acked-by: Catalin Marinas
Cc: Max Filippov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index a101bbcacfd7..407533219673 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1101,6 +1101,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
+	unsigned long range_start = addr;
 
 again:
 	init_rss_vec(rss);
@@ -1206,12 +1207,14 @@ again:
 		force_flush = 0;
 
 #ifdef HAVE_GENERIC_MMU_GATHER
-		tlb->start = addr;
-		tlb->end = end;
+		tlb->start = range_start;
+		tlb->end = addr;
 #endif
 		tlb_flush_mmu(tlb);
-		if (addr != end)
+		if (addr != end) {
+			range_start = addr;
 			goto again;
+		}
 	}
 
 	return addr;
--
cgit v1.2.3

From 1895418189e08c1d1eec4fbdb5fb41d793f57ba5 Mon Sep 17 00:00:00 2001
From: Jiang Liu
Date: Wed, 3 Jul 2013 15:04:21 -0700
Subject: mm: kill global variable num_physpages

Now all references to num_physpages have been removed, so kill it.

Signed-off-by: Jiang Liu
Cc: Mel Gorman
Cc: Michel Lespinasse
Cc: Rik van Riel
Cc: Jiang Liu
Cc: Hugh Dickins
Cc: David Rientjes
Cc: Al Viro
Cc: Konstantin Khlebnikov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index 407533219673..b68812d682b6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -82,7 +82,6 @@ EXPORT_SYMBOL(max_mapnr);
 EXPORT_SYMBOL(mem_map);
 #endif
 
-unsigned long num_physpages;
 /*
  * A number of key systems in x86 including ioremap() rely on the assumption
  * that high_memory defines the upper bound on direct map memory, then end
@@ -92,7 +91,6 @@ unsigned long num_physpages;
  */
 void * high_memory;
 
-EXPORT_SYMBOL(num_physpages);
 EXPORT_SYMBOL(high_memory);
 
 /*
--
cgit v1.2.3

From 64363aad5ff1b878230e91223038c26a2205bff3 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Mon, 8 Jul 2013 16:00:18 -0700
Subject: mm: remove unused VM_ macros and expand other in-place

These VM_ macros aren't used very often and three of them aren't used at
all.

Expand the ones that are used in place, and remove all the now unused
#define VM_ macros.

VM_READHINTMASK, VM_NormalReadHint and VM_ClearReadHint were added just
before 2.4 and appear to have never been used.

Signed-off-by: Joe Perches
Acked-by: KOSAKI Motohiro
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index b68812d682b6..1ce2e2a734fc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1150,7 +1150,7 @@ again:
 			if (pte_dirty(ptent))
 				set_page_dirty(page);
 			if (pte_young(ptent) &&
-			    likely(!VM_SequentialReadHint(vma)))
+			    likely(!(vma->vm_flags & VM_SEQ_READ)))
 				mark_page_accessed(page);
 			rss[MM_FILEPAGES]--;
 		}
--
cgit v1.2.3
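For reference, the vma_pages() and VM_ macro cleanups above trade open-coded
expressions for (or expand them from) one-line helpers.  A sketch of what they
look like, approximating the linux/mm.h definitions of the time rather than
quoting them verbatim:

/* Approximate sketch of the helpers involved in the cleanups above. */

/* vma_pages(): the size of a VMA in pages, used by the vma_pages() commit. */
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* The removed read-hint wrapper expanded to a plain flag test, roughly: */
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)

After the VM_ macro removal, callers simply test vma->vm_flags & VM_SEQ_READ
directly, as the hunk above shows.
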
From 179ef71cbc085252e3fe6b8159263a7ed1d88ea4 Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Tue, 13 Aug 2013 16:00:49 -0700
Subject: mm: save soft-dirty bits on swapped pages

Andy Lutomirski reported that if a page with the _PAGE_SOFT_DIRTY bit set
gets swapped out, the bit is lost and no longer available when the pte is
read back.

To resolve this we introduce a _PTE_SWP_SOFT_DIRTY bit which is saved in
the pte entry for the page being swapped out.  When such a page is to be
read back from the swap cache, we check for the bit's presence and, if it
is there, we clear it and restore the former _PAGE_SOFT_DIRTY bit.

One of the problems was to find a place in the pte entry where we can
save the _PTE_SWP_SOFT_DIRTY bit while the page is in swap.  _PAGE_PSE
was chosen for that; it doesn't intersect with the swap entry format
stored in the pte.

Reported-by: Andy Lutomirski
Signed-off-by: Cyrill Gorcunov
Acked-by: Pavel Emelyanov
Cc: Matt Mackall
Cc: Xiao Guangrong
Cc: Marcelo Tosatti
Cc: KOSAKI Motohiro
Cc: Stephen Rothwell
Cc: Peter Zijlstra
Cc: "Aneesh Kumar K.V"
Reviewed-by: Minchan Kim
Reviewed-by: Wanpeng Li
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index 1ce2e2a734fc..e98ecad2b9c8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3115,6 +3115,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		exclusive = 1;
 	}
 	flush_icache_page(vma, page);
+	if (pte_swp_soft_dirty(orig_pte))
+		pte = pte_mksoft_dirty(pte);
 	set_pte_at(mm, address, page_table, pte);
 	if (page == swapcache)
 		do_page_add_anon_rmap(page, vma, address, exclusive);
--
cgit v1.2.3

From 41bb3476b361ef38576cf9d539b19bae2ac93167 Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Tue, 13 Aug 2013 16:00:51 -0700
Subject: mm: save soft-dirty bits on file pages

Andy reported that if a file page gets reclaimed we lose the soft-dirty
bit if it was there, so save the _PAGE_BIT_SOFT_DIRTY bit when the page
address gets encoded into the pte entry.  Thus when a #pf happens on such
a non-present pte we can restore it back.

Reported-by: Andy Lutomirski
Signed-off-by: Cyrill Gorcunov
Acked-by: Pavel Emelyanov
Cc: Matt Mackall
Cc: Xiao Guangrong
Cc: Marcelo Tosatti
Cc: KOSAKI Motohiro
Cc: Stephen Rothwell
Cc: Peter Zijlstra
Cc: "Aneesh Kumar K.V"
Cc: Minchan Kim
Cc: Wanpeng Li
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

(limited to 'mm/memory.c')

diff --git a/mm/memory.c b/mm/memory.c
index e98ecad2b9c8..40268410732a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1141,9 +1141,12 @@ again:
 				continue;
 			if (unlikely(details) && details->nonlinear_vma
 			    && linear_page_index(details->nonlinear_vma,
-						addr) != page->index)
-				set_pte_at(mm, addr, pte,
-					   pgoff_to_pte(page->index));
+						addr) != page->index) {
+				pte_t ptfile = pgoff_to_pte(page->index);
+				if (pte_soft_dirty(ptent))
+					pte_file_mksoft_dirty(ptfile);
+				set_pte_at(mm, addr, pte, ptfile);
+			}
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
@@ -3410,6 +3413,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (flags & FAULT_FLAG_WRITE)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+	else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
+		pte_mksoft_dirty(entry);
 	if (anon) {
 		inc_mm_counter_fast(mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, address);
--
cgit v1.2.3
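The two soft-dirty patches rely on small arch-level pte helpers
(pte_swp_soft_dirty(), pte_file_mksoft_dirty() and friends) introduced for x86
in the same series.  Their shape is roughly the sketch below; the exact
definitions live in arch/x86/include/asm/pgtable.h and pgtable_types.h, and
the bit choice here is taken from the changelogs above rather than quoted
source:

/*
 * Rough sketch of the x86 swap-pte soft-dirty helpers assumed by the
 * patches above (simplified; not the verbatim upstream definitions).
 *
 * While a page is swapped out, _PAGE_PSE is unused in the swap pte, so it
 * is reused to remember the soft-dirty state.
 */
#define _PAGE_SWP_SOFT_DIRTY	_PAGE_PSE

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

With helpers of this shape, do_swap_page() only has to check
pte_swp_soft_dirty(orig_pte) and reapply pte_mksoft_dirty() to the freshly
built pte, as the first hunk above does; the file-pte variants
(pte_file_mksoft_dirty(), pte_file_soft_dirty()) play the same role for the
non-present file pte encoding.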