author     Matthew Wilcox <willy@linux.intel.com>            2015-09-08 14:59:25 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-09-08 15:35:28 -0700
commit     843172978bb92997310d2f7fbc172ece423cfc02
tree       3dd6214e78238293b1ac4612e7fc35775b567da0  /mm/memory.c
parent     01a33b4ace68bc35679a347f21d5ed6e222e30dc
dax: fix race between simultaneous faults
If two threads write-fault on the same hole at the same time, the winner
of the race will return to userspace and complete their store, only to
have the loser overwrite their store with zeroes.

Fix this for now by taking i_mmap_rwsem for write instead of read, and
do so outside the call to get_block().  Now the loser of the race will
see the block has already been zeroed, and will not zero it again.

This severely limits our scalability.  I have ideas for improving it,
but those can wait for a later patch.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
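[Editor's note: to make the race concrete, here is a minimal standalone
sketch of the serialisation pattern the fix applies, in userspace C
rather than kernel code.  A pthread rwlock stands in for i_mmap_rwsem,
an "allocated" flag stands in for the state get_block() reports, and all
names are illustrative, not the kernel's.]

	/*
	 * Standalone illustration, not kernel code.  Taking the lock for
	 * WRITE around the allocate-and-zero step means only one thread
	 * zeroes the block; the loser sees allocated == 1 and skips the
	 * memset() that would otherwise wipe the winner's completed store.
	 * Build with: cc -pthread sketch.c
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	static pthread_rwlock_t fault_lock = PTHREAD_RWLOCK_INITIALIZER;
	static char block[4096];
	static int allocated;	/* has the hole been allocated and zeroed? */

	struct store {
		size_t off;
		const char *text;
	};

	static void *write_fault(void *arg)
	{
		struct store *s = arg;

		/*
		 * The "page fault": with the lock held for write, the
		 * allocation check and the zeroing are one atomic step.
		 * With a read lock here, both threads could reach the
		 * memset().
		 */
		pthread_rwlock_wrlock(&fault_lock);
		if (!allocated) {
			memset(block, 0, sizeof(block));
			allocated = 1;
		}
		pthread_rwlock_unlock(&fault_lock);

		/*
		 * Back in "userspace": complete the store that faulted.
		 * Under the buggy read-lock scheme, the other thread's
		 * zeroing could run after this and silently erase it.
		 */
		strcpy(block + s->off, s->text);
		return NULL;
	}

	int main(void)
	{
		struct store a = { 0, "store from thread A" };
		struct store b = { 64, "store from thread B" };
		pthread_t ta, tb;

		pthread_create(&ta, NULL, write_fault, &a);
		pthread_create(&tb, NULL, write_fault, &b);
		pthread_join(ta, NULL);
		pthread_join(tb, NULL);

		/* Both stores survive: neither was overwritten with zeroes. */
		printf("%s / %s\n", block, block + 64);
		return 0;
	}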
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index a3f9a8ccec0f..320c42e95e69 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2427,11 +2427,16 @@ void unmap_mapping_range(struct address_space *mapping,
 	if (details.last_index < details.first_index)
 		details.last_index = ULONG_MAX;
 
-	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
-	i_mmap_lock_write(mapping);
+	/*
+	 * DAX already holds i_mmap_lock to serialise file truncate vs
+	 * page fault and page fault vs page fault.
+	 */
+	if (!IS_DAX(mapping->host))
+		i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
-	i_mmap_unlock_write(mapping);
+	if (!IS_DAX(mapping->host))
+		i_mmap_unlock_write(mapping);
 }
EXPORT_SYMBOL(unmap_mapping_range);
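[Editor's note: the IS_DAX() tests above pair with the fs/dax.c half of
this commit, which is outside this diffstat.  That half makes the DAX
fault handler take i_mmap_rwsem for write itself, and the handler can
call unmap_mapping_range() while still holding it, so re-taking the lock
here would deadlock against the fault path.  A rough sketch of that call
pattern, heavily simplified and not the verbatim fs/dax.c code:]

	/* DAX write-fault path, simplified */
	i_mmap_lock_write(mapping);	/* fault vs fault, fault vs truncate */
	error = get_block(inode, block, &bh, 1);	/* allocate + zero */
	...
	/* Zapping a previously mapped page runs with i_mmap_rwsem held,
	 * so unmap_mapping_range() must not take it again for DAX. */
	unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT, PAGE_SIZE, 0);
	...
	i_mmap_unlock_write(mapping);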