From 3167760f83899ccda312b9ad9306ec9e5dda06d4 Mon Sep 17 00:00:00 2001
From: Dan Magenheimer
Date: Wed, 21 Sep 2011 11:56:28 -0400
Subject: mm: cleancache: s/flush/invalidate/

Per akpm's suggestions, alter the use of the term "flush" to
"invalidate".  The next patch will do this across all of MM.

This change is completely cosmetic.

[v9: akpm@linux-foundation.org: change "flush" to "invalidate", part 3]
Signed-off-by: Dan Magenheimer
Cc: Kamezawa Hiroyuki
Cc: Jan Beulich
Reviewed-by: Seth Jennings
Cc: Jeremy Fitzhardinge
Cc: Hugh Dickins
Cc: Johannes Weiner
Cc: Nitin Gupta
Cc: Matthew Wilcox
Cc: Chris Mason
Cc: Rik Riel
Cc: Andrew Morton
[v10: fixed conflict with the "fs: move code out of buffer.c" change]
Signed-off-by: Konrad Rzeszutek Wilk
---
 mm/truncate.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'mm/truncate.c')

diff --git a/mm/truncate.c b/mm/truncate.c
index 632b15e29f74..b4d575c9a0ee 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -52,7 +52,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
 	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
-	cleancache_flush_page(page->mapping, page);
+	cleancache_invalidate_page(page->mapping, page);
 	if (page_has_private(page))
 		do_invalidatepage(page, partial);
 }
@@ -213,7 +213,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	pgoff_t end;
 	int i;
 
-	cleancache_flush_inode(mapping);
+	cleancache_invalidate_inode(mapping);
 	if (mapping->nrpages == 0)
 		return;
 
@@ -292,7 +292,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		mem_cgroup_uncharge_end();
 		index++;
 	}
-	cleancache_flush_inode(mapping);
+	cleancache_invalidate_inode(mapping);
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
 
@@ -444,7 +444,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	int ret2 = 0;
 	int did_range_unmap = 0;
 
-	cleancache_flush_inode(mapping);
+	cleancache_invalidate_inode(mapping);
 	pagevec_init(&pvec, 0);
 	index = start;
 	while (index <= end && pagevec_lookup(&pvec, mapping, index,
@@ -500,7 +500,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		cond_resched();
 		index++;
 	}
-	cleancache_flush_inode(mapping);
+	cleancache_invalidate_inode(mapping);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
--
cgit v1.2.3
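Note how the renamed hooks above bracket the truncate: cleancache_invalidate_inode() is called once before the pagecache walk begins and once after it finishes, so a clean page that reclaim pushes into cleancache while the walk is in progress is still dropped. A minimal sketch of that ordering, in kernel-style C; the function name is invented and the body is condensed pseudocode, not the verbatim kernel loop:

void truncate_range_sketch(struct address_space *mapping,
                           loff_t lstart, loff_t lend)
{
        /* Drop any cleancache copies before touching the pagecache. */
        cleancache_invalidate_inode(mapping);

        /* ... walk the mapping and remove pagecache pages in range ... */

        /*
         * Reclaim can put a page into cleancache concurrently with the
         * walk; the second call closes that window.
         */
        cleancache_invalidate_inode(mapping);
}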
From 73c1e20430cbb96947fe3b835c6a3c13805eafd8 Mon Sep 17 00:00:00 2001
From: Liu Bo
Date: Tue, 21 Feb 2012 10:57:20 +0800
Subject: mm: fix comment typo of truncate_inode_pages_range

The kernel-doc comment above truncate_inode_pages_range() still carries
the old name truncate_inode_pages; update it to match the function.

Signed-off-by: Liu Bo
Signed-off-by: Jiri Kosina
---
 mm/truncate.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/truncate.c')

diff --git a/mm/truncate.c b/mm/truncate.c
index 632b15e29f74..a188058582e0 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -184,7 +184,7 @@ int invalidate_inode_page(struct page *page)
 }
 
 /**
- * truncate_inode_pages - truncate range of pages specified by start & end byte offsets
+ * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate
  * @lstart: offset from which to truncate
  * @lend: offset to which to truncate
--
cgit v1.2.3
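The one-word fix matters because the comment is kernel-doc: the tooling takes the documented symbol's name from the "name - description" line, so a stale name files the documentation under a function that does not exist at that location. The expected shape, shown with a hypothetical helper (frobnicate_range is invented for illustration):

/**
 * frobnicate_range - operate on a byte range (hypothetical example)
 * @buf: buffer to operate on
 * @start: first byte offset
 * @end: last byte offset
 *
 * The identifier before the dash must match the function that follows;
 * kernel-doc uses it as the name of the documented symbol.
 */
static void frobnicate_range(char *buf, unsigned long start, unsigned long end)
{
        /* ... */
}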
From 623e3db9f9b7d6e7b2a99180f9cf0825c936ab7a Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Wed, 28 Mar 2012 14:42:40 -0700
Subject: mm for fs: add truncate_pagecache_range()

Holepunching filesystems ext4 and xfs are using truncate_inode_pages_range
but forgetting to unmap pages first (ocfs2 remembers).  This is not really
a bug, since races already require truncate_inode_page() to handle that case
once the page is locked; but it can be very inefficient if the file being
punched happens to be mapped into many vmas.

Provide a drop-in replacement truncate_pagecache_range() which does the
unmapping pass first, handling the awkward mismatch between arguments to
truncate_inode_pages_range() and arguments to unmap_mapping_range().

Note that holepunching does not unmap privately COWed pages in the range:
POSIX requires that we do so when truncating, but it's hard to justify,
difficult to implement without an i_size cutoff, and no filesystem is
attempting to implement it.

Signed-off-by: Hugh Dickins
Cc: "Theodore Ts'o"
Cc: Andreas Dilger
Cc: Mark Fasheh
Cc: Joel Becker
Cc: Ben Myers
Cc: Alex Elder
Cc: Christoph Hellwig
Cc: Dave Chinner
Cc: Al Viro
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/truncate.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

(limited to 'mm/truncate.c')

diff --git a/mm/truncate.c b/mm/truncate.c
index 18aded3a89fc..61a183b89df6 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -626,3 +626,43 @@ int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 
 	return 0;
 }
+
+/**
+ * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
+ * @inode: inode
+ * @lstart: offset of beginning of hole
+ * @lend: offset of last byte of hole
+ *
+ * This function should typically be called before the filesystem
+ * releases resources associated with the freed range (eg. deallocates
+ * blocks). This way, pagecache will always stay logically coherent
+ * with on-disk format, and the filesystem would not have to deal with
+ * situations such as writepage being called for a page that has already
+ * had its underlying blocks deallocated.
+ */
+void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
+{
+	struct address_space *mapping = inode->i_mapping;
+	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
+	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
+	/*
+	 * This rounding is currently just for example: unmap_mapping_range
+	 * expands its hole outwards, whereas we want it to contract the hole
+	 * inwards.  However, existing callers of truncate_pagecache_range are
+	 * doing their own page rounding first; and truncate_inode_pages_range
+	 * currently BUGs if lend is not pagealigned-1 (it handles partial
+	 * page at start of hole, but not partial page at end of hole).  Note
+	 * unmap_mapping_range allows holelen 0 for all, and we allow lend -1.
+	 */
+
+	/*
+	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
+	 * once (before truncating pagecache), and without "even_cows" flag:
+	 * hole-punching should not remove private COWed pages from the hole.
+	 */
+	if ((u64)unmap_end > (u64)unmap_start)
+		unmap_mapping_range(mapping, unmap_start,
+				    1 + unmap_end - unmap_start, 0);
+	truncate_inode_pages_range(mapping, lstart, lend);
+}
+EXPORT_SYMBOL(truncate_pagecache_range);
--
cgit v1.2.3
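To make the contract-inwards rounding concrete, here is a small standalone example. It is userspace C with illustrative values: a 4096-byte PAGE_SIZE is assumed, and round_up/round_down are re-implemented to mirror the kernel macros for power-of-two sizes. Punching bytes 1000 through 12287 unmaps only the whole pages inside the hole, leaving the partial first page to truncate_inode_pages_range(), which zeroes it:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define round_up(x, y)   ((((x) + (y) - 1) / (y)) * (y))
#define round_down(x, y) (((x) / (y)) * (y))

int main(void)
{
        /* Punch a hole from byte 1000 through byte 12287 (lend is inclusive). */
        uint64_t lstart = 1000, lend = 12287;

        uint64_t unmap_start = round_up(lstart, PAGE_SIZE);        /* 4096  */
        uint64_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;  /* 12287 */

        /*
         * Only whole pages get unmapped: [4096, 12287] gives holelen 8192.
         * The partial page at 1000..4095 stays mapped and is handled by
         * the pagecache truncation pass instead.
         */
        if (unmap_end > unmap_start)
                printf("unmap offset=%llu holelen=%llu\n",
                       (unsigned long long)unmap_start,
                       (unsigned long long)(1 + unmap_end - unmap_start));
        return 0;
}

The lend == -1 ("to end of file") case the comment mentions also falls out of this arithmetic: 1 + lend wraps to 0, so with a page-aligned lstart of 0 the computed holelen is 0, which unmap_mapping_range() interprets as "through the end of the file".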