From 85dbe706074c8b9091a3bc5f24eff02a044701c3 Mon Sep 17 00:00:00 2001
From: Tang Chen
Date: Thu, 20 Jun 2013 18:10:19 +0800
Subject: page_isolation: Fix a comment typo in test_pages_isolated()

pageblock_nr_page should be pageblock_nr_pages, and fist is a typo of
first.

Signed-off-by: Tang Chen
Signed-off-by: Jiri Kosina
---
 mm/page_isolation.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'mm/page_isolation.c')

diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 383bdbb98b04..0cee10ffb98d 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -226,9 +226,9 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 	int ret;
 
 	/*
-	 * Note: pageblock_nr_page != MAX_ORDER. Then, chunks of free page
-	 * is not aligned to pageblock_nr_pages.
-	 * Then we just check pagetype fist.
+	 * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
+	 * are not aligned to pageblock_nr_pages.
+	 * Then we just check migratetype first.
 	 */
 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
@@ -238,7 +238,7 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
 	if ((pfn < end_pfn) || !page)
 		return -EBUSY;
-	/* Check all pages are free or Marked as ISOLATED */
+	/* Check all pages are free or marked as ISOLATED */
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lock, flags);
 	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
--
cgit v1.2.3


From c8721bbbdd36382de51cd6b7a56322e0acca2414 Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Wed, 11 Sep 2013 14:22:09 -0700
Subject: mm: memory-hotplug: enable memory hotplug to handle hugepage

Until now we couldn't offline memory blocks which contain hugepages,
because a hugepage was considered an unmovable page. But now with this
patch series a hugepage has become movable, so by using hugepage
migration we can offline such memory blocks.

What's different from other users of hugepage migration is that we need
to decompose all the hugepages inside the target memory block into free
buddy pages after hugepage migration, because otherwise free hugepages
remaining in the memory block interfere with the memory offlining. For
this reason we introduce the new functions dissolve_free_huge_page() and
dissolve_free_huge_pages().

Other than that, what this patch does is straightforward: it adds
hugepage handling to the functions which scan over pfns and collect the
hugepages to be migrated, and adds a hugepage allocation path to
alloc_migrate_target().

As for larger hugepages (1GB for x86_64), hot-removing them is not easy
because they are larger than a memory block, so for now we simply let
that case fail.
[yongjun_wei@trendmicro.com.cn: remove duplicated include]
Signed-off-by: Naoya Horiguchi
Acked-by: Andi Kleen
Cc: Hillf Danton
Cc: Wanpeng Li
Cc: Mel Gorman
Cc: Hugh Dickins
Cc: KOSAKI Motohiro
Cc: Michal Hocko
Cc: Rik van Riel
Cc: "Aneesh Kumar K.V"
Signed-off-by: Wei Yongjun
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_isolation.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

(limited to 'mm/page_isolation.c')

diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 0cee10ffb98d..d1473b2e9481 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -6,6 +6,7 @@
 #include <linux/page-isolation.h>
 #include <linux/pageblock-flags.h>
 #include <linux/memory.h>
+#include <linux/hugetlb.h>
 #include "internal.h"
 
 int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
@@ -252,6 +253,19 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
 {
 	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
 
+	/*
+	 * TODO: allocate a destination hugepage from a nearest neighbor node,
+	 * accordance with memory policy of the user process if possible. For
+	 * now as a simple work-around, we use the next node for destination.
+	 */
+	if (PageHuge(page)) {
+		nodemask_t src = nodemask_of_node(page_to_nid(page));
+		nodemask_t dst;
+		nodes_complement(dst, src);
+		return alloc_huge_page_node(page_hstate(compound_head(page)),
+					    next_node(page_to_nid(page), dst));
+	}
+
 	if (PageHighMem(page))
 		gfp_mask |= __GFP_HIGHMEM;
 
--
cgit v1.2.3
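
A note on the destination-node choice in the alloc_migrate_target() hunk
above: the patch builds the complement of the source node's mask and hands
the numerically next node id to alloc_huge_page_node(). The short userspace
sketch below models only that selection step. It is an illustration, not
kernel code: the nodemask is a plain unsigned int, MAX_NUMNODES is an
assumed value, and next_node_after() is a made-up helper that only
approximates the kernel's next_node() semantics (first node id greater than
the given one that is set in the mask, or MAX_NUMNODES if there is none).

/*
 * Userspace model of the "use the next node for destination" policy in
 * alloc_migrate_target() above. Illustrative only; not kernel code.
 */
#include <stdio.h>

#define MAX_NUMNODES 8

/* Approximation of next_node(): first node > 'node' that is set in 'mask'. */
static int next_node_after(int node, unsigned int mask)
{
	int n;

	for (n = node + 1; n < MAX_NUMNODES; n++)
		if (mask & (1u << n))
			return n;
	return MAX_NUMNODES;	/* no further node in the mask */
}

int main(void)
{
	int src = 1;	/* node currently holding the hugepage */

	/* nodes_complement(dst, src): every node id except the source one */
	unsigned int dst = ~(1u << src) & ((1u << MAX_NUMNODES) - 1);

	/* Prints "migrate hugepage from node 1 to node 2" */
	printf("migrate hugepage from node %d to node %d\n",
	       src, next_node_after(src, dst));
	return 0;
}

As the TODO comment in the patch itself says, this is a simple work-around
rather than a NUMA-aware policy: the complement of a one-node mask contains
every other node id regardless of distance, so the chosen destination is
simply the next node id after the source.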