author    Maxime Coquelin <maxime.coquelin@stericsson.com>    2013-04-04 11:15:05 +0530
committer Dan Willemsen <dwillemsen@nvidia.com>               2013-09-14 13:08:38 -0700
commit    ccc5d8f1fc45d4729e7fcb777d2644e3196f3511 (patch)
tree      1ef9c5cf712c7a69e9b6193c197e0bc01e5cd7cd /mm
parent    9b1fbdc0a4fe6347db6826431ffe721e86a68b3f (diff)
PASR: mm: Integrate PASR in Buddy allocator
Any allocator might call the PASR Framework for DDR power savings. Currently, only the Linux Buddy allocator is patched, but the HWMEM and PMEM physically contiguous memory allocators will follow.

The Linux Buddy allocator port uses Buddy specificities to reduce the overhead induced by the PASR Framework counter updates. Indeed, the PASR Framework is called only when MAX_ORDER buddies (4MB page blocks by default) are inserted into or removed from the free lists.

To port the PASR Framework into a new allocator:
* Call pasr_put(phys_addr, size) each time a memory chunk becomes unused.
* Call pasr_get(phys_addr, size) each time a memory chunk becomes used.

Bug 1201663
Bug 1033159

Change-Id: Ib90012d0707cf3c8b0cd9c979298f695cccc563e
Signed-off-by: Maxime Coquelin <maxime.coquelin@stericsson.com>
Signed-off-by: Prashant Gaikwad <pgaikwad@nvidia.com>
Reviewed-on: http://git-master/r/216386
Reviewed-by: Simone Willett <swillett@nvidia.com>
Tested-by: Simone Willett <swillett@nvidia.com>
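For illustration, a minimal sketch of that porting recipe applied to a hypothetical physically contiguous allocator could look as follows. Only the pasr_get()/pasr_put() calls come from the commit message; reserve_chunk(), release_chunk() and the example_* names are invented placeholders:

    #include <linux/io.h>	/* virt_to_phys()/phys_to_virt() */
    #include <linux/pasr.h>

    static void *example_alloc(size_t size)
    {
    	/* Hypothetical helper returning the base of a free chunk. */
    	phys_addr_t phys = reserve_chunk(size);

    	/* The chunk becomes used: tell the PASR Framework. */
    	pasr_get(phys, size);

    	return phys_to_virt(phys);
    }

    static void example_free(void *virt, size_t size)
    {
    	phys_addr_t phys = virt_to_phys(virt);

    	/* The chunk becomes unused: tell the PASR Framework. */
    	pasr_put(phys, size);

    	/* Hypothetical helper returning the chunk to the allocator. */
    	release_chunk(phys, size);
    }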
Diffstat (limited to 'mm')
-rw-r--r--    mm/page_alloc.c    9
1 file changed, 9 insertions, 0 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 63858c4522b3..fc48fcdd0478 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -56,6 +56,7 @@
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
+#include <linux/pasr.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
#include <linux/hugetlb.h>
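pasr_kget() and pasr_kput(), used in the hunks below, are page/order convenience wrappers around that phys_addr/size API. Their definition is not part of this diff; given the commit message's statement that the framework is only invoked for MAX_ORDER buddies, they plausibly reduce to something like this sketch (an assumption, not the actual <linux/pasr.h>):

    /*
     * Assumed wrappers: forward to the PASR core only for whole
     * MAX_ORDER - 1 buddies. With 4K pages and MAX_ORDER = 11,
     * PAGE_SIZE << (MAX_ORDER - 1) is the 4MB block size the
     * commit message mentions.
     */
    static inline void pasr_kget(struct page *page, int order)
    {
    	if (order == MAX_ORDER - 1)
    		pasr_get(PFN_PHYS(page_to_pfn(page)),
    			 PAGE_SIZE << (MAX_ORDER - 1));
    }

    static inline void pasr_kput(struct page *page, int order)
    {
    	if (order == MAX_ORDER - 1)
    		pasr_put(PFN_PHYS(page_to_pfn(page)),
    			 PAGE_SIZE << (MAX_ORDER - 1));
    }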
@@ -570,6 +571,7 @@ static inline void __free_one_page(struct page *page,
} else {
list_del(&buddy->lru);
zone->free_area[order].nr_free--;
+ pasr_kget(buddy, order);
rmv_page_order(buddy);
}
combined_idx = buddy_idx & page_idx;
@@ -603,6 +605,7 @@ static inline void __free_one_page(struct page *page,
list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
zone->free_area[order].nr_free++;
+ pasr_kput(page, order);
}
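In __free_one_page() above, every buddy absorbed while merging leaves its free list (zone->free_area[order].nr_free--, pasr_kget()), and the merged result is inserted exactly once at the out: label (nr_free++, pasr_kput()), so the PASR counters stay in lockstep with nr_free. A hypothetical walk-through, assuming the MAX_ORDER-gated wrappers sketched above:

    /*
     * Hypothetical trace: an order-0 page is freed and all of its
     * buddies are free, so it merges up to order MAX_ORDER - 1
     * (10 with 4K pages):
     *
     *   merge loop: pasr_kget(buddy, 0) ... pasr_kget(buddy, 9)
     *               -> all no-ops, order below MAX_ORDER - 1
     *   out:        pasr_kput(page, 10)
     *               -> pasr_put(): the whole 4MB block is counted unused
     *
     * Net cost: one PASR counter update per 4MB block, which is the
     * low overhead the commit message claims for the Buddy port.
     */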
static inline int free_pages_check(struct page *page)
@@ -835,6 +838,7 @@ static inline void expand(struct zone *zone, struct page *page,
#endif
list_add(&page[size].lru, &area->free_list[migratetype]);
area->nr_free++;
+ pasr_kput(page, high);
set_page_order(&page[size], high);
}
}
@@ -903,6 +907,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
list_del(&page->lru);
rmv_page_order(page);
area->nr_free--;
+ pasr_kget(page, current_order);
expand(zone, page, order, current_order, area, migratetype);
return page;
}
@@ -1037,6 +1042,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
page = list_entry(area->free_list[migratetype].next,
struct page, lru);
area->nr_free--;
+ pasr_kget(page, current_order);
/*
* If breaking a large block of pages, move all free
@@ -1424,6 +1430,8 @@ static int __isolate_free_page(struct page *page, unsigned int order)
/* Remove page from free list */
list_del(&page->lru);
zone->free_area[order].nr_free--;
+ pasr_kget(page, order);
+
rmv_page_order(page);
/* Set the pageblock if the isolated page is at least a pageblock */
@@ -6143,6 +6151,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
list_del(&page->lru);
rmv_page_order(page);
zone->free_area[order].nr_free--;
+ pasr_kget(page, order);
#ifdef CONFIG_HIGHMEM
if (PageHighMem(page))
totalhigh_pages -= 1 << order;
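Taken together, the hooks mirror nr_free on both sides: the paths that take blocks off a free list (__rmqueue_smallest(), __rmqueue_fallback(), __isolate_free_page(), __offline_isolated_pages(), the merge loop in __free_one_page()) pair nr_free-- with pasr_kget(), while the paths that insert blocks (expand(), the out: label of __free_one_page()) pair nr_free++ with pasr_kput(). The PASR core itself is outside this diff; as a loudly hedged sketch under invented names, pasr_put()/pasr_get() presumably keep a per-section count of free bytes and update the DDR partial-array self-refresh mask when a section becomes entirely free or is touched again:

    /*
     * Illustrative sketch only: the real PASR core is not in this
     * diff. struct pasr_section, PASR_SECTION_SZ, pasr_mask_section()
     * and pasr_unmask_section() are invented names.
     */
    struct pasr_section {
    	phys_addr_t start;	/* base address of the DDR section */
    	u64 free_size;		/* bytes currently unused in it */
    };

    static void example_pasr_put(struct pasr_section *s, u64 size)
    {
    	s->free_size += size;
    	/* Whole section unused: its refresh can be cut to save power. */
    	if (s->free_size == PASR_SECTION_SZ)
    		pasr_mask_section(s);
    }

    static void example_pasr_get(struct pasr_section *s, u64 size)
    {
    	/* Section about to hold live data again: refresh must stay on. */
    	if (s->free_size == PASR_SECTION_SZ)
    		pasr_unmask_section(s);
    	s->free_size -= size;
    }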