Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/amdtopology.c    |  3
-rw-r--r--  arch/x86/mm/fault.c          | 10
-rw-r--r--  arch/x86/mm/highmem_32.c     |  1
-rw-r--r--  arch/x86/mm/init.c           |  5
-rw-r--r--  arch/x86/mm/init_32.c        | 10
-rw-r--r--  arch/x86/mm/init_64.c        | 75
-rw-r--r--  arch/x86/mm/ioremap.c        |  7
-rw-r--r--  arch/x86/mm/numa.c           |  9
-rw-r--r--  arch/x86/mm/pageattr-test.c  |  5
-rw-r--r--  arch/x86/mm/pageattr.c       |  9
10 files changed, 54 insertions, 80 deletions
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 5247d01329ca..2ca15b59fb3f 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -130,9 +130,8 @@ int __init amd_numa_init(void)
                 }

                 limit >>= 16;
-                limit <<= 24;
-                limit |= (1<<24)-1;
                 limit++;
+                limit <<= 24;

                 if (limit > end)
                         limit = end;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 0e883364abb5..654be4ae3047 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -13,12 +13,12 @@
 #include <linux/perf_event.h>           /* perf_sw_event                */
 #include <linux/hugetlb.h>              /* hstate_index_to_shift        */
 #include <linux/prefetch.h>             /* prefetchw                    */
+#include <linux/context_tracking.h>     /* exception_enter(), ...       */

 #include <asm/traps.h>                  /* dotraplinkage, ...           */
 #include <asm/pgalloc.h>                /* pgd_*(), ...                 */
 #include <asm/kmemcheck.h>              /* kmemcheck_*(), ...           */
 #include <asm/fixmap.h>                 /* VSYSCALL_START               */
-#include <asm/context_tracking.h>       /* exception_enter(), ...       */

 /*
  * Page fault error code bits:
@@ -557,7 +557,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
         /*
          * Pentium F0 0F C7 C8 bug workaround:
          */
-        if (boot_cpu_data.f00f_bug) {
+        if (boot_cpu_has_bug(X86_BUG_F00F)) {
                 nr = (address - idt_descr.address) >> 3;

                 if (nr == 6) {
@@ -1224,7 +1224,9 @@ good_area:
 dotraplinkage void __kprobes
 do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
-        exception_enter(regs);
+        enum ctx_state prev_state;
+
+        prev_state = exception_enter();
         __do_page_fault(regs, error_code);
-        exception_exit(regs);
+        exception_exit(prev_state);
 }
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 6f31ee56c008..252b8f5489ba 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -137,5 +137,4 @@ void __init set_highmem_pages_init(void)
                 add_highpages_with_active_regions(nid, zone_start_pfn,
                                  zone_end_pfn);
         }
-        totalram_pages += totalhigh_pages;
 }
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 59b7fc453277..fdc5dca14fb3 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -515,11 +515,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
         printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

         for (; addr < end; addr += PAGE_SIZE) {
-                ClearPageReserved(virt_to_page(addr));
-                init_page_count(virt_to_page(addr));
                 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
-                free_page(addr);
-                totalram_pages++;
+                free_reserved_page(virt_to_page(addr));
         }
 #endif
 }
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 2d19001151d5..3ac7e319918d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -427,14 +427,6 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
         pkmap_page_table = pte;
 }

-static void __init add_one_highpage_init(struct page *page)
-{
-        ClearPageReserved(page);
-        init_page_count(page);
-        __free_page(page);
-        totalhigh_pages++;
-}
-
 void __init add_highpages_with_active_regions(int nid,
                          unsigned long start_pfn, unsigned long end_pfn)
 {
@@ -448,7 +440,7 @@ void __init add_highpages_with_active_regions(int nid,
                             start_pfn, end_pfn);
                 for ( ; pfn < e_pfn; pfn++)
                         if (pfn_valid(pfn))
-                                add_one_highpage_init(pfn_to_page(pfn));
+                                free_highmem_page(pfn_to_page(pfn));
         }
 }
 #else
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 474e28f10815..caad9a0ee19f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1011,14 +1011,12 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
         flush_tlb_all();
 }

-void __ref vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void __ref vmemmap_free(unsigned long start, unsigned long end)
 {
-        unsigned long start = (unsigned long)memmap;
-        unsigned long end = (unsigned long)(memmap + nr_pages);
-
         remove_pagetable(start, end, false);
 }

+#ifdef CONFIG_MEMORY_HOTREMOVE
 static void __meminit
 kernel_physical_mapping_remove(unsigned long start, unsigned long end)
 {
@@ -1028,7 +1026,6 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
         remove_pagetable(start, end, true);
 }

-#ifdef CONFIG_MEMORY_HOTREMOVE
 int __ref arch_remove_memory(u64 start, u64 size)
 {
         unsigned long start_pfn = start >> PAGE_SHIFT;
@@ -1067,10 +1064,9 @@ void __init mem_init(void)

         /* clear_bss() already clear the empty_zero_page */

-        reservedpages = 0;
-
-        /* this will put all low memory onto the freelists */
         register_page_bootmem_info();
+
+        /* this will put all memory onto the freelists */
         totalram_pages = free_all_bootmem();

         absent_pages = absent_pages_in_range(0, max_pfn);
@@ -1285,18 +1281,17 @@ static long __meminitdata addr_start, addr_end;
 static void __meminitdata *p_start, *p_end;
 static int __meminitdata node_start;

-int __meminit
-vmemmap_populate(struct page *start_page, unsigned long size, int node)
+static int __meminit vmemmap_populate_hugepages(unsigned long start,
+                                                unsigned long end, int node)
 {
-        unsigned long addr = (unsigned long)start_page;
-        unsigned long end = (unsigned long)(start_page + size);
+        unsigned long addr;
         unsigned long next;
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd;

-        for (; addr < end; addr = next) {
-                void *p = NULL;
+        for (addr = start; addr < end; addr = next) {
+                next = pmd_addr_end(addr, end);

                 pgd = vmemmap_pgd_populate(addr, node);
                 if (!pgd)
@@ -1306,31 +1301,14 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
                 if (!pud)
                         return -ENOMEM;

-                if (!cpu_has_pse) {
-                        next = (addr + PAGE_SIZE) & PAGE_MASK;
-                        pmd = vmemmap_pmd_populate(pud, addr, node);
-
-                        if (!pmd)
-                                return -ENOMEM;
-
-                        p = vmemmap_pte_populate(pmd, addr, node);
+                pmd = pmd_offset(pud, addr);
+                if (pmd_none(*pmd)) {
+                        void *p;

-                        if (!p)
-                                return -ENOMEM;
-
-                        addr_end = addr + PAGE_SIZE;
-                        p_end = p + PAGE_SIZE;
-                } else {
-                        next = pmd_addr_end(addr, end);
-
-                        pmd = pmd_offset(pud, addr);
-                        if (pmd_none(*pmd)) {
+                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
+                        if (p) {
                                 pte_t entry;

-                                p = vmemmap_alloc_block_buf(PMD_SIZE, node);
-                                if (!p)
-                                        return -ENOMEM;
-
                                 entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                                 PAGE_KERNEL_LARGE);
                                 set_pmd(pmd, __pmd(pte_val(entry)));
@@ -1347,15 +1325,32 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)

                                 addr_end = addr + PMD_SIZE;
                                 p_end = p + PMD_SIZE;
-                        } else
-                                vmemmap_verify((pte_t *)pmd, node, addr, next);
+                                continue;
+                        }
+                } else if (pmd_large(*pmd)) {
+                        vmemmap_verify((pte_t *)pmd, node, addr, next);
+                        continue;
                 }
-
+                pr_warn_once("vmemmap: falling back to regular page backing\n");
+                if (vmemmap_populate_basepages(addr, next, node))
+                        return -ENOMEM;
         }
-        sync_global_pgds((unsigned long)start_page, end - 1);
         return 0;
 }

+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+{
+        int err;
+
+        if (cpu_has_pse)
+                err = vmemmap_populate_hugepages(start, end, node);
+        else
+                err = vmemmap_populate_basepages(start, end, node);
+        if (!err)
+                sync_global_pgds(start, end - 1);
+        return err;
+}
+
 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
 void register_page_bootmem_memmap(unsigned long section_nr,
                                   struct page *start_page, unsigned long size)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 78fe3f1ac49f..9a1e6583910c 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
            in parallel. Reuse of the virtual address is prevented by
            leaving it in the global lists until we're done with it.
            cpa takes care of the direct mappings. */
-        read_lock(&vmlist_lock);
-        for (p = vmlist; p; p = p->next) {
-                if (p->addr == (void __force *)addr)
-                        break;
-        }
-        read_unlock(&vmlist_lock);
+        p = find_vm_area((void __force *)addr);

         if (!p) {
                 printk(KERN_ERR "iounmap: bad address %p\n", addr);
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 72fe01e9e414..a71c4e207679 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -114,14 +114,11 @@ void numa_clear_node(int cpu)
  */
 void __init setup_node_to_cpumask_map(void)
 {
-        unsigned int node, num = 0;
+        unsigned int node;

         /* setup nr_node_ids if not done yet */
-        if (nr_node_ids == MAX_NUMNODES) {
-                for_each_node_mask(node, node_possible_map)
-                        num = node;
-                nr_node_ids = num + 1;
-        }
+        if (nr_node_ids == MAX_NUMNODES)
+                setup_nr_node_ids();

         /* allocate the map */
         for (node = 0; node < nr_node_ids; node++)
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 0e38951e65eb..d0b1773d9d2e 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -130,13 +130,12 @@ static int pageattr_test(void)
         }
         failed += print_split(&sa);

-        srandom32(100);

         for (i = 0; i < NTEST; i++) {
-                unsigned long pfn = random32() % max_pfn_mapped;
+                unsigned long pfn = prandom_u32() % max_pfn_mapped;

                 addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
-                len[i] = random32() % 100;
+                len[i] = prandom_u32() % 100;
                 len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);

                 if (len[i] == 0)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index fb4e73ec24d8..bb32480c2d71 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -542,13 +542,14 @@ out_unlock:
         return do_split;
 }

-int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase)
+static int
+__split_large_page(pte_t *kpte, unsigned long address, struct page *base)
 {
+        pte_t *pbase = (pte_t *)page_address(base);
         unsigned long pfn, pfninc = 1;
         unsigned int i, level;
         pte_t *tmp;
         pgprot_t ref_prot;
-        struct page *base = virt_to_page(pbase);

         spin_lock(&pgd_lock);
         /*
@@ -633,7 +634,6 @@ int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase)

 static int split_large_page(pte_t *kpte, unsigned long address)
 {
-        pte_t *pbase;
         struct page *base;

         if (!debug_pagealloc)
@@ -644,8 +644,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
         if (!base)
                 return -ENOMEM;

-        pbase = (pte_t *)page_address(base);
-        if (__split_large_page(kpte, address, pbase))
+        if (__split_large_page(kpte, address, base))
                 __free_page(base);

         return 0;