/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * NOTE: the original include list was garbled in extraction; the headers
 * below are a reconstruction based on the symbols this file uses and may
 * not match the original exactly.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>

#include <asm/cputype.h>
#include <asm/cputable.h>
#include <asm/elf.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/psci.h>
#include <asm/system_misc.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

static const char *cpu_name;
static const char *machine_name;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

struct machine_desc *machine_desc __initdata;

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

void __init early_print(const char *str, ...)
{
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

	printk("%s", buf);
}

struct mpidr_hash mpidr_hash;

#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels
	 * to check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how
		 * many bits are required to express the affinity
		 * level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32-bit value space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
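	/*
	 * Worked example (illustrative values, not from any particular
	 * board): four CPUs with MPIDRs 0x000, 0x001, 0x100 and 0x101
	 * give mask = 0x101, fs = {0, 0, 0, 0} and bits = {1, 1, 0, 0}.
	 * The assignments below then yield shift_aff[0] = 0 and
	 * shift_aff[1] = 8 + 0 - 1 = 7, so the four MPIDRs hash to the
	 * dense indices 0, 1, 2 and 3 (mpidr_hash.bits = 2, i.e. a
	 * 4-entry table).
	 */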
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) + fs[3] -
						(bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif

static void __init setup_processor(void)
{
	struct cpu_info *cpu_info;
	u64 reg_value;

	/*
	 * Locate the processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc.S.
	 */
	cpu_info = lookup_processor_type(read_cpuid_id());
	if (!cpu_info) {
		printk("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = cpu_info->cpu_name;

	printk("CPU: %s [%08x] revision %d\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, "aarch64");
	elf_hwcap = 0;

	/* Read the number of ASID bits */
	reg_value = read_cpuid(ID_AA64MMFR0_EL1) & 0xf0;
	if (reg_value == 0x00)
		max_asid_bits = 8;
	else if (reg_value == 0x20)
		max_asid_bits = 16;
	else
		BUG_ON(1);
	cpu_last_asid = 1 << max_asid_bits;
}

static struct machine_desc * __init setup_machine_fdt(phys_addr_t dt_phys)
{
	struct boot_param_header *devtree;
	struct machine_desc *mdesc, *mdesc_best = NULL;
	unsigned int score, mdesc_score = ~1;
	unsigned long dt_root;

	/* Check we have a non-NULL DT pointer */
	if (!dt_phys) {
		early_print("\n"
			"Error: NULL or invalid device tree blob\n"
			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
			"\nPlease check your bootloader.\n");

		while (true)
			cpu_relax();
	}

	devtree = phys_to_virt(dt_phys);

	/* Check device tree validity */
	if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) {
		early_print("\n"
			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
			"Expected 0x%x, found 0x%x\n"
			"\nPlease check your bootloader.\n",
			dt_phys, devtree, OF_DT_HEADER,
			be32_to_cpu(devtree->magic));

		while (true)
			cpu_relax();
	}

	initial_boot_params = devtree;
	dt_root = of_get_flat_dt_root();

	for_each_machine_desc(mdesc) {
		score = of_flat_dt_match(dt_root, mdesc->dt_compat);
		if (score > 0 && score < mdesc_score) {
			mdesc_best = mdesc;
			mdesc_score = score;
		}
	}

	if (!mdesc_best) {
		const char *prop;
		long size;

		pr_info("\nError: unrecognized/unsupported "
			"device tree compatible list:\n[ ");

		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
		while (size > 0) {
			pr_info("'%s' ", prop);
			size -= strlen(prop) + 1;
			prop += strlen(prop) + 1;
		}
		pr_info("]\n\n");

		while (true)
			/* can't use cpu_relax() here as it may require MMU setup */;
	}

	machine_name = of_get_flat_dt_prop(dt_root, "model", NULL);
	if (!machine_name)
		machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
	if (!machine_name)
		machine_name = "<unknown>";
	pr_info("Machine: %s\n", machine_name);

	/* Retrieve various information from the /chosen node */
	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
	/* Initialize {size,address}-cells info */
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	/* Setup memory, calling early_init_dt_add_memory_arch */
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	return mdesc_best;
}
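/*
 * Example of the clipping performed below (illustrative addresses,
 * assuming PHYS_OFFSET = 0x80000000): a DT memory node covering
 * 0x7ff00000-0x9fffffff is trimmed to start at PHYS_OFFSET, while a
 * node that ends entirely below PHYS_OFFSET is ignored outright.
 */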
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
	base &= PAGE_MASK;
	size &= PAGE_MASK;
	if (base + size < PHYS_OFFSET) {
		pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
			   base, base + size);
		return;
	}
	if (base < PHYS_OFFSET) {
		pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
			   base, PHYS_OFFSET);
		size -= PHYS_OFFSET - base;
		base = PHYS_OFFSET;
	}
	memblock_add(base, size);
}

void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return __va(memblock_alloc(size, align));
}

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	phys_addr_t limit;

	if (!p)
		return 1;

	limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", limit >> 20);

	memblock_enforce_memory_limit(limit);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end   = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end   = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}

u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();

	mdesc = setup_machine_fdt(__fdt_pointer);
	machine_desc = mdesc;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	parse_early_param();

	arm64_memblock_init();

	paging_init();
	request_standard_resources();

	unflatten_device_tree();

	psci_init();

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
#ifdef CONFIG_SMP
	smp_set_ops(machine_desc->smp);
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	/*
	 * Supply the real arch timer counter to skip
	 * arch_timer_read_zero() (arm_arch_timer.c), which would
	 * otherwise cause a hang in udelay().  Proper counter setup
	 * is performed at a later stage, in time_init().
	 */
	if (is_hyp_mode_available())
		arch_timer_read_counter = arch_counter_get_cntpct;
	else
		arch_timer_read_counter = arch_counter_get_cntvct;

	if (machine_desc->init_early)
		machine_desc->init_early();
}
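/*
 * Rough ordering of the hooks below (a sketch of the generic initcall
 * sequence, not specific to this file): setup_arch() runs from
 * start_kernel() before any initcall; arm64_device_init() and
 * customize_machine() then run at arch_initcall time, topology_init()
 * at subsys_initcall time, and init_machine_late() at late_initcall
 * time.
 */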
static int __init arm64_device_init(void)
{
#if defined(CONFIG_COMMON_CLK)
	of_clk_init(NULL);
#endif
	if (!machine_desc->init_machine)
		of_platform_populate(NULL, of_default_bus_match_table,
				     NULL, NULL);
	return 0;
}
arch_initcall(arm64_device_init);

static DEFINE_PER_CPU(struct cpu, cpu_data);

static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"fp",
	"asimd",
	NULL
};

static void denver_show(struct seq_file *m)
{
	u32 aidr;

	asm volatile("mrs %0, AIDR_EL1" : "=r" (aidr) : );
	seq_printf(m, "MTS version\t: %u\n", aidr);
}

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   loops_per_jiffy / (500000UL/HZ),
			   loops_per_jiffy / (5000UL/HZ) % 100);
	}

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);
#ifdef CONFIG_ARMV7_COMPAT_CPUINFO
	/* Print out the non-optional ARMv8 HW capabilities */
	seq_printf(m, "wp half thumb fastmult vfp edsp neon vfpv3 tlsi ");
	seq_printf(m, "vfpv4 idiva idivt ");
#endif

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: 8\n");
	seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
	seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	if ((read_cpuid_id() >> 24) == 'N')
		denver_show(m);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);