-rw-r--r--  Documentation/devicetree/bindings/mips/ralink.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt | 1
-rw-r--r--  arch/mips/Kbuild | 4
-rw-r--r--  arch/mips/Kconfig | 52
-rw-r--r--  arch/mips/Makefile | 1
-rw-r--r--  arch/mips/alchemy/Kconfig | 3
-rw-r--r--  arch/mips/alchemy/Platform | 22
-rw-r--r--  arch/mips/ar7/memory.c | 1
-rw-r--r--  arch/mips/ath79/setup.c | 16
-rw-r--r--  arch/mips/bcm63xx/Kconfig | 4
-rw-r--r--  arch/mips/bcm63xx/boards/board_bcm963xx.c | 6
-rw-r--r--  arch/mips/bcm63xx/clk.c | 43
-rw-r--r--  arch/mips/bcm63xx/cpu.c | 142
-rw-r--r--  arch/mips/bcm63xx/dev-flash.c | 6
-rw-r--r--  arch/mips/bcm63xx/dev-spi.c | 26
-rw-r--r--  arch/mips/bcm63xx/irq.c | 22
-rw-r--r--  arch/mips/bcm63xx/prom.c | 2
-rw-r--r--  arch/mips/bcm63xx/reset.c | 28
-rw-r--r--  arch/mips/bcm63xx/setup.c | 5
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c | 5
-rw-r--r--  arch/mips/configs/malta_defconfig | 69
-rw-r--r--  arch/mips/configs/malta_kvm_defconfig | 456
-rw-r--r--  arch/mips/configs/malta_kvm_guest_defconfig | 453
-rw-r--r--  arch/mips/configs/maltaaprp_defconfig | 195
-rw-r--r--  arch/mips/configs/maltasmtc_defconfig | 196
-rw-r--r--  arch/mips/configs/maltasmvp_defconfig | 199
-rw-r--r--  arch/mips/configs/maltaup_defconfig | 194
-rw-r--r--  arch/mips/configs/sead3_defconfig | 3
-rw-r--r--  arch/mips/configs/sead3micro_defconfig | 122
-rw-r--r--  arch/mips/fw/lib/Makefile | 2
-rw-r--r--  arch/mips/fw/lib/cmdline.c | 101
-rw-r--r--  arch/mips/include/asm/asm.h | 2
-rw-r--r--  arch/mips/include/asm/bootinfo.h | 1
-rw-r--r--  arch/mips/include/asm/branch.h | 40
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 3
-rw-r--r--  arch/mips/include/asm/dma-coherence.h | 15
-rw-r--r--  arch/mips/include/asm/dma-mapping.h | 1
-rw-r--r--  arch/mips/include/asm/fpu_emulator.h | 6
-rw-r--r--  arch/mips/include/asm/fw/fw.h | 47
-rw-r--r--  arch/mips/include/asm/gic.h | 16
-rw-r--r--  arch/mips/include/asm/hazards.h | 371
-rw-r--r--  arch/mips/include/asm/inst.h | 12
-rw-r--r--  arch/mips/include/asm/irqflags.h | 153
-rw-r--r--  arch/mips/include/asm/kvm.h | 55
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 667
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h | 11
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h | 141
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h | 11
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h | 2
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h | 105
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/ioremap.h | 1
-rw-r--r--  arch/mips/include/asm/mach-generic/dma-coherence.h | 5
-rw-r--r--  arch/mips/include/asm/mach-generic/spaces.h | 9
-rw-r--r--  arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h | 1
-rw-r--r--  arch/mips/include/asm/mach-ralink/mt7620.h | 84
-rw-r--r--  arch/mips/include/asm/mach-ralink/rt288x.h | 53
-rw-r--r--  arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h | 56
-rw-r--r--  arch/mips/include/asm/mach-ralink/rt305x.h | 27
-rw-r--r--  arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h | 56
-rw-r--r--  arch/mips/include/asm/mach-ralink/rt3883.h | 252
-rw-r--r--  arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h | 55
-rw-r--r--  arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h | 4
-rw-r--r--  arch/mips/include/asm/mips-boards/generic.h | 3
-rw-r--r--  arch/mips/include/asm/mips-boards/prom.h | 47
-rw-r--r--  arch/mips/include/asm/mips_machine.h | 4
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 19
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 116
-rw-r--r--  arch/mips/include/asm/netlogic/haldefs.h | 92
-rw-r--r--  arch/mips/include/asm/netlogic/mips-extns.h | 20
-rw-r--r--  arch/mips/include/asm/netlogic/xlp-hal/pic.h | 53
-rw-r--r--  arch/mips/include/asm/netlogic/xlp-hal/usb.h | 64
-rw-r--r--  arch/mips/include/asm/pgtable.h | 1
-rw-r--r--  arch/mips/include/asm/processor.h | 5
-rw-r--r--  arch/mips/include/asm/prom.h | 3
-rw-r--r--  arch/mips/include/asm/sn/sn_private.h | 2
-rw-r--r--  arch/mips/include/asm/sn/types.h | 1
-rw-r--r--  arch/mips/include/asm/spinlock.h | 120
-rw-r--r--  arch/mips/include/asm/stackframe.h | 12
-rw-r--r--  arch/mips/include/asm/thread_info.h | 8
-rw-r--r--  arch/mips/include/asm/time.h | 8
-rw-r--r--  arch/mips/include/asm/uaccess.h | 25
-rw-r--r--  arch/mips/include/asm/uasm.h | 84
-rw-r--r--  arch/mips/include/uapi/asm/inst.h | 564
-rw-r--r--  arch/mips/kernel/Makefile | 7
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 66
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c | 4
-rw-r--r--  arch/mips/kernel/branch.c | 178
-rw-r--r--  arch/mips/kernel/cevt-gic.c | 104
-rw-r--r--  arch/mips/kernel/cevt-r4k.c | 13
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 3
-rw-r--r--  arch/mips/kernel/csrc-gic.c | 13
-rw-r--r--  arch/mips/kernel/genex.S | 75
-rw-r--r--  arch/mips/kernel/irq-gic.c | 47
-rw-r--r--  arch/mips/kernel/mips_machine.c | 22
-rw-r--r--  arch/mips/kernel/proc.c | 6
-rw-r--r--  arch/mips/kernel/process.c | 101
-rw-r--r--  arch/mips/kernel/prom.c | 33
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 9
-rw-r--r--  arch/mips/kernel/setup.c | 22
-rw-r--r--  arch/mips/kernel/signal.c | 9
-rw-r--r--  arch/mips/kernel/smp-mt.c | 3
-rw-r--r--  arch/mips/kernel/smp.c | 1
-rw-r--r--  arch/mips/kernel/smtc-asm.S | 3
-rw-r--r--  arch/mips/kernel/smtc.c | 10
-rw-r--r--  arch/mips/kernel/traps.c | 318
-rw-r--r--  arch/mips/kernel/unaligned.c | 1489
-rw-r--r--  arch/mips/kvm/00README.txt | 31
-rw-r--r--  arch/mips/kvm/Kconfig | 49
-rw-r--r--  arch/mips/kvm/Makefile | 13
-rw-r--r--  arch/mips/kvm/kvm_cb.c | 14
-rw-r--r--  arch/mips/kvm/kvm_locore.S | 650
-rw-r--r--  arch/mips/kvm/kvm_mips.c | 958
-rw-r--r--  arch/mips/kvm/kvm_mips_comm.h | 23
-rw-r--r--  arch/mips/kvm/kvm_mips_commpage.c | 37
-rw-r--r--  arch/mips/kvm/kvm_mips_dyntrans.c | 149
-rw-r--r--  arch/mips/kvm/kvm_mips_emul.c | 1826
-rw-r--r--  arch/mips/kvm/kvm_mips_int.c | 243
-rw-r--r--  arch/mips/kvm/kvm_mips_int.h | 49
-rw-r--r--  arch/mips/kvm/kvm_mips_opcode.h | 24
-rw-r--r--  arch/mips/kvm/kvm_mips_stats.c | 82
-rw-r--r--  arch/mips/kvm/kvm_tlb.c | 928
-rw-r--r--  arch/mips/kvm/kvm_trap_emul.c | 482
-rw-r--r--  arch/mips/kvm/trace.h | 46
-rw-r--r--  arch/mips/lib/bitops.c | 14
-rw-r--r--  arch/mips/lib/dump_tlb.c | 5
-rw-r--r--  arch/mips/lib/memset.S | 84
-rw-r--r--  arch/mips/lib/mips-atomic.c | 149
-rw-r--r--  arch/mips/lib/r3k_dump_tlb.c | 7
-rw-r--r--  arch/mips/lib/strlen_user.S | 9
-rw-r--r--  arch/mips/lib/strncpy_user.S | 32
-rw-r--r--  arch/mips/lib/strnlen_user.S | 2
-rw-r--r--  arch/mips/math-emu/cp1emu.c | 919
-rw-r--r--  arch/mips/math-emu/dsemul.c | 30
-rw-r--r--  arch/mips/mm/Makefile | 4
-rw-r--r--  arch/mips/mm/c-r4k.c | 30
-rw-r--r--  arch/mips/mm/cache.c | 1
-rw-r--r--  arch/mips/mm/dma-default.c | 25
-rw-r--r--  arch/mips/mm/page.c | 10
-rw-r--r--  arch/mips/mm/tlb-r3k.c | 20
-rw-r--r--  arch/mips/mm/tlb-r4k.c | 4
-rw-r--r--  arch/mips/mm/tlb-r8k.c | 2
-rw-r--r--  arch/mips/mm/tlbex.c | 132
-rw-r--r--  arch/mips/mm/uasm-micromips.c | 221
-rw-r--r--  arch/mips/mm/uasm-mips.c | 205
-rw-r--r--  arch/mips/mm/uasm.c | 326
-rw-r--r--  arch/mips/mti-malta/Makefile | 5
-rw-r--r--  arch/mips/mti-malta/Platform | 6
-rw-r--r--  arch/mips/mti-malta/malta-cmdline.c | 59
-rw-r--r--  arch/mips/mti-malta/malta-display.c | 38
-rw-r--r--  arch/mips/mti-malta/malta-init.c | 153
-rw-r--r--  arch/mips/mti-malta/malta-int.c | 4
-rw-r--r--  arch/mips/mti-malta/malta-memory.c | 104
-rw-r--r--  arch/mips/mti-malta/malta-setup.c | 87
-rw-r--r--  arch/mips/mti-malta/malta-time.c | 55
-rw-r--r--  arch/mips/mti-sead3/Makefile | 8
-rw-r--r--  arch/mips/mti-sead3/leds-sead3.c | 24
-rw-r--r--  arch/mips/mti-sead3/sead3-cmdline.c | 46
-rw-r--r--  arch/mips/mti-sead3/sead3-console.c | 2
-rw-r--r--  arch/mips/mti-sead3/sead3-display.c | 1
-rw-r--r--  arch/mips/mti-sead3/sead3-init.c | 130
-rw-r--r--  arch/mips/mti-sead3/sead3-int.c | 1
-rw-r--r--  arch/mips/mti-sead3/sead3-setup.c | 4
-rw-r--r--  arch/mips/mti-sead3/sead3-time.c | 1
-rw-r--r--  arch/mips/netlogic/Kconfig | 17
-rw-r--r--  arch/mips/netlogic/common/smp.c | 21
-rw-r--r--  arch/mips/netlogic/dts/Makefile | 1
-rw-r--r--  arch/mips/netlogic/dts/xlp_evp.dts | 2
-rw-r--r--  arch/mips/netlogic/dts/xlp_svp.dts | 124
-rw-r--r--  arch/mips/netlogic/xlp/nlm_hal.c | 62
-rw-r--r--  arch/mips/netlogic/xlp/setup.c | 22
-rw-r--r--  arch/mips/netlogic/xlp/usb-init.c | 49
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c | 2
-rw-r--r--  arch/mips/pci/pci-ar71xx.c | 6
-rw-r--r--  arch/mips/pci/pci-ar724x.c | 18
-rw-r--r--  arch/mips/pci/pci-bcm63xx.c | 11
-rw-r--r--  arch/mips/powertv/init.c | 3
-rw-r--r--  arch/mips/powertv/init.h | 2
-rw-r--r--  arch/mips/powertv/memory.c | 1
-rw-r--r--  arch/mips/powertv/powertv_setup.c | 1
-rw-r--r--  arch/mips/ralink/Kconfig | 23
-rw-r--r--  arch/mips/ralink/Makefile | 3
-rw-r--r--  arch/mips/ralink/Platform | 18
-rw-r--r--  arch/mips/ralink/common.h | 11
-rw-r--r--  arch/mips/ralink/dts/Makefile | 3
-rw-r--r--  arch/mips/ralink/dts/mt7620a.dtsi | 58
-rw-r--r--  arch/mips/ralink/dts/mt7620a_eval.dts | 16
-rw-r--r--  arch/mips/ralink/dts/rt2880.dtsi | 58
-rw-r--r--  arch/mips/ralink/dts/rt2880_eval.dts | 46
-rw-r--r--  arch/mips/ralink/dts/rt3050.dtsi | 52
-rw-r--r--  arch/mips/ralink/dts/rt3052_eval.dts | 12
-rw-r--r--  arch/mips/ralink/dts/rt3883.dtsi | 58
-rw-r--r--  arch/mips/ralink/dts/rt3883_eval.dts | 16
-rw-r--r--  arch/mips/ralink/early_printk.c | 4
-rw-r--r--  arch/mips/ralink/irq.c | 5
-rw-r--r--  arch/mips/ralink/mt7620.c | 234
-rw-r--r--  arch/mips/ralink/of.c | 9
-rw-r--r--  arch/mips/ralink/rt288x.c | 143
-rw-r--r--  arch/mips/ralink/rt305x.c | 70
-rw-r--r--  arch/mips/ralink/rt3883.c | 246
-rw-r--r--  arch/mips/sgi-ip27/ip27-klnuma.c | 2
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c | 16
-rw-r--r--  arch/mips/sgi-ip27/ip27-timer.c | 2
-rw-r--r--  drivers/tty/serial/bcm63xx_uart.c | 1
-rw-r--r--  drivers/video/au1100fb.c | 22
-rw-r--r--  virt/kvm/kvm_main.c | 2
205 files changed, 16155 insertions, 2325 deletions
diff --git a/Documentation/devicetree/bindings/mips/ralink.txt b/Documentation/devicetree/bindings/mips/ralink.txt
new file mode 100644
index 000000000000..b35a8d04f8b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/ralink.txt
@@ -0,0 +1,17 @@
+Ralink MIPS SoC device tree bindings
+
+1. SoCs
+
+Each device tree must specify a compatible value for the Ralink SoC
+it uses in the compatible property of the root node. The compatible
+value must be one of the following values:
+
+ ralink,rt2880-soc
+ ralink,rt3050-soc
+ ralink,rt3052-soc
+ ralink,rt3350-soc
+ ralink,rt3352-soc
+ ralink,rt3883-soc
+ ralink,rt5350-soc
+ ralink,mt7620a-soc
+ ralink,mt7620n-soc
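Platform code can key its setup off this root-node compatible at run time. A minimal sketch (the helper name is hypothetical; of_machine_is_compatible() is the generic OF call that tests the root node's compatible list):

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/of.h>

	/* Hypothetical helper: report which Ralink SoC the device tree declares. */
	static void __init ralink_report_soc(void)
	{
		if (of_machine_is_compatible("ralink,rt2880-soc"))
			pr_info("SoC: Ralink RT2880\n");
		else if (of_machine_is_compatible("ralink,rt3883-soc"))
			pr_info("SoC: Ralink RT3883\n");
		else if (of_machine_is_compatible("ralink,mt7620a-soc"))
			pr_info("SoC: Ralink MT7620A\n");
	}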
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4d1919bf2332..6931c4348d24 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -42,6 +42,7 @@ onnn ON Semiconductor Corp.
picochip Picochip Ltd
powervr PowerVR (deprecated, use img)
qcom Qualcomm, Inc.
+ralink Mediatek/Ralink Technology Corp.
ramtron Ramtron International
realtek Realtek Semiconductor Corp.
renesas Renesas Electronics Corporation
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
index 7dd65cfae837..d2cfe45f332b 100644
--- a/arch/mips/Kbuild
+++ b/arch/mips/Kbuild
@@ -17,3 +17,7 @@ obj- := $(platform-)
obj-y += kernel/
obj-y += mm/
obj-y += math-emu/
+
+ifdef CONFIG_KVM
+obj-y += kvm/
+endif
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a90cfc702bb1..7a58ab933b20 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -304,7 +304,6 @@ config MIPS_MALTA
select HW_HAS_PCI
select I8253
select I8259
- select MIPS_BOARDS_GEN
select MIPS_BONITO64
select MIPS_CPU_SCACHE
select PCI_GT64XXX_PCI0
@@ -335,12 +334,12 @@ config MIPS_SEAD3
select BOOT_RAW
select CEVT_R4K
select CSRC_R4K
+ select CSRC_GIC
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select DMA_NONCOHERENT
select IRQ_CPU
select IRQ_GIC
- select MIPS_BOARDS_GEN
select MIPS_CPU_SCACHE
select MIPS_MSC
select SYS_HAS_CPU_MIPS32_R1
@@ -352,6 +351,7 @@ config MIPS_SEAD3
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_SMARTMIPS
+ select SYS_SUPPORTS_MICROMIPS
select USB_ARCH_HAS_EHCI
select USB_EHCI_BIG_ENDIAN_DESC
select USB_EHCI_BIG_ENDIAN_MMIO
@@ -910,6 +910,9 @@ config CEVT_GT641XX
config CEVT_R4K
bool
+config CEVT_GIC
+ bool
+
config CEVT_SB1250
bool
@@ -982,9 +985,6 @@ config MIPS_MSC
config MIPS_NILE4
bool
-config MIPS_DISABLE_OBSOLETE_IDE
- bool
-
config SYNC_R4K
bool
@@ -1075,9 +1075,6 @@ config IRQ_GT641XX
config IRQ_GIC
bool
-config MIPS_BOARDS_GEN
- bool
-
config PCI_GT64XXX_PCI0
bool
@@ -1147,7 +1144,7 @@ config BOOT_ELF32
config MIPS_L1_CACHE_SHIFT
int
- default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL
+ default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL || SOC_RT288X
default "6" if MIPS_CPU_SCACHE
default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON
default "5"
@@ -1236,6 +1233,7 @@ config CPU_MIPS32_R2
select CPU_HAS_PREFETCH
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+ select HAVE_KVM
help
Choose this option to build a kernel for release 2 or later of the
MIPS32 architecture. Most modern embedded systems with a 32-bit
@@ -1736,6 +1734,20 @@ config 64BIT
endchoice
+config KVM_GUEST
+ bool "KVM Guest Kernel"
+ help
+ Select this option if building a guest kernel for KVM (Trap & Emulate) mode
+
+config KVM_HOST_FREQ
+ int "KVM Host Processor Frequency (MHz)"
+ depends on KVM_GUEST
+ default 500
+ help
+ Select this option if building a guest kernel for KVM to skip
+ RTC emulation when determining guest CPU Frequency. Instead, the guest
+ processor frequency is automatically derived from the host frequency.
+
choice
prompt "Kernel page size"
default PAGE_SIZE_4KB
@@ -1811,6 +1823,15 @@ config FORCE_MAX_ZONEORDER
The page size is not necessarily 4KB. Keep this in mind
when choosing a value for this option.
+config CEVT_GIC
+ bool "Use GIC global counter for clock events"
+ depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC)
+ help
+ Use the GIC global counter for the clock events. The R4K clock
+ event driver is always present, so if the platform ends up not
+ detecting a GIC, it will fall back to the R4K timer for the
+ generation of clock events.
+
config BOARD_SCACHE
bool
@@ -2016,6 +2037,7 @@ config SB1_PASS_2_1_WORKAROUNDS
depends on CPU_SB1 && CPU_SB1_PASS_2
default y
+
config 64BIT_PHYS_ADDR
bool
@@ -2034,6 +2056,13 @@ config CPU_HAS_SMARTMIPS
you don't know you probably don't have SmartMIPS and should say N
here.
+config CPU_MICROMIPS
+ depends on SYS_SUPPORTS_MICROMIPS
+ bool "Build kernel using microMIPS ISA"
+ help
+ When this option is enabled the kernel will be built using the
+ microMIPS ISA
+
config CPU_HAS_WB
bool
@@ -2096,6 +2125,9 @@ config SYS_SUPPORTS_HIGHMEM
config SYS_SUPPORTS_SMARTMIPS
bool
+config SYS_SUPPORTS_MICROMIPS
+ bool
+
config ARCH_FLATMEM_ENABLE
def_bool y
depends on !NUMA && !CPU_LOONGSON2
@@ -2556,3 +2588,5 @@ source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
+
+source "arch/mips/kvm/Kconfig"
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 6f7978f95090..dd58a04ef4bc 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -114,6 +114,7 @@ cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*e
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips)
+cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips -mno-jals)
cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
-fno-omit-frame-pointer
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
index c8862bdc2ff2..7032ac7ecd1b 100644
--- a/arch/mips/alchemy/Kconfig
+++ b/arch/mips/alchemy/Kconfig
@@ -31,7 +31,6 @@ config MIPS_DB1000
select ALCHEMY_GPIOINT_AU1000
select DMA_NONCOHERENT
select HW_HAS_PCI
- select MIPS_DISABLE_OBSOLETE_IDE
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_HAS_EARLY_PRINTK
@@ -41,7 +40,6 @@ config MIPS_DB1235
select ARCH_REQUIRE_GPIOLIB
select HW_HAS_PCI
select DMA_COHERENT
- select MIPS_DISABLE_OBSOLETE_IDE
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_HAS_EARLY_PRINTK
@@ -57,7 +55,6 @@ config MIPS_GPR
select ALCHEMY_GPIOINT_AU1000
select HW_HAS_PCI
select DMA_NONCOHERENT
- select MIPS_DISABLE_OBSOLETE_IDE
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_HAS_EARLY_PRINTK
diff --git a/arch/mips/alchemy/Platform b/arch/mips/alchemy/Platform
index fa1bdd1aea15..b3afcdd8d77a 100644
--- a/arch/mips/alchemy/Platform
+++ b/arch/mips/alchemy/Platform
@@ -5,32 +5,14 @@ platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
#
-# AMD Alchemy Pb1100 eval board
-#
-platform-$(CONFIG_MIPS_PB1100) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1100) += 0xffffffff80100000
-
-#
-# AMD Alchemy Pb1500 eval board
-#
-platform-$(CONFIG_MIPS_PB1500) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1500) += 0xffffffff80100000
-
-#
-# AMD Alchemy Pb1550 eval board
-#
-platform-$(CONFIG_MIPS_PB1550) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1550) += 0xffffffff80100000
-
-#
-# AMD Alchemy Db1000/Db1500/Db1100 eval boards
+# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100 eval boards
#
platform-$(CONFIG_MIPS_DB1000) += alchemy/devboards/
cflags-$(CONFIG_MIPS_DB1000) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_DB1000) += 0xffffffff80100000
#
-# AMD Alchemy Db1200/Pb1200/Db1550/Db1300 eval boards
+# AMD Alchemy Db1200/Pb1200/Db1550/Pb1550/Db1300 eval boards
#
platform-$(CONFIG_MIPS_DB1235) += alchemy/devboards/
cflags-$(CONFIG_MIPS_DB1235) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c
index 28abfeef09d6..92dfa481205b 100644
--- a/arch/mips/ar7/memory.c
+++ b/arch/mips/ar7/memory.c
@@ -30,7 +30,6 @@
#include <asm/sections.h>
#include <asm/mach-ar7/ar7.h>
-#include <asm/mips-boards/prom.h>
static int __init memsize(void)
{
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index d5b3c9057018..a0233a2c1988 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -51,20 +51,6 @@ static void ath79_halt(void)
cpu_wait();
}
-static void __init ath79_detect_mem_size(void)
-{
- unsigned long size;
-
- for (size = ATH79_MEM_SIZE_MIN; size < ATH79_MEM_SIZE_MAX;
- size <<= 1) {
- if (!memcmp(ath79_detect_mem_size,
- ath79_detect_mem_size + size, 1024))
- break;
- }
-
- add_memory_region(0, size, BOOT_MEM_RAM);
-}
-
static void __init ath79_detect_sys_type(void)
{
char *chip = "????";
@@ -212,7 +198,7 @@ void __init plat_mem_setup(void)
AR71XX_DDR_CTRL_SIZE);
ath79_detect_sys_type();
- ath79_detect_mem_size();
+ detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
ath79_clocks_init();
_machine_restart = ath79_restart;
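The removed ath79_detect_mem_size() relied on address aliasing: when less RAM is fitted than the probed window, physical addresses wrap, so the first kilobyte of RAM reappears at the first power-of-two offset past the real size. The detect_memory_region() call that replaces it comes from a common helper added elsewhere in this series (arch/mips/kernel/setup.c in the diffstat) and presumably performs the same probe; a condensed sketch of the idea, with hypothetical names:

	#include <linux/init.h>
	#include <linux/string.h>

	/* Detect usable RAM between min and max by looking for aliasing. */
	static unsigned long __init probe_ram_size(const char *base,
						   unsigned long min,
						   unsigned long max)
	{
		unsigned long size;

		for (size = min; size < max; size <<= 1)
			if (!memcmp(base, base + size, 1024))
				break;	/* contents repeat: RAM wrapped around */

		return size;
	}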
diff --git a/arch/mips/bcm63xx/Kconfig b/arch/mips/bcm63xx/Kconfig
index d03e8799d1cf..5639662fd503 100644
--- a/arch/mips/bcm63xx/Kconfig
+++ b/arch/mips/bcm63xx/Kconfig
@@ -25,6 +25,10 @@ config BCM63XX_CPU_6358
bool "support 6358 CPU"
select HW_HAS_PCI
+config BCM63XX_CPU_6362
+ bool "support 6362 CPU"
+ select HW_HAS_PCI
+
config BCM63XX_CPU_6368
bool "support 6368 CPU"
select HW_HAS_PCI
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 9aa7d44898ed..a9505c4867e8 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -726,11 +726,11 @@ void __init board_prom_init(void)
u32 val;
/* read base address of boot chip select (0)
- * 6328 does not have MPI but boots from a fixed address
+ * 6328/6362 do not have MPI but boot from a fixed address
*/
- if (BCMCPU_IS_6328())
+ if (BCMCPU_IS_6328() || BCMCPU_IS_6362()) {
val = 0x18000000;
- else {
+ } else {
val = bcm_mpi_readl(MPI_CSBASE_REG(0));
val &= MPI_CSBASE_BASE_MASK;
}
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
index b9e948d59430..c726a97fc798 100644
--- a/arch/mips/bcm63xx/clk.c
+++ b/arch/mips/bcm63xx/clk.c
@@ -15,7 +15,13 @@
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_reset.h>
-#include <bcm63xx_clk.h>
+
+struct clk {
+ void (*set)(struct clk *, int);
+ unsigned int rate;
+ unsigned int usage;
+ int id;
+};
static DEFINE_MUTEX(clocks_mutex);
@@ -119,11 +125,18 @@ static struct clk clk_ephy = {
*/
static void enetsw_set(struct clk *clk, int enable)
{
- if (!BCMCPU_IS_6368())
+ if (BCMCPU_IS_6328())
+ bcm_hwclock_set(CKCTL_6328_ROBOSW_EN, enable);
+ else if (BCMCPU_IS_6362())
+ bcm_hwclock_set(CKCTL_6362_ROBOSW_EN, enable);
+ else if (BCMCPU_IS_6368())
+ bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
+ CKCTL_6368_SWPKT_USB_EN |
+ CKCTL_6368_SWPKT_SAR_EN,
+ enable);
+ else
return;
- bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
- CKCTL_6368_SWPKT_USB_EN |
- CKCTL_6368_SWPKT_SAR_EN, enable);
+
if (enable) {
/* reset switch core afer clock change */
bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);
@@ -160,6 +173,8 @@ static void usbh_set(struct clk *clk, int enable)
bcm_hwclock_set(CKCTL_6328_USBH_EN, enable);
else if (BCMCPU_IS_6348())
bcm_hwclock_set(CKCTL_6348_USBH_EN, enable);
+ else if (BCMCPU_IS_6362())
+ bcm_hwclock_set(CKCTL_6362_USBH_EN, enable);
else if (BCMCPU_IS_6368())
bcm_hwclock_set(CKCTL_6368_USBH_EN, enable);
}
@@ -175,6 +190,8 @@ static void usbd_set(struct clk *clk, int enable)
{
if (BCMCPU_IS_6328())
bcm_hwclock_set(CKCTL_6328_USBD_EN, enable);
+ else if (BCMCPU_IS_6362())
+ bcm_hwclock_set(CKCTL_6362_USBD_EN, enable);
else if (BCMCPU_IS_6368())
bcm_hwclock_set(CKCTL_6368_USBD_EN, enable);
}
@@ -196,6 +213,8 @@ static void spi_set(struct clk *clk, int enable)
mask = CKCTL_6348_SPI_EN;
else if (BCMCPU_IS_6358())
mask = CKCTL_6358_SPI_EN;
+ else if (BCMCPU_IS_6362())
+ mask = CKCTL_6362_SPI_EN;
else
/* BCMCPU_IS_6368 */
mask = CKCTL_6368_SPI_EN;
@@ -236,7 +255,10 @@ static struct clk clk_xtm = {
*/
static void ipsec_set(struct clk *clk, int enable)
{
- bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
+ if (BCMCPU_IS_6362())
+ bcm_hwclock_set(CKCTL_6362_IPSEC_EN, enable);
+ else if (BCMCPU_IS_6368())
+ bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
}
static struct clk clk_ipsec = {
@@ -249,7 +271,10 @@ static struct clk clk_ipsec = {
static void pcie_set(struct clk *clk, int enable)
{
- bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+ if (BCMCPU_IS_6328())
+ bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+ else if (BCMCPU_IS_6362())
+ bcm_hwclock_set(CKCTL_6362_PCIE_EN, enable);
}
static struct clk clk_pcie = {
@@ -315,9 +340,9 @@ struct clk *clk_get(struct device *dev, const char *id)
return &clk_periph;
if (BCMCPU_IS_6358() && !strcmp(id, "pcm"))
return &clk_pcm;
- if (BCMCPU_IS_6368() && !strcmp(id, "ipsec"))
+ if ((BCMCPU_IS_6362() || BCMCPU_IS_6368()) && !strcmp(id, "ipsec"))
return &clk_ipsec;
- if (BCMCPU_IS_6328() && !strcmp(id, "pcie"))
+ if ((BCMCPU_IS_6328() || BCMCPU_IS_6362()) && !strcmp(id, "pcie"))
return &clk_pcie;
return ERR_PTR(-ENOENT);
}
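With the 6362 cases in place, drivers reach these gates through the usual clk API: clk_get() above hands back the matching struct clk, and enabling it is expected to end up in the corresponding *_set() hook, which flips the CKCTL bit. A minimal consumer sketch (hypothetical driver code, error handling trimmed):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Hypothetical consumer: gate the PCIe clock on before register access. */
	static int bcm63xx_pcie_clk_on(struct device *dev)
	{
		struct clk *clk;

		clk = clk_get(dev, "pcie");	/* served by clk_get() above */
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return clk_enable(clk);		/* expected to reach pcie_set(clk, 1) */
	}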
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index a7afb289b15a..79fe32df5e96 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -25,7 +25,7 @@ const int *bcm63xx_irqs;
EXPORT_SYMBOL(bcm63xx_irqs);
static u16 bcm63xx_cpu_id;
-static u16 bcm63xx_cpu_rev;
+static u8 bcm63xx_cpu_rev;
static unsigned int bcm63xx_cpu_freq;
static unsigned int bcm63xx_memory_size;
@@ -71,6 +71,15 @@ static const int bcm6358_irqs[] = {
};
+static const unsigned long bcm6362_regs_base[] = {
+ __GEN_CPU_REGS_TABLE(6362)
+};
+
+static const int bcm6362_irqs[] = {
+ __GEN_CPU_IRQ_TABLE(6362)
+
+};
+
static const unsigned long bcm6368_regs_base[] = {
__GEN_CPU_REGS_TABLE(6368)
};
@@ -87,7 +96,7 @@ u16 __bcm63xx_get_cpu_id(void)
EXPORT_SYMBOL(__bcm63xx_get_cpu_id);
-u16 bcm63xx_get_cpu_rev(void)
+u8 bcm63xx_get_cpu_rev(void)
{
return bcm63xx_cpu_rev;
}
@@ -169,6 +178,42 @@ static unsigned int detect_cpu_clock(void)
return (16 * 1000000 * n1 * n2) / m1;
}
+ case BCM6362_CPU_ID:
+ {
+ unsigned int tmp, mips_pll_fcvo;
+
+ tmp = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+ mips_pll_fcvo = (tmp & STRAPBUS_6362_FCVO_MASK)
+ >> STRAPBUS_6362_FCVO_SHIFT;
+ switch (mips_pll_fcvo) {
+ case 0x03:
+ case 0x0b:
+ case 0x13:
+ case 0x1b:
+ return 240000000;
+ case 0x04:
+ case 0x0c:
+ case 0x14:
+ case 0x1c:
+ return 160000000;
+ case 0x05:
+ case 0x0e:
+ case 0x16:
+ case 0x1e:
+ case 0x1f:
+ return 400000000;
+ case 0x06:
+ return 440000000;
+ case 0x07:
+ case 0x17:
+ return 384000000;
+ case 0x15:
+ case 0x1d:
+ return 200000000;
+ default:
+ return 320000000;
+ }
+ }
case BCM6368_CPU_ID:
{
unsigned int tmp, p1, p2, ndiv, m1;
@@ -205,7 +250,7 @@ static unsigned int detect_memory_size(void)
unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0;
u32 val;
- if (BCMCPU_IS_6328())
+ if (BCMCPU_IS_6328() || BCMCPU_IS_6362())
return bcm_ddr_readl(DDR_CSEND_REG) << 24;
if (BCMCPU_IS_6345()) {
@@ -240,53 +285,27 @@ static unsigned int detect_memory_size(void)
void __init bcm63xx_cpu_init(void)
{
- unsigned int tmp, expected_cpu_id;
+ unsigned int tmp;
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id();
+ u32 chipid_reg;
/* soc registers location depends on cpu type */
- expected_cpu_id = 0;
+ chipid_reg = 0;
switch (c->cputype) {
case CPU_BMIPS3300:
- if ((read_c0_prid() & 0xff00) == PRID_IMP_BMIPS3300_ALT) {
- expected_cpu_id = BCM6348_CPU_ID;
- bcm63xx_regs_base = bcm6348_regs_base;
- bcm63xx_irqs = bcm6348_irqs;
- } else {
+ if ((read_c0_prid() & 0xff00) != PRID_IMP_BMIPS3300_ALT)
__cpu_name[cpu] = "Broadcom BCM6338";
- expected_cpu_id = BCM6338_CPU_ID;
- bcm63xx_regs_base = bcm6338_regs_base;
- bcm63xx_irqs = bcm6338_irqs;
- }
- break;
+ /* fall-through */
case CPU_BMIPS32:
- expected_cpu_id = BCM6345_CPU_ID;
- bcm63xx_regs_base = bcm6345_regs_base;
- bcm63xx_irqs = bcm6345_irqs;
+ chipid_reg = BCM_6345_PERF_BASE;
break;
case CPU_BMIPS4350:
- if ((read_c0_prid() & 0xf0) == 0x10) {
- expected_cpu_id = BCM6358_CPU_ID;
- bcm63xx_regs_base = bcm6358_regs_base;
- bcm63xx_irqs = bcm6358_irqs;
- } else {
- /* all newer chips have the same chip id location */
- u16 chip_id = bcm_readw(BCM_6368_PERF_BASE);
-
- switch (chip_id) {
- case BCM6328_CPU_ID:
- expected_cpu_id = BCM6328_CPU_ID;
- bcm63xx_regs_base = bcm6328_regs_base;
- bcm63xx_irqs = bcm6328_irqs;
- break;
- case BCM6368_CPU_ID:
- expected_cpu_id = BCM6368_CPU_ID;
- bcm63xx_regs_base = bcm6368_regs_base;
- bcm63xx_irqs = bcm6368_irqs;
- break;
- }
- }
+ if ((read_c0_prid() & 0xf0) == 0x10)
+ chipid_reg = BCM_6345_PERF_BASE;
+ else
+ chipid_reg = BCM_6368_PERF_BASE;
break;
}
@@ -294,20 +313,47 @@ void __init bcm63xx_cpu_init(void)
* really early to panic, but delaying panic would not help since we
* will never get any working console
*/
- if (!expected_cpu_id)
+ if (!chipid_reg)
panic("unsupported Broadcom CPU");
- /*
- * bcm63xx_regs_base is set, we can access soc registers
- */
-
- /* double check CPU type */
- tmp = bcm_perf_readl(PERF_REV_REG);
+ /* read out CPU type */
+ tmp = bcm_readl(chipid_reg);
bcm63xx_cpu_id = (tmp & REV_CHIPID_MASK) >> REV_CHIPID_SHIFT;
bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT;
- if (bcm63xx_cpu_id != expected_cpu_id)
- panic("bcm63xx CPU id mismatch");
+ switch (bcm63xx_cpu_id) {
+ case BCM6328_CPU_ID:
+ bcm63xx_regs_base = bcm6328_regs_base;
+ bcm63xx_irqs = bcm6328_irqs;
+ break;
+ case BCM6338_CPU_ID:
+ bcm63xx_regs_base = bcm6338_regs_base;
+ bcm63xx_irqs = bcm6338_irqs;
+ break;
+ case BCM6345_CPU_ID:
+ bcm63xx_regs_base = bcm6345_regs_base;
+ bcm63xx_irqs = bcm6345_irqs;
+ break;
+ case BCM6348_CPU_ID:
+ bcm63xx_regs_base = bcm6348_regs_base;
+ bcm63xx_irqs = bcm6348_irqs;
+ break;
+ case BCM6358_CPU_ID:
+ bcm63xx_regs_base = bcm6358_regs_base;
+ bcm63xx_irqs = bcm6358_irqs;
+ break;
+ case BCM6362_CPU_ID:
+ bcm63xx_regs_base = bcm6362_regs_base;
+ bcm63xx_irqs = bcm6362_irqs;
+ break;
+ case BCM6368_CPU_ID:
+ bcm63xx_regs_base = bcm6368_regs_base;
+ bcm63xx_irqs = bcm6368_irqs;
+ break;
+ default:
+ panic("unsupported broadcom CPU %x", bcm63xx_cpu_id);
+ break;
+ }
bcm63xx_cpu_freq = detect_cpu_clock();
bcm63xx_memory_size = detect_memory_size();
diff --git a/arch/mips/bcm63xx/dev-flash.c b/arch/mips/bcm63xx/dev-flash.c
index 58371c7deac2..588d1ec622e4 100644
--- a/arch/mips/bcm63xx/dev-flash.c
+++ b/arch/mips/bcm63xx/dev-flash.c
@@ -77,6 +77,12 @@ static int __init bcm63xx_detect_flash_type(void)
return BCM63XX_FLASH_TYPE_PARALLEL;
else
return BCM63XX_FLASH_TYPE_SERIAL;
+ case BCM6362_CPU_ID:
+ val = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+ if (val & STRAPBUS_6362_BOOT_SEL_SERIAL)
+ return BCM63XX_FLASH_TYPE_SERIAL;
+ else
+ return BCM63XX_FLASH_TYPE_NAND;
case BCM6368_CPU_ID:
val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
switch (val & STRAPBUS_6368_BOOT_SEL_MASK) {
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
index e97fd60e92ef..3065bb61820d 100644
--- a/arch/mips/bcm63xx/dev-spi.c
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -22,10 +22,6 @@
/*
* register offsets
*/
-static const unsigned long bcm6338_regs_spi[] = {
- __GEN_SPI_REGS_TABLE(6338)
-};
-
static const unsigned long bcm6348_regs_spi[] = {
__GEN_SPI_REGS_TABLE(6348)
};
@@ -34,23 +30,15 @@ static const unsigned long bcm6358_regs_spi[] = {
__GEN_SPI_REGS_TABLE(6358)
};
-static const unsigned long bcm6368_regs_spi[] = {
- __GEN_SPI_REGS_TABLE(6368)
-};
-
const unsigned long *bcm63xx_regs_spi;
EXPORT_SYMBOL(bcm63xx_regs_spi);
static __init void bcm63xx_spi_regs_init(void)
{
- if (BCMCPU_IS_6338())
- bcm63xx_regs_spi = bcm6338_regs_spi;
- if (BCMCPU_IS_6348())
+ if (BCMCPU_IS_6338() || BCMCPU_IS_6348())
bcm63xx_regs_spi = bcm6348_regs_spi;
- if (BCMCPU_IS_6358())
+ if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
bcm63xx_regs_spi = bcm6358_regs_spi;
- if (BCMCPU_IS_6368())
- bcm63xx_regs_spi = bcm6368_regs_spi;
}
#else
static __init void bcm63xx_spi_regs_init(void) { }
@@ -93,13 +81,13 @@ int __init bcm63xx_spi_register(void)
spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
- spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
- spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
- spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
- spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
+ spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1;
+ spi_pdata.fifo_size = SPI_6348_MSG_DATA_SIZE;
+ spi_pdata.msg_type_shift = SPI_6348_MSG_TYPE_SHIFT;
+ spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH;
}
- if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
+ if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) {
spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index da24c2bd9b7c..c0ab3887f42e 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -82,6 +82,17 @@ static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused;
#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6358
#define ext_irq_cfg_reg2 0
#endif
+#ifdef CONFIG_BCM63XX_CPU_6362
+#define irq_stat_reg PERF_IRQSTAT_6362_REG
+#define irq_mask_reg PERF_IRQMASK_6362_REG
+#define irq_bits 64
+#define is_ext_irq_cascaded 1
+#define ext_irq_start (BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE)
+#define ext_irq_end (BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE)
+#define ext_irq_count 4
+#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6362
+#define ext_irq_cfg_reg2 0
+#endif
#ifdef CONFIG_BCM63XX_CPU_6368
#define irq_stat_reg PERF_IRQSTAT_6368_REG
#define irq_mask_reg PERF_IRQMASK_6368_REG
@@ -170,6 +181,16 @@ static void bcm63xx_init_irq(void)
ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
break;
+ case BCM6362_CPU_ID:
+ irq_stat_addr += PERF_IRQSTAT_6362_REG;
+ irq_mask_addr += PERF_IRQMASK_6362_REG;
+ irq_bits = 64;
+ ext_irq_count = 4;
+ is_ext_irq_cascaded = 1;
+ ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+ ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
+ ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
+ break;
case BCM6368_CPU_ID:
irq_stat_addr += PERF_IRQSTAT_6368_REG;
irq_mask_addr += PERF_IRQMASK_6368_REG;
@@ -458,6 +479,7 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d,
case BCM6338_CPU_ID:
case BCM6345_CPU_ID:
case BCM6358_CPU_ID:
+ case BCM6362_CPU_ID:
case BCM6368_CPU_ID:
if (levelsense)
reg |= EXTIRQ_CFG_LEVELSENSE(irq);
diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
index 10eaff458071..fd698087fbfd 100644
--- a/arch/mips/bcm63xx/prom.c
+++ b/arch/mips/bcm63xx/prom.c
@@ -36,6 +36,8 @@ void __init prom_init(void)
mask = CKCTL_6348_ALL_SAFE_EN;
else if (BCMCPU_IS_6358())
mask = CKCTL_6358_ALL_SAFE_EN;
+ else if (BCMCPU_IS_6362())
+ mask = CKCTL_6362_ALL_SAFE_EN;
else if (BCMCPU_IS_6368())
mask = CKCTL_6368_ALL_SAFE_EN;
else
diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c
index 68a31bb90cbf..317931c6cf58 100644
--- a/arch/mips/bcm63xx/reset.c
+++ b/arch/mips/bcm63xx/reset.c
@@ -85,6 +85,20 @@
#define BCM6358_RESET_PCIE 0
#define BCM6358_RESET_PCIE_EXT 0
+#define BCM6362_RESET_SPI SOFTRESET_6362_SPI_MASK
+#define BCM6362_RESET_ENET 0
+#define BCM6362_RESET_USBH SOFTRESET_6362_USBH_MASK
+#define BCM6362_RESET_USBD SOFTRESET_6362_USBS_MASK
+#define BCM6362_RESET_DSL 0
+#define BCM6362_RESET_SAR SOFTRESET_6362_SAR_MASK
+#define BCM6362_RESET_EPHY SOFTRESET_6362_EPHY_MASK
+#define BCM6362_RESET_ENETSW SOFTRESET_6362_ENETSW_MASK
+#define BCM6362_RESET_PCM SOFTRESET_6362_PCM_MASK
+#define BCM6362_RESET_MPI 0
+#define BCM6362_RESET_PCIE (SOFTRESET_6362_PCIE_MASK | \
+ SOFTRESET_6362_PCIE_CORE_MASK)
+#define BCM6362_RESET_PCIE_EXT SOFTRESET_6362_PCIE_EXT_MASK
+
#define BCM6368_RESET_SPI SOFTRESET_6368_SPI_MASK
#define BCM6368_RESET_ENET 0
#define BCM6368_RESET_USBH SOFTRESET_6368_USBH_MASK
@@ -119,6 +133,10 @@ static const u32 bcm6358_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6358)
};
+static const u32 bcm6362_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6362)
+};
+
static const u32 bcm6368_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6368)
};
@@ -140,6 +158,9 @@ static int __init bcm63xx_reset_bits_init(void)
} else if (BCMCPU_IS_6358()) {
reset_reg = PERF_SOFTRESET_6358_REG;
bcm63xx_reset_bits = bcm6358_reset_bits;
+ } else if (BCMCPU_IS_6362()) {
+ reset_reg = PERF_SOFTRESET_6362_REG;
+ bcm63xx_reset_bits = bcm6362_reset_bits;
} else if (BCMCPU_IS_6368()) {
reset_reg = PERF_SOFTRESET_6368_REG;
bcm63xx_reset_bits = bcm6368_reset_bits;
@@ -182,6 +203,13 @@ static const u32 bcm63xx_reset_bits[] = {
#define reset_reg PERF_SOFTRESET_6358_REG
#endif
+#ifdef CONFIG_BCM63XX_CPU_6362
+static const u32 bcm63xx_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6362)
+};
+#define reset_reg PERF_SOFTRESET_6362_REG
+#endif
+
#ifdef CONFIG_BCM63XX_CPU_6368
static const u32 bcm63xx_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6368)
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
index 35e18e98beb9..24a24445db64 100644
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -83,6 +83,9 @@ void bcm63xx_machine_reboot(void)
case BCM6358_CPU_ID:
perf_regs[0] = PERF_EXTIRQ_CFG_REG_6358;
break;
+ case BCM6362_CPU_ID:
+ perf_regs[0] = PERF_EXTIRQ_CFG_REG_6362;
+ break;
}
for (i = 0; i < 2; i++) {
@@ -126,7 +129,7 @@ static void __bcm63xx_machine_reboot(char *p)
const char *get_system_type(void)
{
static char buf[128];
- snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%04X)",
+ snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%02X)",
board_get_name(),
bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev());
return buf;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 156aa6143e11..a22f06a6f7ca 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1032,9 +1032,8 @@ static int octeon_irq_gpio_map_common(struct irq_domain *d,
if (!octeon_irq_virq_in_range(virq))
return -EINVAL;
- hw += gpiod->base_hwirq;
- line = hw >> 6;
- bit = hw & 63;
+ line = (hw + gpiod->base_hwirq) >> 6;
+ bit = (hw + gpiod->base_hwirq) & 63;
if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
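For example, with base_hwirq = 16 and hw = 50 the sum is 66, giving line 1 (66 >> 6) and bit 2 (66 & 63); computing line and bit this way leaves hw itself unmodified.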
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index cd732e5b4fd5..ce1d3eeeb737 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -2,30 +2,21 @@ CONFIG_MIPS_MALTA=y
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_CPU_MIPS32_R2=y
CONFIG_MIPS_MT_SMP=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=15
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_RELAY=y
CONFIG_NAMESPACES=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
-CONFIG_PID_NS=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_RELAY=y
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_PCI=y
-CONFIG_PM=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=m
@@ -41,8 +32,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
@@ -65,7 +54,6 @@ CONFIG_IPV6_MROUTE=y
CONFIG_IPV6_PIMSM_V2=y
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
@@ -136,23 +124,15 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
-CONFIG_IP_VS_FTP=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -162,8 +142,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
-CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
@@ -173,7 +151,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
@@ -247,12 +224,10 @@ CONFIG_MAC80211=m
CONFIG_MAC80211_RC_PID=y
CONFIG_MAC80211_RC_DEFAULT_PID=y
CONFIG_MAC80211_MESH=y
-CONFIG_MAC80211_LEDS=y
CONFIG_RFKILL=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=m
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_OOPS=m
@@ -271,7 +246,6 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
-# CONFIG_MISC_DEVICES is not set
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_IDE_GENERIC=y
@@ -317,13 +291,19 @@ CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_NETDEVICES=y
-CONFIG_IFB=m
-CONFIG_DUMMY=m
CONFIG_BONDING=m
-CONFIG_MACVLAN=m
+CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
CONFIG_MARVELL_PHY=m
CONFIG_DAVICOM_PHY=m
CONFIG_QSEMI_PHY=m
@@ -334,14 +314,6 @@ CONFIG_SMSC_PHY=m
CONFIG_BROADCOM_PHY=m
CONFIG_ICPLUS_PHY=m
CONFIG_REALTEK_PHY=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_NET_ETHERNET=y
-CONFIG_AX88796=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-CONFIG_TC35815=m
-CONFIG_CHELSIO_T3=m
-CONFIG_NETXEN_NIC=m
CONFIG_ATMEL=m
CONFIG_PCI_ATMEL=m
CONFIG_PRISM54=m
@@ -352,15 +324,7 @@ CONFIG_HOSTAP_PLX=m
CONFIG_HOSTAP_PCI=m
CONFIG_IPW2100=m
CONFIG_IPW2100_MONITOR=y
-CONFIG_IPW2200=m
-CONFIG_IPW2200_MONITOR=y
-CONFIG_IPW2200_PROMISCUOUS=y
-CONFIG_IPW2200_QOS=y
CONFIG_LIBERTAS=m
-CONFIG_HERMES=m
-CONFIG_PLX_HERMES=m
-CONFIG_TMD_HERMES=m
-CONFIG_NORTEL_HERMES=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO_I8042 is not set
@@ -373,12 +337,6 @@ CONFIG_FB_CIRRUS=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_HID=m
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGER_TIMER=m
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_BACKLIGHT=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_UIO=m
@@ -398,7 +356,6 @@ CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
-CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
@@ -425,7 +382,6 @@ CONFIG_ROMFS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
@@ -466,7 +422,6 @@ CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
new file mode 100644
index 000000000000..341bb47204d6
--- /dev/null
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -0,0 +1,456 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_MIPS_DYN_TRANS=y
+CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
+CONFIG_VHOST_NET=m
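A kernel for this configuration is built with the usual kbuild targets, for example make ARCH=mips CROSS_COMPILE=mips-linux-gnu- malta_kvm_defconfig followed by a make with the same variables (the cross-compiler prefix here is only an example).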
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
new file mode 100644
index 000000000000..2b8558b71080
--- /dev/null
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -0,0 +1,453 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_KVM_GUEST=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
new file mode 100644
index 000000000000..93057a760dfa
--- /dev/null
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -0,0 +1,195 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_VPE_LOADER=y
+CONFIG_MIPS_VPE_APSP_API=y
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="aprp"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmtc_defconfig b/arch/mips/configs/maltasmtc_defconfig
new file mode 100644
index 000000000000..4e54b75d89be
--- /dev/null
+++ b/arch/mips/configs/maltasmtc_defconfig
@@ -0,0 +1,196 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_MT_SMTC=y
+# CONFIG_MIPS_MT_FPAFF is not set
+CONFIG_NR_CPUS=9
+CONFIG_HZ_48=y
+CONFIG_LOCALVERSION="smtc"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
new file mode 100644
index 000000000000..8a666021b870
--- /dev/null
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -0,0 +1,199 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_SCHED_SMT=y
+CONFIG_MIPS_CMP=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="cmp"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
new file mode 100644
index 000000000000..9868fc9c1133
--- /dev/null
+++ b/arch/mips/configs/maltaup_defconfig
@@ -0,0 +1,194 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="up"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/sead3_defconfig b/arch/mips/configs/sead3_defconfig
index e3eec68d9132..0abe681c11a0 100644
--- a/arch/mips/configs/sead3_defconfig
+++ b/arch/mips/configs/sead3_defconfig
@@ -2,7 +2,6 @@ CONFIG_MIPS_SEAD3=y
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_CPU_MIPS32_R2=y
CONFIG_HZ_100=y
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ=y
@@ -115,10 +114,8 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
# CONFIG_FTRACE is not set
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_ARC4=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/sead3micro_defconfig b/arch/mips/configs/sead3micro_defconfig
new file mode 100644
index 000000000000..2a0da5bf4b64
--- /dev/null
+++ b/arch/mips/configs/sead3micro_defconfig
@@ -0,0 +1,122 @@
+CONFIG_MIPS_SEAD3=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MICROMIPS=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_SMSC911X=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_ICPLUS_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_CONSOLE_TRANSLATIONS is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=32
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_SPI=y
+CONFIG_SENSORS_ADT7475=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_DEBUG=y
+CONFIG_MMC_SPI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_M41T80=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/fw/lib/Makefile b/arch/mips/fw/lib/Makefile
index 84befc968fc4..529150516777 100644
--- a/arch/mips/fw/lib/Makefile
+++ b/arch/mips/fw/lib/Makefile
@@ -2,4 +2,6 @@
# Makefile for generic prom monitor library routines under Linux.
#
+lib-y += cmdline.o
+
lib-$(CONFIG_64BIT) += call_o32.o
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
new file mode 100644
index 000000000000..ffd0345780ae
--- /dev/null
+++ b/arch/mips/fw/lib/cmdline.c
@@ -0,0 +1,101 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/addrspace.h>
+#include <asm/fw/fw.h>
+
+int fw_argc;
+int *_fw_argv;
+int *_fw_envp;
+
+void __init fw_init_cmdline(void)
+{
+ int i;
+
+ /* Validate command line parameters. */
+ if ((fw_arg0 >= CKSEG0) || (fw_arg1 < CKSEG0)) {
+ fw_argc = 0;
+ _fw_argv = NULL;
+ } else {
+ fw_argc = (fw_arg0 & 0x0000ffff);
+ _fw_argv = (int *)fw_arg1;
+ }
+
+ /* Validate environment pointer. */
+ if (fw_arg2 < CKSEG0)
+ _fw_envp = NULL;
+ else
+ _fw_envp = (int *)fw_arg2;
+
+ for (i = 1; i < fw_argc; i++) {
+ strlcat(arcs_cmdline, fw_argv(i), COMMAND_LINE_SIZE);
+ if (i < (fw_argc - 1))
+ strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+ }
+}
+
+char * __init fw_getcmdline(void)
+{
+ return &(arcs_cmdline[0]);
+}
+
+char *fw_getenv(char *envname)
+{
+ char *result = NULL;
+
+ if (_fw_envp != NULL) {
+ /*
+ * Return a pointer to the given environment variable.
+ * YAMON uses "name", "value" pairs, while U-Boot uses
+ * "name=value".
+ */
+ int i, yamon, index = 0;
+
+ yamon = (strchr(fw_envp(index), '=') == NULL);
+ i = strlen(envname);
+
+ while (fw_envp(index)) {
+ if (strncmp(envname, fw_envp(index), i) == 0) {
+ if (yamon) {
+ result = fw_envp(index + 1);
+ break;
+ } else if (fw_envp(index)[i] == '=') {
+ result = (fw_envp(index + 1) + i);
+ break;
+ }
+ }
+
+ /* Increment array index. */
+ if (yamon)
+ index += 2;
+ else
+ index += 1;
+ }
+ }
+
+ return result;
+}
+
+unsigned long fw_getenvl(char *envname)
+{
+ unsigned long envl = 0UL;
+ char *str;
+ long val;
+ int tmp;
+
+ str = fw_getenv(envname);
+ if (str) {
+ tmp = kstrtol(str, 0, &val);
+ envl = (unsigned long)val;
+ }
+
+ return envl;
+}
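
The helpers added above are meant to be driven from a board's early setup code. A minimal sketch of how a platform prom_init() could consume them is shown below; the "memsize" variable name and the 32 MB fallback are illustrative assumptions, not something this file mandates.

	/* Sketch only: a platform consuming the new firmware helpers. */
	#include <linux/init.h>
	#include <asm/bootinfo.h>
	#include <asm/fw/fw.h>

	void __init prom_init(void)
	{
		unsigned long memsize;

		fw_init_cmdline();	/* copies the firmware argv[] into arcs_cmdline */

		/* "memsize" is a typical YAMON variable; assumed here. */
		memsize = fw_getenvl("memsize");
		if (!memsize)
			memsize = 32 << 20;	/* fall back to 32 MB */

		add_memory_region(0, memsize, BOOT_MEM_RAM);
	}
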
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 164a21e65b42..879691d194af 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -296,6 +296,7 @@ symbol = value
#define LONG_SUBU subu
#define LONG_L lw
#define LONG_S sw
+#define LONG_SP swp
#define LONG_SLL sll
#define LONG_SLLV sllv
#define LONG_SRL srl
@@ -318,6 +319,7 @@ symbol = value
#define LONG_SUBU dsubu
#define LONG_L ld
#define LONG_S sd
+#define LONG_SP sdp
#define LONG_SLL dsll
#define LONG_SLLV dsllv
#define LONG_SRL dsrl
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index b71dd5b16085..4d2cdea5aa37 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -104,6 +104,7 @@ struct boot_mem_map {
extern struct boot_mem_map boot_mem_map;
extern void add_memory_region(phys_t start, phys_t size, long type);
+extern void detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max);
extern void prom_init(void);
extern void prom_free_prom_memory(void);
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index 888766ae1f85..e28a3e0eb3cb 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -11,6 +11,14 @@
#include <asm/ptrace.h>
#include <asm/inst.h>
+extern int __isa_exception_epc(struct pt_regs *regs);
+extern int __compute_return_epc(struct pt_regs *regs);
+extern int __compute_return_epc_for_insn(struct pt_regs *regs,
+ union mips_instruction insn);
+extern int __microMIPS_compute_return_epc(struct pt_regs *regs);
+extern int __MIPS16e_compute_return_epc(struct pt_regs *regs);
+
+
static inline int delay_slot(struct pt_regs *regs)
{
return regs->cp0_cause & CAUSEF_BD;
@@ -18,20 +26,27 @@ static inline int delay_slot(struct pt_regs *regs)
static inline unsigned long exception_epc(struct pt_regs *regs)
{
- if (!delay_slot(regs))
+ if (likely(!delay_slot(regs)))
return regs->cp0_epc;
+ if (get_isa16_mode(regs->cp0_epc))
+ return __isa_exception_epc(regs);
+
return regs->cp0_epc + 4;
}
#define BRANCH_LIKELY_TAKEN 0x0001
-extern int __compute_return_epc(struct pt_regs *regs);
-extern int __compute_return_epc_for_insn(struct pt_regs *regs,
- union mips_instruction insn);
-
static inline int compute_return_epc(struct pt_regs *regs)
{
+ if (get_isa16_mode(regs->cp0_epc)) {
+ if (cpu_has_mmips)
+ return __microMIPS_compute_return_epc(regs);
+ if (cpu_has_mips16)
+ return __MIPS16e_compute_return_epc(regs);
+ return regs->cp0_epc;
+ }
+
if (!delay_slot(regs)) {
regs->cp0_epc += 4;
return 0;
@@ -40,4 +55,19 @@ static inline int compute_return_epc(struct pt_regs *regs)
return __compute_return_epc(regs);
}
+static inline int MIPS16e_compute_return_epc(struct pt_regs *regs,
+ union mips16e_instruction *inst)
+{
+ if (likely(!delay_slot(regs))) {
+ if (inst->ri.opcode == MIPS16e_extend_op) {
+ regs->cp0_epc += 4;
+ return 0;
+ }
+ regs->cp0_epc += 2;
+ return 0;
+ }
+
+ return __MIPS16e_compute_return_epc(regs);
+}
+
#endif /* _ASM_BRANCH_H */
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 1a57e8b4d092..e5ec8fcd8afa 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -113,6 +113,9 @@
#ifndef cpu_has_pindexed_dcache
#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
#endif
+#ifndef cpu_has_local_ebase
+#define cpu_has_local_ebase 1
+#endif
/*
* I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
new file mode 100644
index 000000000000..242cbb3ca582
--- /dev/null
+++ b/arch/mips/include/asm/dma-coherence.h
@@ -0,0 +1,15 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
+ *
+ */
+#ifndef __ASM_DMA_COHERENCE_H
+#define __ASM_DMA_COHERENCE_H
+
+extern int coherentio;
+extern int hw_coherentio;
+
+#endif
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index f8fc74b6cb47..84238c574d5e 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -2,6 +2,7 @@
#define _ASM_DMA_MAPPING_H
#include <asm/scatterlist.h>
+#include <asm/dma-coherence.h>
#include <asm/cache.h>
#include <asm-generic/dma-coherent.h>
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 3b4092705567..2abb587d5ab4 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -54,6 +54,12 @@ do { \
extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
unsigned long cpc);
extern int do_dsemulret(struct pt_regs *xcp);
+extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
+ struct mips_fpu_struct *ctx, int has_fpu,
+ void *__user *fault_addr);
+int process_fpemu_return(int sig, void __user *fault_addr);
+int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc);
/*
* Instruction inserted following the badinst to further tag the sequence
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h
new file mode 100644
index 000000000000..d6c50a7e9ede
--- /dev/null
+++ b/arch/mips/include/asm/fw/fw.h
@@ -0,0 +1,47 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ */
+#ifndef __ASM_FW_H_
+#define __ASM_FW_H_
+
+#include <asm/bootinfo.h> /* For cleaner code... */
+
+enum fw_memtypes {
+ fw_dontuse,
+ fw_code,
+ fw_free,
+};
+
+typedef struct {
+ unsigned long base; /* Within KSEG0 */
+ unsigned int size; /* bytes */
+ enum fw_memtypes type; /* fw_memtypes */
+} fw_memblock_t;
+
+/* Maximum number of memory block descriptors. */
+#define FW_MAX_MEMBLOCKS 32
+
+extern int fw_argc;
+extern int *_fw_argv;
+extern int *_fw_envp;
+
+/*
+ * Most firmware like YAMON, PMON, etc. pass arguments and environment
+ * variables as 32-bit pointers. These take care of sign extension.
+ */
+#define fw_argv(index) ((char *)(long)_fw_argv[(index)])
+#define fw_envp(index) ((char *)(long)_fw_envp[(index)])
+
+extern void fw_init_cmdline(void);
+extern char *fw_getcmdline(void);
+extern fw_memblock_t *fw_getmdesc(void);
+extern void fw_meminit(void);
+extern char *fw_getenv(char *name);
+extern unsigned long fw_getenvl(char *name);
+extern void fw_init_early_console(char port);
+
+#endif /* __ASM_FW_H_ */
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index bdc9786ab5a7..7153b32de18e 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -202,7 +202,7 @@
#define GIC_VPE_WD_COUNT0_OFS 0x0094
#define GIC_VPE_WD_INITIAL0_OFS 0x0098
#define GIC_VPE_COMPARE_LO_OFS 0x00a0
-#define GIC_VPE_COMPARE_HI 0x00a4
+#define GIC_VPE_COMPARE_HI_OFS 0x00a4
#define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100
#define GIC_VPE_EIC_SS(intr) \
@@ -359,7 +359,11 @@ struct gic_shared_intr_map {
/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET (1)
-extern int gic_present;
+#include <linux/clocksource.h>
+#include <linux/irq.h>
+
+extern unsigned int gic_present;
+extern unsigned int gic_frequency;
extern unsigned long _gic_base;
extern unsigned int gic_irq_base;
extern unsigned int gic_irq_flags[];
@@ -368,18 +372,20 @@ extern struct gic_shared_intr_map gic_shared_intr_map[];
extern void gic_init(unsigned long gic_base_addr,
unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
unsigned int intrmap_size, unsigned int irqbase);
-
extern void gic_clocksource_init(unsigned int);
-extern unsigned int gic_get_int(void);
+extern unsigned int gic_compare_int (void);
+extern cycle_t gic_read_count(void);
+extern cycle_t gic_read_compare(void);
+extern void gic_write_compare(cycle_t cnt);
extern void gic_send_ipi(unsigned int intr);
extern unsigned int plat_ipi_call_int_xlate(unsigned int);
extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
extern void gic_bind_eic_interrupt(int irq, int set);
extern unsigned int gic_get_timer_pending(void);
+extern unsigned int gic_get_int(void);
extern void gic_enable_interrupt(int irq_vec);
extern void gic_disable_interrupt(int irq_vec);
extern void gic_irq_ack(struct irq_data *d);
extern void gic_finish_irq(struct irq_data *d);
extern void gic_platform_init(int irqs, struct irq_chip *irq_controller);
-
#endif /* _ASM_GICREGS_H */
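
The new counter accessors (gic_read_count(), gic_write_compare(), gic_frequency) are the building blocks for a GIC-based clocksource. A hedged sketch of the clocksource side follows; the init function name is illustrative so it does not collide with the gic_clocksource_init() declared above.

	/* Sketch only: exposing the GIC shared counter as a clocksource. */
	#include <linux/init.h>
	#include <linux/clocksource.h>
	#include <asm/gic.h>

	static cycle_t gic_hpt_read(struct clocksource *cs)
	{
		return gic_read_count();
	}

	static struct clocksource example_gic_clocksource = {
		.name	= "GIC",
		.read	= gic_hpt_read,
		.mask	= CLOCKSOURCE_MASK(64),
		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	};

	static int __init example_gic_csrc_init(unsigned int frequency)
	{
		/* gic_frequency is the new global exported by this header. */
		return clocksource_register_hz(&example_gic_clocksource, frequency);
	}
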
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index 44d6a5bde4a1..e3ee92d4dbe7 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -10,34 +10,13 @@
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H
-#ifdef __ASSEMBLY__
-#define ASMMACRO(name, code...) .macro name; code; .endm
-#else
-
-#include <asm/cpu-features.h>
-
-#define ASMMACRO(name, code...) \
-__asm__(".macro " #name "; " #code "; .endm"); \
- \
-static inline void name(void) \
-{ \
- __asm__ __volatile__ (#name); \
-}
-
-/*
- * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
- */
-extern void mips_ihb(void);
-
-#endif
+#include <linux/stringify.h>
-ASMMACRO(_ssnop,
- sll $0, $0, 1
- )
+#define ___ssnop \
+ sll $0, $0, 1
-ASMMACRO(_ehb,
- sll $0, $0, 3
- )
+#define ___ehb \
+ sll $0, $0, 3
/*
* TLB hazards
@@ -48,24 +27,24 @@ ASMMACRO(_ehb,
* MIPSR2 defines ehb for hazard avoidance
*/
-ASMMACRO(mtc0_tlbw_hazard,
- _ehb
- )
-ASMMACRO(tlbw_use_hazard,
- _ehb
- )
-ASMMACRO(tlb_probe_hazard,
- _ehb
- )
-ASMMACRO(irq_enable_hazard,
- _ehb
- )
-ASMMACRO(irq_disable_hazard,
- _ehb
- )
-ASMMACRO(back_to_back_c0_hazard,
- _ehb
- )
+#define __mtc0_tlbw_hazard \
+ ___ehb
+
+#define __tlbw_use_hazard \
+ ___ehb
+
+#define __tlb_probe_hazard \
+ ___ehb
+
+#define __irq_enable_hazard \
+ ___ehb
+
+#define __irq_disable_hazard \
+ ___ehb
+
+#define __back_to_back_c0_hazard \
+ ___ehb
+
/*
* gcc has a tradition of misscompiling the previous construct using the
* address of a label as argument to inline assembler. Gas otoh has the
@@ -94,24 +73,42 @@ do { \
* These are slightly complicated by the fact that we guarantee R1 kernels to
* run fine on R2 processors.
*/
-ASMMACRO(mtc0_tlbw_hazard,
- _ssnop; _ssnop; _ehb
- )
-ASMMACRO(tlbw_use_hazard,
- _ssnop; _ssnop; _ssnop; _ehb
- )
-ASMMACRO(tlb_probe_hazard,
- _ssnop; _ssnop; _ssnop; _ehb
- )
-ASMMACRO(irq_enable_hazard,
- _ssnop; _ssnop; _ssnop; _ehb
- )
-ASMMACRO(irq_disable_hazard,
- _ssnop; _ssnop; _ssnop; _ehb
- )
-ASMMACRO(back_to_back_c0_hazard,
- _ssnop; _ssnop; _ssnop; _ehb
- )
+
+#define __mtc0_tlbw_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
+#define __tlbw_use_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
+#define __tlb_probe_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
+#define __irq_enable_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
+#define __irq_disable_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
+#define __back_to_back_c0_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
/*
* gcc has a tradition of misscompiling the previous construct using the
* address of a label as argument to inline assembler. Gas otoh has the
@@ -147,18 +144,18 @@ do { \
* R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
*/
-ASMMACRO(mtc0_tlbw_hazard,
- )
-ASMMACRO(tlbw_use_hazard,
- )
-ASMMACRO(tlb_probe_hazard,
- )
-ASMMACRO(irq_enable_hazard,
- )
-ASMMACRO(irq_disable_hazard,
- )
-ASMMACRO(back_to_back_c0_hazard,
- )
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard
+
+#define __back_to_back_c0_hazard
+
#define instruction_hazard() do { } while (0)
#elif defined(CONFIG_CPU_SB1)
@@ -166,19 +163,21 @@ ASMMACRO(back_to_back_c0_hazard,
/*
* Mostly like R4000 for historic reasons
*/
-ASMMACRO(mtc0_tlbw_hazard,
- )
-ASMMACRO(tlbw_use_hazard,
- )
-ASMMACRO(tlb_probe_hazard,
- )
-ASMMACRO(irq_enable_hazard,
- )
-ASMMACRO(irq_disable_hazard,
- _ssnop; _ssnop; _ssnop
- )
-ASMMACRO(back_to_back_c0_hazard,
- )
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop
+
+#define __back_to_back_c0_hazard
+
#define instruction_hazard() do { } while (0)
#else
@@ -192,24 +191,35 @@ ASMMACRO(back_to_back_c0_hazard,
* hazard so this is nice trick to have an optimal code for a range of
* processors.
*/
-ASMMACRO(mtc0_tlbw_hazard,
- nop; nop
- )
-ASMMACRO(tlbw_use_hazard,
- nop; nop; nop
- )
-ASMMACRO(tlb_probe_hazard,
- nop; nop; nop
- )
-ASMMACRO(irq_enable_hazard,
- _ssnop; _ssnop; _ssnop;
- )
-ASMMACRO(irq_disable_hazard,
- nop; nop; nop
- )
-ASMMACRO(back_to_back_c0_hazard,
- _ssnop; _ssnop; _ssnop;
- )
+#define __mtc0_tlbw_hazard \
+ nop; \
+ nop
+
+#define __tlbw_use_hazard \
+ nop; \
+ nop; \
+ nop
+
+#define __tlb_probe_hazard \
+ nop; \
+ nop; \
+ nop
+
+#define __irq_enable_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop
+
+#define __irq_disable_hazard \
+ nop; \
+ nop; \
+ nop
+
+#define __back_to_back_c0_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop
+
#define instruction_hazard() do { } while (0)
#endif
@@ -218,32 +228,137 @@ ASMMACRO(back_to_back_c0_hazard,
/* FPU hazards */
#if defined(CONFIG_CPU_SB1)
-ASMMACRO(enable_fpu_hazard,
- .set push;
- .set mips64;
- .set noreorder;
- _ssnop;
- bnezl $0, .+4;
- _ssnop;
- .set pop
-)
-ASMMACRO(disable_fpu_hazard,
-)
+
+#define __enable_fpu_hazard \
+ .set push; \
+ .set mips64; \
+ .set noreorder; \
+ ___ssnop; \
+ bnezl $0, .+4; \
+ ___ssnop; \
+ .set pop
+
+#define __disable_fpu_hazard
#elif defined(CONFIG_CPU_MIPSR2)
-ASMMACRO(enable_fpu_hazard,
- _ehb
-)
-ASMMACRO(disable_fpu_hazard,
- _ehb
-)
+
+#define __enable_fpu_hazard \
+ ___ehb
+
+#define __disable_fpu_hazard \
+ ___ehb
+
#else
-ASMMACRO(enable_fpu_hazard,
- nop; nop; nop; nop
-)
-ASMMACRO(disable_fpu_hazard,
- _ehb
-)
+
+#define __enable_fpu_hazard \
+ nop; \
+ nop; \
+ nop; \
+ nop
+
+#define __disable_fpu_hazard \
+ ___ehb
+
#endif
+#ifdef __ASSEMBLY__
+
+#define _ssnop ___ssnop
+#define _ehb ___ehb
+#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
+#define tlbw_use_hazard __tlbw_use_hazard
+#define tlb_probe_hazard __tlb_probe_hazard
+#define irq_enable_hazard __irq_enable_hazard
+#define irq_disable_hazard __irq_disable_hazard
+#define back_to_back_c0_hazard __back_to_back_c0_hazard
+#define enable_fpu_hazard __enable_fpu_hazard
+#define disable_fpu_hazard __disable_fpu_hazard
+
+#else
+
+#define _ssnop() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(___ssnop) \
+ ); \
+} while (0)
+
+#define _ehb() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(___ehb) \
+ ); \
+} while (0)
+
+
+#define mtc0_tlbw_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__mtc0_tlbw_hazard) \
+ ); \
+} while (0)
+
+
+#define tlbw_use_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__tlbw_use_hazard) \
+ ); \
+} while (0)
+
+
+#define tlb_probe_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__tlb_probe_hazard) \
+ ); \
+} while (0)
+
+
+#define irq_enable_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__irq_enable_hazard) \
+ ); \
+} while (0)
+
+
+#define irq_disable_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__irq_disable_hazard) \
+ ); \
+} while (0)
+
+
+#define back_to_back_c0_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__back_to_back_c0_hazard) \
+ ); \
+} while (0)
+
+
+#define enable_fpu_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__enable_fpu_hazard) \
+ ); \
+} while (0)
+
+
+#define disable_fpu_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__disable_fpu_hazard) \
+ ); \
+} while (0)
+
+/*
+ * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
+ */
+extern void mips_ihb(void);
+
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_HAZARDS_H */
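
The effect of the __stringify() rework is that a single macro body now feeds both assembler files (via the bare macro name) and C code (via the do/while wrappers). On a CONFIG_CPU_MIPSR2 build, for instance, a C caller ends up with a one-instruction barrier, roughly as sketched here:

	/* Illustration: a C user of the reworked hazard macros on MIPS R2. */
	#include <asm/hazards.h>
	#include <asm/mipsregs.h>

	static inline void example_set_compare(unsigned int val)
	{
		write_c0_compare(val);		/* mtc0 to CP0 Compare */
		back_to_back_c0_hazard();
		/*
		 * Expands to roughly:
		 *   __asm__ __volatile__("sll $0, $0, 3");
		 * i.e. the ehb encoding, while a .S file using the plain
		 * back_to_back_c0_hazard name gets the same sequence.
		 */
	}
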
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
index f1eadf764071..22912f78401c 100644
--- a/arch/mips/include/asm/inst.h
+++ b/arch/mips/include/asm/inst.h
@@ -73,4 +73,16 @@
typedef unsigned int mips_instruction;
+/* microMIPS instruction decode structure. Do NOT export!!! */
+struct mm_decoded_insn {
+ mips_instruction insn;
+ mips_instruction next_insn;
+ int pc_inc;
+ int next_pc_inc;
+ int micro_mips_mode;
+};
+
+/* Recode table from 16-bit register notation to 32-bit GPR. Do NOT export!!! */
+extern const int reg16to32[];
+
#endif /* _ASM_INST_H */
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 9f3384c789d7..45c00951888b 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -14,53 +14,48 @@
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
+#include <linux/stringify.h>
#include <asm/hazards.h>
#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
-__asm__(
- " .macro arch_local_irq_disable\n"
+static inline void arch_local_irq_disable(void)
+{
+ __asm__ __volatile__(
" .set push \n"
" .set noat \n"
" di \n"
- " irq_disable_hazard \n"
+ " " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
- " .endm \n");
-
-static inline void arch_local_irq_disable(void)
-{
- __asm__ __volatile__(
- "arch_local_irq_disable"
- : /* no outputs */
- : /* no inputs */
- : "memory");
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
}
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
-__asm__(
- " .macro arch_local_irq_save result \n"
+ asm __volatile__(
" .set push \n"
" .set reorder \n"
" .set noat \n"
- " di \\result \n"
- " andi \\result, 1 \n"
- " irq_disable_hazard \n"
+ " di %[flags] \n"
+ " andi %[flags], 1 \n"
+ " " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
- " .endm \n");
+ : [flags] "=r" (flags)
+ : /* no inputs */
+ : "memory");
-static inline unsigned long arch_local_irq_save(void)
-{
- unsigned long flags;
- asm volatile("arch_local_irq_save\t%0"
- : "=r" (flags)
- : /* no inputs */
- : "memory");
return flags;
}
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
-__asm__(
- " .macro arch_local_irq_restore flags \n"
+ __asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set noat \n"
@@ -69,7 +64,7 @@ __asm__(
* Slow, but doesn't suffer from a relatively unlikely race
* condition we're having since days 1.
*/
- " beqz \\flags, 1f \n"
+ " beqz %[flags], 1f \n"
" di \n"
" ei \n"
"1: \n"
@@ -78,33 +73,44 @@ __asm__(
* Fast, dangerous. Life is fun, life is good.
*/
" mfc0 $1, $12 \n"
- " ins $1, \\flags, 0, 1 \n"
+ " ins $1, %[flags], 0, 1 \n"
" mtc0 $1, $12 \n"
#endif
- " irq_disable_hazard \n"
+ " " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
- " .endm \n");
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
- unsigned long __tmp1;
-
- __asm__ __volatile__(
- "arch_local_irq_restore\t%0"
- : "=r" (__tmp1)
- : "0" (flags)
- : "memory");
+ : [flags] "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
}
static inline void __arch_local_irq_restore(unsigned long flags)
{
- unsigned long __tmp1;
-
__asm__ __volatile__(
- "arch_local_irq_restore\t%0"
- : "=r" (__tmp1)
- : "0" (flags)
- : "memory");
+ " .set push \n"
+ " .set noreorder \n"
+ " .set noat \n"
+#if defined(CONFIG_IRQ_CPU)
+ /*
+ * Slow, but doesn't suffer from a relatively unlikely race
+ * condition we're having since days 1.
+ */
+ " beqz %[flags], 1f \n"
+ " di \n"
+ " ei \n"
+ "1: \n"
+#else
+ /*
+ * Fast, dangerous. Life is fun, life is good.
+ */
+ " mfc0 $1, $12 \n"
+ " ins $1, %[flags], 0, 1 \n"
+ " mtc0 $1, $12 \n"
+#endif
+ " " __stringify(__irq_disable_hazard) " \n"
+ " .set pop \n"
+ : [flags] "=r" (flags)
+ : "0" (flags)
+ : "memory");
}
#else
/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
@@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags);
#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
-__asm__(
- " .macro arch_local_irq_enable \n"
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of call overhead on each local_irq_enable()
+ */
+ smtc_ipi_replay();
+#endif
+ __asm__ __volatile__(
" .set push \n"
" .set reorder \n"
" .set noat \n"
@@ -133,45 +149,28 @@ __asm__(
" xori $1,0x1e \n"
" mtc0 $1,$12 \n"
#endif
- " irq_enable_hazard \n"
+ " " __stringify(__irq_enable_hazard) " \n"
" .set pop \n"
- " .endm");
-
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC kernel needs to do a software replay of queued
- * IPIs, at the cost of call overhead on each local_irq_enable()
- */
- smtc_ipi_replay();
-#endif
- __asm__ __volatile__(
- "arch_local_irq_enable"
- : /* no outputs */
- : /* no inputs */
- : "memory");
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
}
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
-__asm__(
- " .macro arch_local_save_flags flags \n"
+ asm __volatile__(
" .set push \n"
" .set reorder \n"
#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 \\flags, $2, 1 \n"
+ " mfc0 %[flags], $2, 1 \n"
#else
- " mfc0 \\flags, $12 \n"
+ " mfc0 %[flags], $12 \n"
#endif
" .set pop \n"
- " .endm \n");
+ : [flags] "=r" (flags));
-static inline unsigned long arch_local_save_flags(void)
-{
- unsigned long flags;
- asm volatile("arch_local_save_flags %0" : "=r" (flags));
return flags;
}
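
With the macros turned into ordinary inline functions, callers look like any other C code. A minimal usage sketch of the save/restore pair (the generic local_irq_save()/local_irq_restore() wrappers in linux/irqflags.h bottom out in these arch hooks):

	/* Sketch: the usual critical-section pattern built on these primitives. */
	#include <linux/irqflags.h>

	static unsigned int example_counter;

	static void example_bump_counter(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* -> arch_local_irq_save() */
		example_counter++;
		local_irq_restore(flags);	/* -> arch_local_irq_restore() */
	}
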
diff --git a/arch/mips/include/asm/kvm.h b/arch/mips/include/asm/kvm.h
new file mode 100644
index 000000000000..85789eacbf18
--- /dev/null
+++ b/arch/mips/include/asm/kvm.h
@@ -0,0 +1,55 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __LINUX_KVM_MIPS_H
+#define __LINUX_KVM_MIPS_H
+
+#include <linux/types.h>
+
+#define __KVM_MIPS
+
+#define N_MIPS_COPROC_REGS 32
+#define N_MIPS_COPROC_SEL 8
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+ __u32 gprs[32];
+ __u32 hi;
+ __u32 lo;
+ __u32 pc;
+
+ __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+};
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+struct kvm_mips_interrupt {
+ /* in */
+ __u32 cpu;
+ __u32 irq;
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+#endif /* __LINUX_KVM_MIPS_H */
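
Since kvm_regs is the ABI handed to userspace, the standard KVM register ioctls operate on it unchanged. A hedged userspace sketch (error handling elided, vcpu_fd assumed to be an already-created vCPU file descriptor):

	/* Userspace sketch: reading a MIPS guest's PC via the generic KVM ioctls. */
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static void example_dump_pc(int vcpu_fd)
	{
		struct kvm_regs regs;

		if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) == 0)
			printf("guest pc = 0x%x\n", regs.pc);
	}
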
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
new file mode 100644
index 000000000000..e68781e18387
--- /dev/null
+++ b/arch/mips/include/asm/kvm_host.h
@@ -0,0 +1,667 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __MIPS_KVM_HOST_H__
+#define __MIPS_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+
+
+#define KVM_MAX_VCPUS 1
+#define KVM_USER_MEM_SLOTS 8
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 0
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+/* Don't support huge pages */
+#define KVM_HPAGE_GFN_SHIFT(x) 0
+
+/* We don't currently support large pages. */
+#define KVM_NR_PAGE_SIZES 1
+#define KVM_PAGES_PER_HPAGE(x) 1
+
+
+
+/* Special address that contains the comm page, used for reducing # of traps */
+#define KVM_GUEST_COMMPAGE_ADDR 0x0
+
+#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
+ ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
+
+#define KVM_GUEST_KUSEG 0x00000000UL
+#define KVM_GUEST_KSEG0 0x40000000UL
+#define KVM_GUEST_KSEG23 0x60000000UL
+#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
+
+#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+/*
+ * Map an address to a certain kernel segment
+ */
+#define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+#define KVM_INVALID_PAGE 0xdeadbeef
+#define KVM_INVALID_INST 0xdeadbeef
+#define KVM_INVALID_ADDR 0xdeadbeef
+
+#define KVM_MALTA_GUEST_RTC_ADDR 0xb8000070UL
+
+#define GUEST_TICKS_PER_JIFFY (40000000/HZ)
+#define MS_TO_NS(x) ((x) * 1E6L)
+
+#define CAUSEB_DC 27
+#define CAUSEF_DC (_ULCAST_(1) << 27)
+
+struct kvm;
+struct kvm_run;
+struct kvm_vcpu;
+struct kvm_interrupt;
+
+extern atomic_t kvm_mips_instance;
+extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
+extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
+extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
+
+struct kvm_vm_stat {
+ u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+ u32 wait_exits;
+ u32 cache_exits;
+ u32 signal_exits;
+ u32 int_exits;
+ u32 cop_unusable_exits;
+ u32 tlbmod_exits;
+ u32 tlbmiss_ld_exits;
+ u32 tlbmiss_st_exits;
+ u32 addrerr_st_exits;
+ u32 addrerr_ld_exits;
+ u32 syscall_exits;
+ u32 resvd_inst_exits;
+ u32 break_inst_exits;
+ u32 flush_dcache_exits;
+ u32 halt_wakeup;
+};
+
+enum kvm_mips_exit_types {
+ WAIT_EXITS,
+ CACHE_EXITS,
+ SIGNAL_EXITS,
+ INT_EXITS,
+ COP_UNUSABLE_EXITS,
+ TLBMOD_EXITS,
+ TLBMISS_LD_EXITS,
+ TLBMISS_ST_EXITS,
+ ADDRERR_ST_EXITS,
+ ADDRERR_LD_EXITS,
+ SYSCALL_EXITS,
+ RESVD_INST_EXITS,
+ BREAK_INST_EXITS,
+ FLUSH_DCACHE_EXITS,
+ MAX_KVM_MIPS_EXIT_TYPES
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_arch {
+ /* Guest GVA->HPA page table */
+ unsigned long *guest_pmap;
+ unsigned long guest_pmap_npages;
+
+ /* Wired host TLB used for the commpage */
+ int commpage_tlb;
+};
+
+#define N_MIPS_COPROC_REGS 32
+#define N_MIPS_COPROC_SEL 8
+
+struct mips_coproc {
+ unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#endif
+};
+
+/*
+ * Coprocessor 0 register names
+ */
+#define MIPS_CP0_TLB_INDEX 0
+#define MIPS_CP0_TLB_RANDOM 1
+#define MIPS_CP0_TLB_LOW 2
+#define MIPS_CP0_TLB_LO0 2
+#define MIPS_CP0_TLB_LO1 3
+#define MIPS_CP0_TLB_CONTEXT 4
+#define MIPS_CP0_TLB_PG_MASK 5
+#define MIPS_CP0_TLB_WIRED 6
+#define MIPS_CP0_HWRENA 7
+#define MIPS_CP0_BAD_VADDR 8
+#define MIPS_CP0_COUNT 9
+#define MIPS_CP0_TLB_HI 10
+#define MIPS_CP0_COMPARE 11
+#define MIPS_CP0_STATUS 12
+#define MIPS_CP0_CAUSE 13
+#define MIPS_CP0_EXC_PC 14
+#define MIPS_CP0_PRID 15
+#define MIPS_CP0_CONFIG 16
+#define MIPS_CP0_LLADDR 17
+#define MIPS_CP0_WATCH_LO 18
+#define MIPS_CP0_WATCH_HI 19
+#define MIPS_CP0_TLB_XCONTEXT 20
+#define MIPS_CP0_ECC 26
+#define MIPS_CP0_CACHE_ERR 27
+#define MIPS_CP0_TAG_LO 28
+#define MIPS_CP0_TAG_HI 29
+#define MIPS_CP0_ERROR_PC 30
+#define MIPS_CP0_DEBUG 23
+#define MIPS_CP0_DEPC 24
+#define MIPS_CP0_PERFCNT 25
+#define MIPS_CP0_ERRCTL 26
+#define MIPS_CP0_DATA_LO 28
+#define MIPS_CP0_DATA_HI 29
+#define MIPS_CP0_DESAVE 31
+
+#define MIPS_CP0_CONFIG_SEL 0
+#define MIPS_CP0_CONFIG1_SEL 1
+#define MIPS_CP0_CONFIG2_SEL 2
+#define MIPS_CP0_CONFIG3_SEL 3
+
+/* Config0 register bits */
+#define CP0C0_M 31
+#define CP0C0_K23 28
+#define CP0C0_KU 25
+#define CP0C0_MDU 20
+#define CP0C0_MM 17
+#define CP0C0_BM 16
+#define CP0C0_BE 15
+#define CP0C0_AT 13
+#define CP0C0_AR 10
+#define CP0C0_MT 7
+#define CP0C0_VI 3
+#define CP0C0_K0 0
+
+/* Config1 register bits */
+#define CP0C1_M 31
+#define CP0C1_MMU 25
+#define CP0C1_IS 22
+#define CP0C1_IL 19
+#define CP0C1_IA 16
+#define CP0C1_DS 13
+#define CP0C1_DL 10
+#define CP0C1_DA 7
+#define CP0C1_C2 6
+#define CP0C1_MD 5
+#define CP0C1_PC 4
+#define CP0C1_WR 3
+#define CP0C1_CA 2
+#define CP0C1_EP 1
+#define CP0C1_FP 0
+
+/* Config2 Register bits */
+#define CP0C2_M 31
+#define CP0C2_TU 28
+#define CP0C2_TS 24
+#define CP0C2_TL 20
+#define CP0C2_TA 16
+#define CP0C2_SU 12
+#define CP0C2_SS 8
+#define CP0C2_SL 4
+#define CP0C2_SA 0
+
+/* Config3 Register bits */
+#define CP0C3_M 31
+#define CP0C3_ISA_ON_EXC 16
+#define CP0C3_ULRI 13
+#define CP0C3_DSPP 10
+#define CP0C3_LPA 7
+#define CP0C3_VEIC 6
+#define CP0C3_VInt 5
+#define CP0C3_SP 4
+#define CP0C3_MT 2
+#define CP0C3_SM 1
+#define CP0C3_TL 0
+
+/* Have config1, Cacheable, noncoherent, write-back, write allocate */
+#define MIPS_CONFIG0 \
+ ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
+
+/* Have config2, no coprocessor 2 attached, no MDMX support attached,
+   no performance counters, no watch registers,
+   no code compression, EJTAG present, no FPU */
+#define MIPS_CONFIG1 \
+((1 << CP0C1_M) | \
+ (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
+ (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
+ (0 << CP0C1_FP))
+
+/* Have config3, no tertiary/secondary caches implemented */
+#define MIPS_CONFIG2 \
+((1 << CP0C2_M))
+
+/* No config4, no DSP ASE, no large physaddr (PABITS),
+ no external interrupt controller, no vectored interrupts,
+ no 1kb pages, no SmartMIPS ASE, no trace logic */
+#define MIPS_CONFIG3 \
+((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
+ (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
+ (0 << CP0C3_SM) | (0 << CP0C3_TL))
+
+/* MMU types, the first four entries have the same layout as the
+ CP0C0_MT field. */
+enum mips_mmu_types {
+ MMU_TYPE_NONE,
+ MMU_TYPE_R4000,
+ MMU_TYPE_RESERVED,
+ MMU_TYPE_FMT,
+ MMU_TYPE_R3000,
+ MMU_TYPE_R6000,
+ MMU_TYPE_R8000
+};
+
+/*
+ * Trap codes
+ */
+#define T_INT 0 /* Interrupt pending */
+#define T_TLB_MOD 1 /* TLB modified fault */
+#define T_TLB_LD_MISS 2 /* TLB miss on load or ifetch */
+#define T_TLB_ST_MISS 3 /* TLB miss on a store */
+#define T_ADDR_ERR_LD 4 /* Address error on a load or ifetch */
+#define T_ADDR_ERR_ST 5 /* Address error on a store */
+#define T_BUS_ERR_IFETCH 6 /* Bus error on an ifetch */
+#define T_BUS_ERR_LD_ST 7 /* Bus error on a load or store */
+#define T_SYSCALL 8 /* System call */
+#define T_BREAK 9 /* Breakpoint */
+#define T_RES_INST 10 /* Reserved instruction exception */
+#define T_COP_UNUSABLE 11 /* Coprocessor unusable */
+#define T_OVFLOW 12 /* Arithmetic overflow */
+
+/*
+ * Trap definitions added for r4000 port.
+ */
+#define T_TRAP 13 /* Trap instruction */
+#define T_VCEI 14 /* Virtual coherency exception */
+#define T_FPE 15 /* Floating point exception */
+#define T_WATCH 23 /* Watch address reference */
+#define T_VCED 31 /* Virtual coherency data */
+
+/* Resume Flags */
+#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
+
+#define RESUME_GUEST 0
+#define RESUME_GUEST_DR RESUME_FLAG_DR
+#define RESUME_HOST RESUME_FLAG_HOST
+
+enum emulation_result {
+ EMULATE_DONE, /* no further processing */
+ EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
+ EMULATE_FAIL, /* can't emulate this instruction */
+ EMULATE_WAIT, /* WAIT instruction */
+ EMULATE_PRIV_FAIL,
+};
+
+#define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
+#define MIPS3_PG_V 0x00000002 /* Valid */
+#define MIPS3_PG_NV 0x00000000
+#define MIPS3_PG_D 0x00000004 /* Dirty */
+
+#define mips3_paddr_to_tlbpfn(x) \
+ (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
+#define mips3_tlbpfn_to_paddr(x) \
+ ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
+
+#define MIPS3_PG_SHIFT 6
+#define MIPS3_PG_FRAME 0x3fffffc0
+
+#define VPN2_MASK 0xffffe000
+#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
+#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
+#define TLB_ASID(x) (ASID_MASK((x).tlb_hi))
+#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
+
+struct kvm_mips_tlb {
+ long tlb_mask;
+ long tlb_hi;
+ long tlb_lo0;
+ long tlb_lo1;
+};
+
+#define KVM_MIPS_GUEST_TLB_SIZE 64
+struct kvm_vcpu_arch {
+ void *host_ebase, *guest_ebase;
+ unsigned long host_stack;
+ unsigned long host_gp;
+
+ /* Host CP0 registers used when handling exits from guest */
+ unsigned long host_cp0_badvaddr;
+ unsigned long host_cp0_cause;
+ unsigned long host_cp0_epc;
+ unsigned long host_cp0_entryhi;
+ uint32_t guest_inst;
+
+ /* GPRS */
+ unsigned long gprs[32];
+ unsigned long hi;
+ unsigned long lo;
+ unsigned long pc;
+
+ /* FPU State */
+ struct mips_fpu_struct fpu;
+
+ /* COP0 State */
+ struct mips_coproc *cop0;
+
+ /* Host KSEG0 address of the EI/DI offset */
+ void *kseg0_commpage;
+
+ u32 io_gpr; /* GPR used as IO source/target */
+
+	/* Used to calibrate the virtual count register for the guest */
+ int32_t host_cp0_count;
+
+ /* Bitmask of exceptions that are pending */
+ unsigned long pending_exceptions;
+
+ /* Bitmask of pending exceptions to be cleared */
+ unsigned long pending_exceptions_clr;
+
+ unsigned long pending_load_cause;
+
+	/* Save/Restore the entryhi register when we are preempted/scheduled back in */
+ unsigned long preempt_entryhi;
+
+ /* S/W Based TLB for guest */
+ struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
+
+ /* Cached guest kernel/user ASIDs */
+ uint32_t guest_user_asid[NR_CPUS];
+ uint32_t guest_kernel_asid[NR_CPUS];
+ struct mm_struct guest_kernel_mm, guest_user_mm;
+
+ struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE];
+
+
+ struct hrtimer comparecount_timer;
+
+ int last_sched_cpu;
+
+ /* WAIT executed */
+ int wait;
+};
+
+
+#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0])
+#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
+#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0])
+#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0])
+#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
+#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
+#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
+#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
+#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
+#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
+#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
+#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0])
+#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
+#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0])
+#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val))
+#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0])
+#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
+#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0])
+#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
+#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0])
+#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val))
+#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1])
+#define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val))
+#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0])
+#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
+#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0])
+#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
+#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0])
+#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val))
+#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1])
+#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val))
+#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0])
+#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
+#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
+#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
+#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
+#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
+#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
+#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
+#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
+#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
+#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
+#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+
+#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
+#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
+#define kvm_set_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
+#define kvm_clear_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
+#define kvm_change_c0_guest_cause(cop0, change, val) \
+{ \
+ kvm_clear_c0_guest_cause(cop0, change); \
+ kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
+}
+#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
+#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
+#define kvm_change_c0_guest_ebase(cop0, change, val) \
+{ \
+ kvm_clear_c0_guest_ebase(cop0, change); \
+ kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
+}
+
+
+struct kvm_mips_callbacks {
+ int (*handle_cop_unusable) (struct kvm_vcpu *vcpu);
+ int (*handle_tlb_mod) (struct kvm_vcpu *vcpu);
+ int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu);
+ int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu);
+ int (*handle_addr_err_st) (struct kvm_vcpu *vcpu);
+ int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu);
+ int (*handle_syscall) (struct kvm_vcpu *vcpu);
+ int (*handle_res_inst) (struct kvm_vcpu *vcpu);
+ int (*handle_break) (struct kvm_vcpu *vcpu);
+ int (*vm_init) (struct kvm *kvm);
+ int (*vcpu_init) (struct kvm_vcpu *vcpu);
+ int (*vcpu_setup) (struct kvm_vcpu *vcpu);
+ gpa_t(*gva_to_gpa) (gva_t gva);
+ void (*queue_timer_int) (struct kvm_vcpu *vcpu);
+ void (*dequeue_timer_int) (struct kvm_vcpu *vcpu);
+ void (*queue_io_int) (struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
+ void (*dequeue_io_int) (struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
+ int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority,
+ uint32_t cause);
+ int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
+ uint32_t cause);
+ int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
+ struct kvm_regs *regs);
+ int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
+ struct kvm_regs *regs);
+};
+extern struct kvm_mips_callbacks *kvm_mips_callbacks;
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
+
+/* Debug: dump vcpu state */
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
+
+/* Trampoline ASM routine to start running in "Guest" context */
+extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+/* TLB handling */
+uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ struct kvm_mips_tlb *tlb,
+ unsigned long *hpa0,
+ unsigned long *hpa1);
+
+extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern void kvm_mips_dump_host_tlbs(void);
+extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_flush_host_tlb(int skip_kseg0);
+extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
+extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
+
+extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
+ unsigned long entryhi);
+extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
+extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+ unsigned long gva);
+extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+ struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu);
+extern void kvm_local_flush_tlb_all(void);
+extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu);
+extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
+extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
+
+/* Emulation */
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
+
+extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+ struct kvm_run *run);
+
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
+ uint32_t *opc,
+ uint32_t cause,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
+ uint32_t *opc,
+ uint32_t cause,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_store(uint32_t inst,
+ uint32_t cause,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_load(uint32_t inst,
+ uint32_t cause,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+/* Dynamic binary translation */
+extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+ struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+ struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
+ struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
+ struct kvm_vcpu *vcpu);
+
+/* Misc */
+extern void mips32_SyncICache(unsigned long addr, unsigned long size);
+extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
+extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
+
+
+#endif /* __MIPS_KVM_HOST_H__ */
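
One detail worth spelling out from the header above: the guest runs with a compressed segment map, so its KSEG0 starts at 0x40000000 rather than the usual 0x80000000, and the KVM_GUEST_*ADDR() macros are plain mask-and-or arithmetic. A self-contained sketch with the relevant constants copied from the header (the _ACAST32_ cast is simplified to a plain unsigned long; illustrative only):

#include <stdio.h>

#define KVM_GUEST_KSEG0		0x40000000UL
#define KVM_GUEST_CPHYSADDR(a)	((unsigned long)(a) & 0x1fffffff)
#define KVM_GUEST_KSEG0ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)

int main(void)
{
	/* physical 0x00100000 maps to guest-virtual 0x40100000 */
	printf("0x%lx\n", KVM_GUEST_KSEG0ADDR(0x00100000UL));
	return 0;
}
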
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h
deleted file mode 100644
index 8fcf8df4418a..000000000000
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef BCM63XX_CLK_H_
-#define BCM63XX_CLK_H_
-
-struct clk {
- void (*set)(struct clk *, int);
- unsigned int rate;
- unsigned int usage;
- int id;
-};
-
-#endif /* ! BCM63XX_CLK_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
index cb922b9cb0e9..336228990808 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -14,11 +14,12 @@
#define BCM6345_CPU_ID 0x6345
#define BCM6348_CPU_ID 0x6348
#define BCM6358_CPU_ID 0x6358
+#define BCM6362_CPU_ID 0x6362
#define BCM6368_CPU_ID 0x6368
void __init bcm63xx_cpu_init(void);
u16 __bcm63xx_get_cpu_id(void);
-u16 bcm63xx_get_cpu_rev(void);
+u8 bcm63xx_get_cpu_rev(void);
unsigned int bcm63xx_get_cpu_freq(void);
#ifdef CONFIG_BCM63XX_CPU_6328
@@ -86,6 +87,20 @@ unsigned int bcm63xx_get_cpu_freq(void);
# define BCMCPU_IS_6358() (0)
#endif
+#ifdef CONFIG_BCM63XX_CPU_6362
+# ifdef bcm63xx_get_cpu_id
+# undef bcm63xx_get_cpu_id
+# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id()
+# define BCMCPU_RUNTIME_DETECT
+# else
+# define bcm63xx_get_cpu_id() BCM6362_CPU_ID
+# endif
+# define BCMCPU_IS_6362() (bcm63xx_get_cpu_id() == BCM6362_CPU_ID)
+#else
+# define BCMCPU_IS_6362() (0)
+#endif
+
+
#ifdef CONFIG_BCM63XX_CPU_6368
# ifdef bcm63xx_get_cpu_id
# undef bcm63xx_get_cpu_id
@@ -406,6 +421,62 @@ enum bcm63xx_regs_set {
/*
+ * 6362 register sets base address
+ */
+#define BCM_6362_DSL_LMEM_BASE (0xdeadbeef)
+#define BCM_6362_PERF_BASE (0xb0000000)
+#define BCM_6362_TIMER_BASE (0xb0000040)
+#define BCM_6362_WDT_BASE (0xb000005c)
+#define BCM_6362_UART0_BASE (0xb0000100)
+#define BCM_6362_UART1_BASE (0xb0000120)
+#define BCM_6362_GPIO_BASE (0xb0000080)
+#define BCM_6362_SPI_BASE (0xb0000800)
+#define BCM_6362_HSSPI_BASE (0xb0001000)
+#define BCM_6362_UDC0_BASE (0xdeadbeef)
+#define BCM_6362_USBDMA_BASE (0xb000c000)
+#define BCM_6362_OHCI0_BASE (0xb0002600)
+#define BCM_6362_OHCI_PRIV_BASE (0xdeadbeef)
+#define BCM_6362_USBH_PRIV_BASE (0xb0002700)
+#define BCM_6362_USBD_BASE (0xb0002400)
+#define BCM_6362_MPI_BASE (0xdeadbeef)
+#define BCM_6362_PCMCIA_BASE (0xdeadbeef)
+#define BCM_6362_PCIE_BASE (0xb0e40000)
+#define BCM_6362_SDRAM_REGS_BASE (0xdeadbeef)
+#define BCM_6362_DSL_BASE (0xdeadbeef)
+#define BCM_6362_UBUS_BASE (0xdeadbeef)
+#define BCM_6362_ENET0_BASE (0xdeadbeef)
+#define BCM_6362_ENET1_BASE (0xdeadbeef)
+#define BCM_6362_ENETDMA_BASE (0xb000d800)
+#define BCM_6362_ENETDMAC_BASE (0xb000da00)
+#define BCM_6362_ENETDMAS_BASE (0xb000dc00)
+#define BCM_6362_ENETSW_BASE (0xb0e00000)
+#define BCM_6362_EHCI0_BASE (0xb0002500)
+#define BCM_6362_SDRAM_BASE (0xdeadbeef)
+#define BCM_6362_MEMC_BASE (0xdeadbeef)
+#define BCM_6362_DDR_BASE (0xb0003000)
+#define BCM_6362_M2M_BASE (0xdeadbeef)
+#define BCM_6362_ATM_BASE (0xdeadbeef)
+#define BCM_6362_XTM_BASE (0xb0007800)
+#define BCM_6362_XTMDMA_BASE (0xb000b800)
+#define BCM_6362_XTMDMAC_BASE (0xdeadbeef)
+#define BCM_6362_XTMDMAS_BASE (0xdeadbeef)
+#define BCM_6362_PCM_BASE (0xb000a800)
+#define BCM_6362_PCMDMA_BASE (0xdeadbeef)
+#define BCM_6362_PCMDMAC_BASE (0xdeadbeef)
+#define BCM_6362_PCMDMAS_BASE (0xdeadbeef)
+#define BCM_6362_RNG_BASE (0xdeadbeef)
+#define BCM_6362_MISC_BASE (0xb0001800)
+
+#define BCM_6362_NAND_REG_BASE (0xb0000200)
+#define BCM_6362_NAND_CACHE_BASE (0xb0000600)
+#define BCM_6362_LED_BASE (0xb0001900)
+#define BCM_6362_IPSEC_BASE (0xb0002800)
+#define BCM_6362_IPSEC_DMA_BASE (0xb000d000)
+#define BCM_6362_WLAN_CHIPCOMMON_BASE (0xb0004000)
+#define BCM_6362_WLAN_D11_BASE (0xb0005000)
+#define BCM_6362_WLAN_SHIM_BASE (0xb0007000)
+
+/*
* 6368 register sets base address
*/
#define BCM_6368_DSL_LMEM_BASE (0xdeadbeef)
@@ -564,6 +635,9 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
#ifdef CONFIG_BCM63XX_CPU_6358
__GEN_RSET(6358)
#endif
+#ifdef CONFIG_BCM63XX_CPU_6362
+ __GEN_RSET(6362)
+#endif
#ifdef CONFIG_BCM63XX_CPU_6368
__GEN_RSET(6368)
#endif
@@ -820,6 +894,71 @@ enum bcm63xx_irq {
#define BCM_6358_EXT_IRQ3 (IRQ_INTERNAL_BASE + 28)
/*
+ * 6362 irqs
+ */
+#define BCM_6362_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32)
+
+#define BCM_6362_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
+#define BCM_6362_SPI_IRQ (IRQ_INTERNAL_BASE + 2)
+#define BCM_6362_UART0_IRQ (IRQ_INTERNAL_BASE + 3)
+#define BCM_6362_UART1_IRQ (IRQ_INTERNAL_BASE + 4)
+#define BCM_6362_DSL_IRQ (IRQ_INTERNAL_BASE + 28)
+#define BCM_6362_UDC0_IRQ 0
+#define BCM_6362_ENET0_IRQ 0
+#define BCM_6362_ENET1_IRQ 0
+#define BCM_6362_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 14)
+#define BCM_6362_HSSPI_IRQ (IRQ_INTERNAL_BASE + 5)
+#define BCM_6362_OHCI0_IRQ (IRQ_INTERNAL_BASE + 9)
+#define BCM_6362_EHCI0_IRQ (IRQ_INTERNAL_BASE + 10)
+#define BCM_6362_USBD_IRQ (IRQ_INTERNAL_BASE + 11)
+#define BCM_6362_USBD_RXDMA0_IRQ (IRQ_INTERNAL_BASE + 20)
+#define BCM_6362_USBD_TXDMA0_IRQ (IRQ_INTERNAL_BASE + 21)
+#define BCM_6362_USBD_RXDMA1_IRQ (IRQ_INTERNAL_BASE + 22)
+#define BCM_6362_USBD_TXDMA1_IRQ (IRQ_INTERNAL_BASE + 23)
+#define BCM_6362_USBD_RXDMA2_IRQ (IRQ_INTERNAL_BASE + 24)
+#define BCM_6362_USBD_TXDMA2_IRQ (IRQ_INTERNAL_BASE + 25)
+#define BCM_6362_PCMCIA_IRQ 0
+#define BCM_6362_ENET0_RXDMA_IRQ 0
+#define BCM_6362_ENET0_TXDMA_IRQ 0
+#define BCM_6362_ENET1_RXDMA_IRQ 0
+#define BCM_6362_ENET1_TXDMA_IRQ 0
+#define BCM_6362_PCI_IRQ (IRQ_INTERNAL_BASE + 30)
+#define BCM_6362_ATM_IRQ 0
+#define BCM_6362_ENETSW_RXDMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 0)
+#define BCM_6362_ENETSW_RXDMA1_IRQ (BCM_6362_HIGH_IRQ_BASE + 1)
+#define BCM_6362_ENETSW_RXDMA2_IRQ (BCM_6362_HIGH_IRQ_BASE + 2)
+#define BCM_6362_ENETSW_RXDMA3_IRQ (BCM_6362_HIGH_IRQ_BASE + 3)
+#define BCM_6362_ENETSW_TXDMA0_IRQ 0
+#define BCM_6362_ENETSW_TXDMA1_IRQ 0
+#define BCM_6362_ENETSW_TXDMA2_IRQ 0
+#define BCM_6362_ENETSW_TXDMA3_IRQ 0
+#define BCM_6362_XTM_IRQ 0
+#define BCM_6362_XTM_DMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 12)
+
+#define BCM_6362_RING_OSC_IRQ (IRQ_INTERNAL_BASE + 1)
+#define BCM_6362_WLAN_GPIO_IRQ (IRQ_INTERNAL_BASE + 6)
+#define BCM_6362_WLAN_IRQ (IRQ_INTERNAL_BASE + 7)
+#define BCM_6362_IPSEC_IRQ (IRQ_INTERNAL_BASE + 8)
+#define BCM_6362_NAND_IRQ (IRQ_INTERNAL_BASE + 12)
+#define BCM_6362_PCM_IRQ (IRQ_INTERNAL_BASE + 13)
+#define BCM_6362_DG_IRQ (IRQ_INTERNAL_BASE + 15)
+#define BCM_6362_EPHY_ENERGY0_IRQ (IRQ_INTERNAL_BASE + 16)
+#define BCM_6362_EPHY_ENERGY1_IRQ (IRQ_INTERNAL_BASE + 17)
+#define BCM_6362_EPHY_ENERGY2_IRQ (IRQ_INTERNAL_BASE + 18)
+#define BCM_6362_EPHY_ENERGY3_IRQ (IRQ_INTERNAL_BASE + 19)
+#define BCM_6362_IPSEC_DMA0_IRQ (IRQ_INTERNAL_BASE + 26)
+#define BCM_6362_IPSEC_DMA1_IRQ (IRQ_INTERNAL_BASE + 27)
+#define BCM_6362_FAP0_IRQ (IRQ_INTERNAL_BASE + 29)
+#define BCM_6362_PCM_DMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 4)
+#define BCM_6362_PCM_DMA1_IRQ (BCM_6362_HIGH_IRQ_BASE + 5)
+#define BCM_6362_DECT0_IRQ (BCM_6362_HIGH_IRQ_BASE + 6)
+#define BCM_6362_DECT1_IRQ (BCM_6362_HIGH_IRQ_BASE + 7)
+#define BCM_6362_EXT_IRQ0 (BCM_6362_HIGH_IRQ_BASE + 8)
+#define BCM_6362_EXT_IRQ1 (BCM_6362_HIGH_IRQ_BASE + 9)
+#define BCM_6362_EXT_IRQ2 (BCM_6362_HIGH_IRQ_BASE + 10)
+#define BCM_6362_EXT_IRQ3 (BCM_6362_HIGH_IRQ_BASE + 11)
+
+/*
* 6368 irqs
*/
#define BCM_6368_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32)
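
Because the 6362 slots into the shared bcm63xx IRQ numbering above, board and driver code picks the right constants through the BCMCPU_IS_xxxx() compile-time/runtime test added earlier in this file. A hedged sketch of that pattern; the function name is invented, and the 6368 macro and IRQ constant are assumed from the unchanged parts of the header:

static int example_enet_phy_irq(void)
{
	if (BCMCPU_IS_6362())
		return BCM_6362_ENET_PHY_IRQ;
	if (BCMCPU_IS_6368())
		return BCM_6368_ENET_PHY_IRQ;	/* assumed, not shown in this hunk */
	return -1;
}
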
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
index b0184cf02575..c426cabc620a 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -71,18 +71,13 @@ static inline unsigned long bcm63xx_spireg(enum bcm63xx_regs_spi reg)
return bcm63xx_regs_spi[reg];
#else
-#ifdef CONFIG_BCM63XX_CPU_6338
- __GEN_SPI_RSET(6338)
-#endif
-#ifdef CONFIG_BCM63XX_CPU_6348
+#if defined(CONFIG_BCM63XX_CPU_6338) || defined(CONFIG_BCM63XX_CPU_6348)
__GEN_SPI_RSET(6348)
#endif
-#ifdef CONFIG_BCM63XX_CPU_6358
+#if defined(CONFIG_BCM63XX_CPU_6358) || defined(CONFIG_BCM63XX_CPU_6362) || \
+ defined(CONFIG_BCM63XX_CPU_6368)
__GEN_SPI_RSET(6358)
#endif
-#ifdef CONFIG_BCM63XX_CPU_6368
- __GEN_SPI_RSET(6368)
-#endif
#endif
return 0;
}
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 0a9891f7580d..35baa1a60a64 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -17,6 +17,8 @@ static inline unsigned long bcm63xx_gpio_count(void)
return 8;
case BCM6345_CPU_ID:
return 16;
+ case BCM6362_CPU_ID:
+ return 48;
case BCM6368_CPU_ID:
return 38;
case BCM6348_CPU_ID:
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 81b4702f792a..3203fe49b34d 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -10,7 +10,7 @@
#define REV_CHIPID_SHIFT 16
#define REV_CHIPID_MASK (0xffff << REV_CHIPID_SHIFT)
#define REV_REVID_SHIFT 0
-#define REV_REVID_MASK (0xffff << REV_REVID_SHIFT)
+#define REV_REVID_MASK (0xff << REV_REVID_SHIFT)
/* Clock Control register */
#define PERF_CKCTL_REG 0x4
@@ -112,6 +112,39 @@
CKCTL_6358_USBSU_EN | \
CKCTL_6358_EPHY_EN)
+#define CKCTL_6362_ADSL_QPROC_EN (1 << 1)
+#define CKCTL_6362_ADSL_AFE_EN (1 << 2)
+#define CKCTL_6362_ADSL_EN (1 << 3)
+#define CKCTL_6362_MIPS_EN (1 << 4)
+#define CKCTL_6362_WLAN_OCP_EN (1 << 5)
+#define CKCTL_6362_SWPKT_USB_EN (1 << 7)
+#define CKCTL_6362_SWPKT_SAR_EN (1 << 8)
+#define CKCTL_6362_SAR_EN (1 << 9)
+#define CKCTL_6362_ROBOSW_EN (1 << 10)
+#define CKCTL_6362_PCM_EN (1 << 11)
+#define CKCTL_6362_USBD_EN (1 << 12)
+#define CKCTL_6362_USBH_EN (1 << 13)
+#define CKCTL_6362_IPSEC_EN (1 << 14)
+#define CKCTL_6362_SPI_EN (1 << 15)
+#define CKCTL_6362_HSSPI_EN (1 << 16)
+#define CKCTL_6362_PCIE_EN (1 << 17)
+#define CKCTL_6362_FAP_EN (1 << 18)
+#define CKCTL_6362_PHYMIPS_EN (1 << 19)
+#define CKCTL_6362_NAND_EN (1 << 20)
+
+#define CKCTL_6362_ALL_SAFE_EN (CKCTL_6362_PHYMIPS_EN | \
+ CKCTL_6362_ADSL_QPROC_EN | \
+ CKCTL_6362_ADSL_AFE_EN | \
+ CKCTL_6362_ADSL_EN | \
+ CKCTL_6362_SAR_EN | \
+ CKCTL_6362_PCM_EN | \
+ CKCTL_6362_IPSEC_EN | \
+ CKCTL_6362_USBD_EN | \
+ CKCTL_6362_USBH_EN | \
+ CKCTL_6362_ROBOSW_EN | \
+ CKCTL_6362_PCIE_EN)
+
+
#define CKCTL_6368_VDSL_QPROC_EN (1 << 2)
#define CKCTL_6368_VDSL_AFE_EN (1 << 3)
#define CKCTL_6368_VDSL_BONDING_EN (1 << 4)
@@ -153,6 +186,7 @@
#define PERF_IRQMASK_6345_REG 0xc
#define PERF_IRQMASK_6348_REG 0xc
#define PERF_IRQMASK_6358_REG 0xc
+#define PERF_IRQMASK_6362_REG 0x20
#define PERF_IRQMASK_6368_REG 0x20
/* Interrupt Status register */
@@ -161,6 +195,7 @@
#define PERF_IRQSTAT_6345_REG 0x10
#define PERF_IRQSTAT_6348_REG 0x10
#define PERF_IRQSTAT_6358_REG 0x10
+#define PERF_IRQSTAT_6362_REG 0x28
#define PERF_IRQSTAT_6368_REG 0x28
/* External Interrupt Configuration register */
@@ -169,6 +204,7 @@
#define PERF_EXTIRQ_CFG_REG_6345 0x14
#define PERF_EXTIRQ_CFG_REG_6348 0x14
#define PERF_EXTIRQ_CFG_REG_6358 0x14
+#define PERF_EXTIRQ_CFG_REG_6362 0x18
#define PERF_EXTIRQ_CFG_REG_6368 0x18
#define PERF_EXTIRQ_CFG_REG2_6368 0x1c
@@ -197,6 +233,7 @@
#define PERF_SOFTRESET_REG 0x28
#define PERF_SOFTRESET_6328_REG 0x10
#define PERF_SOFTRESET_6358_REG 0x34
+#define PERF_SOFTRESET_6362_REG 0x10
#define PERF_SOFTRESET_6368_REG 0x10
#define SOFTRESET_6328_SPI_MASK (1 << 0)
@@ -259,6 +296,22 @@
#define SOFTRESET_6358_PCM_MASK (1 << 13)
#define SOFTRESET_6358_ADSL_MASK (1 << 14)
+#define SOFTRESET_6362_SPI_MASK (1 << 0)
+#define SOFTRESET_6362_IPSEC_MASK (1 << 1)
+#define SOFTRESET_6362_EPHY_MASK (1 << 2)
+#define SOFTRESET_6362_SAR_MASK (1 << 3)
+#define SOFTRESET_6362_ENETSW_MASK (1 << 4)
+#define SOFTRESET_6362_USBS_MASK (1 << 5)
+#define SOFTRESET_6362_USBH_MASK (1 << 6)
+#define SOFTRESET_6362_PCM_MASK (1 << 7)
+#define SOFTRESET_6362_PCIE_CORE_MASK (1 << 8)
+#define SOFTRESET_6362_PCIE_MASK (1 << 9)
+#define SOFTRESET_6362_PCIE_EXT_MASK (1 << 10)
+#define SOFTRESET_6362_WLAN_SHIM_MASK (1 << 11)
+#define SOFTRESET_6362_DDR_PHY_MASK (1 << 12)
+#define SOFTRESET_6362_FAP_MASK (1 << 13)
+#define SOFTRESET_6362_WLAN_UBUS_MASK (1 << 14)
+
#define SOFTRESET_6368_SPI_MASK (1 << 0)
#define SOFTRESET_6368_MPI_MASK (1 << 3)
#define SOFTRESET_6368_EPHY_MASK (1 << 6)
@@ -1223,24 +1276,7 @@
* _REG relative to RSET_SPI
*************************************************************************/
-/* BCM 6338 SPI core */
-#define SPI_6338_CMD 0x00 /* 16-bits register */
-#define SPI_6338_INT_STATUS 0x02
-#define SPI_6338_INT_MASK_ST 0x03
-#define SPI_6338_INT_MASK 0x04
-#define SPI_6338_ST 0x05
-#define SPI_6338_CLK_CFG 0x06
-#define SPI_6338_FILL_BYTE 0x07
-#define SPI_6338_MSG_TAIL 0x09
-#define SPI_6338_RX_TAIL 0x0b
-#define SPI_6338_MSG_CTL 0x40 /* 8-bits register */
-#define SPI_6338_MSG_CTL_WIDTH 8
-#define SPI_6338_MSG_DATA 0x41
-#define SPI_6338_MSG_DATA_SIZE 0x3f
-#define SPI_6338_RX_DATA 0x80
-#define SPI_6338_RX_DATA_SIZE 0x3f
-
-/* BCM 6348 SPI core */
+/* BCM 6338/6348 SPI core */
#define SPI_6348_CMD 0x00 /* 16-bits register */
#define SPI_6348_INT_STATUS 0x02
#define SPI_6348_INT_MASK_ST 0x03
@@ -1257,7 +1293,7 @@
#define SPI_6348_RX_DATA 0x80
#define SPI_6348_RX_DATA_SIZE 0x3f
-/* BCM 6358 SPI core */
+/* BCM 6358/6362/6368 SPI core */
#define SPI_6358_MSG_CTL 0x00 /* 16-bits register */
#define SPI_6358_MSG_CTL_WIDTH 16
#define SPI_6358_MSG_DATA 0x02
@@ -1274,23 +1310,6 @@
#define SPI_6358_MSG_TAIL 0x709
#define SPI_6358_RX_TAIL 0x70B
-/* BCM 6358 SPI core */
-#define SPI_6368_MSG_CTL 0x00 /* 16-bits register */
-#define SPI_6368_MSG_CTL_WIDTH 16
-#define SPI_6368_MSG_DATA 0x02
-#define SPI_6368_MSG_DATA_SIZE 0x21e
-#define SPI_6368_RX_DATA 0x400
-#define SPI_6368_RX_DATA_SIZE 0x220
-#define SPI_6368_CMD 0x700 /* 16-bits register */
-#define SPI_6368_INT_STATUS 0x702
-#define SPI_6368_INT_MASK_ST 0x703
-#define SPI_6368_INT_MASK 0x704
-#define SPI_6368_ST 0x705
-#define SPI_6368_CLK_CFG 0x706
-#define SPI_6368_FILL_BYTE 0x707
-#define SPI_6368_MSG_TAIL 0x709
-#define SPI_6368_RX_TAIL 0x70B
-
/* Shared SPI definitions */
/* Message configuration */
@@ -1298,10 +1317,8 @@
#define SPI_HD_W 0x01
#define SPI_HD_R 0x02
#define SPI_BYTE_CNT_SHIFT 0
-#define SPI_6338_MSG_TYPE_SHIFT 6
#define SPI_6348_MSG_TYPE_SHIFT 6
#define SPI_6358_MSG_TYPE_SHIFT 14
-#define SPI_6368_MSG_TYPE_SHIFT 14
/* Command */
#define SPI_CMD_NOOP 0x00
@@ -1348,10 +1365,18 @@
/*************************************************************************
* _REG relative to RSET_MISC
*************************************************************************/
-#define MISC_SERDES_CTRL_REG 0x0
+#define MISC_SERDES_CTRL_6328_REG 0x0
+#define MISC_SERDES_CTRL_6362_REG 0x4
#define SERDES_PCIE_EN (1 << 0)
#define SERDES_PCIE_EXD_EN (1 << 15)
+#define MISC_STRAPBUS_6362_REG 0x14
+#define STRAPBUS_6362_FCVO_SHIFT 1
+#define STRAPBUS_6362_HSSPI_CLK_FAST (1 << 13)
+#define STRAPBUS_6362_FCVO_MASK (0x1f << STRAPBUS_6362_FCVO_SHIFT)
+#define STRAPBUS_6362_BOOT_SEL_SERIAL (1 << 15)
+#define STRAPBUS_6362_BOOT_SEL_NAND (0 << 15)
+
#define MISC_STRAPBUS_6328_REG 0x240
#define STRAPBUS_6328_FCVO_SHIFT 7
#define STRAPBUS_6328_FCVO_MASK (0x1f << STRAPBUS_6328_FCVO_SHIFT)
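
The new MISC_STRAPBUS_6362_REG field is decoded the same way as the existing 6328 strap register: mask, then shift. A hedged sketch for the FCVO (clock select) field, assuming the raw register value has already been read elsewhere:

#include <linux/types.h>

static unsigned int example_6362_fcvo(u32 strapbus)
{
	return (strapbus & STRAPBUS_6362_FCVO_MASK) >> STRAPBUS_6362_FCVO_SHIFT;
}
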
diff --git a/arch/mips/include/asm/mach-bcm63xx/ioremap.h b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
index 30931c42379d..94e3011ba7df 100644
--- a/arch/mips/include/asm/mach-bcm63xx/ioremap.h
+++ b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
@@ -19,6 +19,7 @@ static inline int is_bcm63xx_internal_registers(phys_t offset)
return 1;
break;
case BCM6328_CPU_ID:
+ case BCM6362_CPU_ID:
case BCM6368_CPU_ID:
if (offset >= 0xb0000000 && offset < 0xb1000000)
return 1;
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 9c95177f7a7e..fe23034aaf72 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -61,9 +61,8 @@ static inline int plat_device_is_coherent(struct device *dev)
{
#ifdef CONFIG_DMA_COHERENT
return 1;
-#endif
-#ifdef CONFIG_DMA_NONCOHERENT
- return 0;
+#else
+ return coherentio;
#endif
}
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 73d717a75cb0..5b2f2e68e57f 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -20,14 +20,21 @@
#endif
#ifdef CONFIG_32BIT
-
+#ifdef CONFIG_KVM_GUEST
+#define CAC_BASE _AC(0x40000000, UL)
+#else
#define CAC_BASE _AC(0x80000000, UL)
+#endif
#define IO_BASE _AC(0xa0000000, UL)
#define UNCAC_BASE _AC(0xa0000000, UL)
#ifndef MAP_BASE
+#ifdef CONFIG_KVM_GUEST
+#define MAP_BASE _AC(0x60000000, UL)
+#else
#define MAP_BASE _AC(0xc0000000, UL)
#endif
+#endif
/*
* Memory above this physical address will be considered highmem.
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index 75fd8c0f986e..c0f3ef45c2c1 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -57,5 +57,6 @@
#define cpu_has_vint 0
#define cpu_has_vtag_icache 0
#define cpu_has_watch 1
+#define cpu_has_local_ebase 0
#endif /* __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
new file mode 100644
index 000000000000..9809972ea882
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/mt7620.h
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _MT7620_REGS_H_
+#define _MT7620_REGS_H_
+
+#define MT7620_SYSC_BASE 0x10000000
+
+#define SYSC_REG_CHIP_NAME0 0x00
+#define SYSC_REG_CHIP_NAME1 0x04
+#define SYSC_REG_CHIP_REV 0x0c
+#define SYSC_REG_SYSTEM_CONFIG0 0x10
+#define SYSC_REG_SYSTEM_CONFIG1 0x14
+#define SYSC_REG_CPLL_CONFIG0 0x54
+#define SYSC_REG_CPLL_CONFIG1 0x58
+
+#define MT7620N_CHIP_NAME0 0x33365452
+#define MT7620N_CHIP_NAME1 0x20203235
+
+#define MT7620A_CHIP_NAME0 0x3637544d
+#define MT7620A_CHIP_NAME1 0x20203032
+
+#define CHIP_REV_PKG_MASK 0x1
+#define CHIP_REV_PKG_SHIFT 16
+#define CHIP_REV_VER_MASK 0xf
+#define CHIP_REV_VER_SHIFT 8
+#define CHIP_REV_ECO_MASK 0xf
+
+#define CPLL_SW_CONFIG_SHIFT 31
+#define CPLL_SW_CONFIG_MASK 0x1
+#define CPLL_CPU_CLK_SHIFT 24
+#define CPLL_CPU_CLK_MASK 0x1
+#define CPLL_MULT_RATIO_SHIFT 16
+#define CPLL_MULT_RATIO 0x7
+#define CPLL_DIV_RATIO_SHIFT 10
+#define CPLL_DIV_RATIO 0x3
+
+#define SYSCFG0_DRAM_TYPE_MASK 0x3
+#define SYSCFG0_DRAM_TYPE_SHIFT 4
+#define SYSCFG0_DRAM_TYPE_SDRAM 0
+#define SYSCFG0_DRAM_TYPE_DDR1 1
+#define SYSCFG0_DRAM_TYPE_DDR2 2
+
+#define MT7620_DRAM_BASE 0x0
+#define MT7620_SDRAM_SIZE_MIN 2
+#define MT7620_SDRAM_SIZE_MAX 64
+#define MT7620_DDR1_SIZE_MIN 32
+#define MT7620_DDR1_SIZE_MAX 128
+#define MT7620_DDR2_SIZE_MIN 32
+#define MT7620_DDR2_SIZE_MAX 256
+
+#define MT7620_GPIO_MODE_I2C BIT(0)
+#define MT7620_GPIO_MODE_UART0_SHIFT 2
+#define MT7620_GPIO_MODE_UART0_MASK 0x7
+#define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT)
+#define MT7620_GPIO_MODE_UARTF 0x0
+#define MT7620_GPIO_MODE_PCM_UARTF 0x1
+#define MT7620_GPIO_MODE_PCM_I2S 0x2
+#define MT7620_GPIO_MODE_I2S_UARTF 0x3
+#define MT7620_GPIO_MODE_PCM_GPIO 0x4
+#define MT7620_GPIO_MODE_GPIO_UARTF 0x5
+#define MT7620_GPIO_MODE_GPIO_I2S 0x6
+#define MT7620_GPIO_MODE_GPIO 0x7
+#define MT7620_GPIO_MODE_UART1 BIT(5)
+#define MT7620_GPIO_MODE_MDIO BIT(8)
+#define MT7620_GPIO_MODE_RGMII1 BIT(9)
+#define MT7620_GPIO_MODE_RGMII2 BIT(10)
+#define MT7620_GPIO_MODE_SPI BIT(11)
+#define MT7620_GPIO_MODE_SPI_REF_CLK BIT(12)
+#define MT7620_GPIO_MODE_WLED BIT(13)
+#define MT7620_GPIO_MODE_JTAG BIT(15)
+#define MT7620_GPIO_MODE_EPHY BIT(15)
+#define MT7620_GPIO_MODE_WDT BIT(22)
+
+#endif
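
The CHIP_REV_* masks above split SYSC_REG_CHIP_REV into package, version and ECO fields. A hedged decoding sketch; the register read itself and any base-address mapping are assumed to happen elsewhere:

#include <linux/types.h>

static void example_mt7620_rev(u32 rev, unsigned int *pkg,
			       unsigned int *ver, unsigned int *eco)
{
	*pkg = (rev >> CHIP_REV_PKG_SHIFT) & CHIP_REV_PKG_MASK;
	*ver = (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK;
	*eco = rev & CHIP_REV_ECO_MASK;
}
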
diff --git a/arch/mips/include/asm/mach-ralink/rt288x.h b/arch/mips/include/asm/mach-ralink/rt288x.h
new file mode 100644
index 000000000000..03ad716acb42
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt288x.h
@@ -0,0 +1,53 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _RT288X_REGS_H_
+#define _RT288X_REGS_H_
+
+#define RT2880_SYSC_BASE 0x00300000
+
+#define SYSC_REG_CHIP_NAME0 0x00
+#define SYSC_REG_CHIP_NAME1 0x04
+#define SYSC_REG_CHIP_ID 0x0c
+#define SYSC_REG_SYSTEM_CONFIG 0x10
+#define SYSC_REG_CLKCFG 0x30
+
+#define RT2880_CHIP_NAME0 0x38325452
+#define RT2880_CHIP_NAME1 0x20203038
+
+#define CHIP_ID_ID_MASK 0xff
+#define CHIP_ID_ID_SHIFT 8
+#define CHIP_ID_REV_MASK 0xff
+
+#define SYSTEM_CONFIG_CPUCLK_SHIFT 20
+#define SYSTEM_CONFIG_CPUCLK_MASK 0x3
+#define SYSTEM_CONFIG_CPUCLK_250 0x0
+#define SYSTEM_CONFIG_CPUCLK_266 0x1
+#define SYSTEM_CONFIG_CPUCLK_280 0x2
+#define SYSTEM_CONFIG_CPUCLK_300 0x3
+
+#define RT2880_GPIO_MODE_I2C BIT(0)
+#define RT2880_GPIO_MODE_UART0 BIT(1)
+#define RT2880_GPIO_MODE_SPI BIT(2)
+#define RT2880_GPIO_MODE_UART1 BIT(3)
+#define RT2880_GPIO_MODE_JTAG BIT(4)
+#define RT2880_GPIO_MODE_MDIO BIT(5)
+#define RT2880_GPIO_MODE_SDRAM BIT(6)
+#define RT2880_GPIO_MODE_PCI BIT(7)
+
+#define CLKCFG_SRAM_CS_N_WDT BIT(9)
+
+#define RT2880_SDRAM_BASE 0x08000000
+#define RT2880_MEM_SIZE_MIN 2
+#define RT2880_MEM_SIZE_MAX 128
+
+#endif
diff --git a/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h
new file mode 100644
index 000000000000..72fc10669199
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h
@@ -0,0 +1,56 @@
+/*
+ * Ralink RT288x specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT288X_CPU_FEATURE_OVERRIDES_H
+#define _RT288X_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+
+#define cpu_has_prefetch 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+
+#define cpu_has_mips16 1
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 1
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_dsp 0
+#define cpu_has_mipsmt 0
+
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+#define cpu_has_64bit_addresses 0
+
+#define cpu_dcache_line_size() 16
+#define cpu_icache_line_size() 16
+
+#endif /* _RT288X_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
index 7d344f2d7d0a..069bf37a6010 100644
--- a/arch/mips/include/asm/mach-ralink/rt305x.h
+++ b/arch/mips/include/asm/mach-ralink/rt305x.h
@@ -97,6 +97,14 @@ static inline int soc_is_rt5350(void)
#define RT5350_SYSCFG0_CPUCLK_320 0x2
#define RT5350_SYSCFG0_CPUCLK_300 0x3
+#define RT5350_SYSCFG0_DRAM_SIZE_SHIFT 12
+#define RT5350_SYSCFG0_DRAM_SIZE_MASK 7
+#define RT5350_SYSCFG0_DRAM_SIZE_2M 0
+#define RT5350_SYSCFG0_DRAM_SIZE_8M 1
+#define RT5350_SYSCFG0_DRAM_SIZE_16M 2
+#define RT5350_SYSCFG0_DRAM_SIZE_32M 3
+#define RT5350_SYSCFG0_DRAM_SIZE_64M 4
+
/* multi function gpio pins */
#define RT305X_GPIO_I2C_SD 1
#define RT305X_GPIO_I2C_SCLK 2
@@ -136,4 +144,23 @@ static inline int soc_is_rt5350(void)
#define RT305X_GPIO_MODE_SDRAM BIT(8)
#define RT305X_GPIO_MODE_RGMII BIT(9)
+#define RT3352_SYSC_REG_SYSCFG0 0x010
+#define RT3352_SYSC_REG_SYSCFG1 0x014
+#define RT3352_SYSC_REG_CLKCFG1 0x030
+#define RT3352_SYSC_REG_RSTCTRL 0x034
+#define RT3352_SYSC_REG_USB_PS 0x05c
+
+#define RT3352_CLKCFG0_XTAL_SEL BIT(20)
+#define RT3352_CLKCFG1_UPHY0_CLK_EN BIT(18)
+#define RT3352_CLKCFG1_UPHY1_CLK_EN BIT(20)
+#define RT3352_RSTCTRL_UHST BIT(22)
+#define RT3352_RSTCTRL_UDEV BIT(25)
+#define RT3352_SYSCFG1_USB0_HOST_MODE BIT(10)
+
+#define RT305X_SDRAM_BASE 0x00000000
+#define RT305X_MEM_SIZE_MIN 2
+#define RT305X_MEM_SIZE_MAX 64
+#define RT3352_MEM_SIZE_MIN 2
+#define RT3352_MEM_SIZE_MAX 256
+
#endif
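
The RT5350_SYSCFG0_DRAM_SIZE_* values added above encode the fitted DRAM size; the natural decode is a shift, mask and switch. A hedged sketch (the SYSCFG0 read and the function name are assumed):

#include <linux/types.h>

static unsigned int example_rt5350_dram_mb(u32 syscfg0)
{
	switch ((syscfg0 >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) &
		RT5350_SYSCFG0_DRAM_SIZE_MASK) {
	case RT5350_SYSCFG0_DRAM_SIZE_2M:	return 2;
	case RT5350_SYSCFG0_DRAM_SIZE_8M:	return 8;
	case RT5350_SYSCFG0_DRAM_SIZE_16M:	return 16;
	case RT5350_SYSCFG0_DRAM_SIZE_32M:	return 32;
	case RT5350_SYSCFG0_DRAM_SIZE_64M:	return 64;
	default:				return 0;
	}
}
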
diff --git a/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h
new file mode 100644
index 000000000000..917c28654552
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h
@@ -0,0 +1,56 @@
+/*
+ * Ralink RT305x specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT305X_CPU_FEATURE_OVERRIDES_H
+#define _RT305X_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+
+#define cpu_has_prefetch 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+
+#define cpu_has_mips16 1
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 1
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_dsp 1
+#define cpu_has_mipsmt 0
+
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+#define cpu_has_64bit_addresses 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#endif /* _RT305X_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h
new file mode 100644
index 000000000000..058382f37f92
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt3883.h
@@ -0,0 +1,252 @@
+/*
+ * Ralink RT3662/RT3883 SoC register definitions
+ *
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _RT3883_REGS_H_
+#define _RT3883_REGS_H_
+
+#include <linux/bitops.h>
+
+#define RT3883_SDRAM_BASE 0x00000000
+#define RT3883_SYSC_BASE 0x10000000
+#define RT3883_TIMER_BASE 0x10000100
+#define RT3883_INTC_BASE 0x10000200
+#define RT3883_MEMC_BASE 0x10000300
+#define RT3883_UART0_BASE 0x10000500
+#define RT3883_PIO_BASE 0x10000600
+#define RT3883_FSCC_BASE 0x10000700
+#define RT3883_NANDC_BASE 0x10000810
+#define RT3883_I2C_BASE 0x10000900
+#define RT3883_I2S_BASE 0x10000a00
+#define RT3883_SPI_BASE 0x10000b00
+#define RT3883_UART1_BASE 0x10000c00
+#define RT3883_PCM_BASE 0x10002000
+#define RT3883_GDMA_BASE 0x10002800
+#define RT3883_CODEC1_BASE 0x10003000
+#define RT3883_CODEC2_BASE 0x10003800
+#define RT3883_FE_BASE 0x10100000
+#define RT3883_ROM_BASE 0x10118000
+#define RT3883_USBDEV_BASE 0x10112000
+#define RT3883_PCI_BASE 0x10140000
+#define RT3883_WLAN_BASE 0x10180000
+#define RT3883_USBHOST_BASE 0x101c0000
+#define RT3883_BOOT_BASE 0x1c000000
+#define RT3883_SRAM_BASE 0x1e000000
+#define RT3883_PCIMEM_BASE 0x20000000
+
+#define RT3883_EHCI_BASE (RT3883_USBHOST_BASE)
+#define RT3883_OHCI_BASE (RT3883_USBHOST_BASE + 0x1000)
+
+#define RT3883_SYSC_SIZE 0x100
+#define RT3883_TIMER_SIZE 0x100
+#define RT3883_INTC_SIZE 0x100
+#define RT3883_MEMC_SIZE 0x100
+#define RT3883_UART0_SIZE 0x100
+#define RT3883_UART1_SIZE 0x100
+#define RT3883_PIO_SIZE 0x100
+#define RT3883_FSCC_SIZE 0x100
+#define RT3883_NANDC_SIZE 0x0f0
+#define RT3883_I2C_SIZE 0x100
+#define RT3883_I2S_SIZE 0x100
+#define RT3883_SPI_SIZE 0x100
+#define RT3883_PCM_SIZE 0x800
+#define RT3883_GDMA_SIZE 0x800
+#define RT3883_CODEC1_SIZE 0x800
+#define RT3883_CODEC2_SIZE 0x800
+#define RT3883_FE_SIZE 0x10000
+#define RT3883_ROM_SIZE 0x4000
+#define RT3883_USBDEV_SIZE 0x4000
+#define RT3883_PCI_SIZE 0x40000
+#define RT3883_WLAN_SIZE 0x40000
+#define RT3883_USBHOST_SIZE 0x40000
+#define RT3883_BOOT_SIZE (32 * 1024 * 1024)
+#define RT3883_SRAM_SIZE (32 * 1024 * 1024)
+
+/* SYSC registers */
+#define RT3883_SYSC_REG_CHIPID0_3 0x00 /* Chip ID 0 */
+#define RT3883_SYSC_REG_CHIPID4_7 0x04 /* Chip ID 1 */
+#define RT3883_SYSC_REG_REVID 0x0c /* Chip Revision Identification */
+#define RT3883_SYSC_REG_SYSCFG0 0x10 /* System Configuration 0 */
+#define RT3883_SYSC_REG_SYSCFG1 0x14 /* System Configuration 1 */
+#define RT3883_SYSC_REG_CLKCFG0 0x2c /* Clock Configuration 0 */
+#define RT3883_SYSC_REG_CLKCFG1 0x30 /* Clock Configuration 1 */
+#define RT3883_SYSC_REG_RSTCTRL 0x34 /* Reset Control*/
+#define RT3883_SYSC_REG_RSTSTAT 0x38 /* Reset Status*/
+#define RT3883_SYSC_REG_USB_PS 0x5c /* USB Power saving control */
+#define RT3883_SYSC_REG_GPIO_MODE 0x60 /* GPIO Purpose Select */
+#define RT3883_SYSC_REG_PCIE_CLK_GEN0 0x7c
+#define RT3883_SYSC_REG_PCIE_CLK_GEN1 0x80
+#define RT3883_SYSC_REG_PCIE_CLK_GEN2 0x84
+#define RT3883_SYSC_REG_PMU 0x88
+#define RT3883_SYSC_REG_PMU1 0x8c
+
+#define RT3883_CHIP_NAME0 0x38335452
+#define RT3883_CHIP_NAME1 0x20203338
+
+#define RT3883_REVID_VER_ID_MASK 0x0f
+#define RT3883_REVID_VER_ID_SHIFT 8
+#define RT3883_REVID_ECO_ID_MASK 0x0f
+
+#define RT3883_SYSCFG0_DRAM_TYPE_DDR2 BIT(17)
+#define RT3883_SYSCFG0_CPUCLK_SHIFT 8
+#define RT3883_SYSCFG0_CPUCLK_MASK 0x3
+#define RT3883_SYSCFG0_CPUCLK_250 0x0
+#define RT3883_SYSCFG0_CPUCLK_384 0x1
+#define RT3883_SYSCFG0_CPUCLK_480 0x2
+#define RT3883_SYSCFG0_CPUCLK_500 0x3
+
+#define RT3883_SYSCFG1_USB0_HOST_MODE BIT(10)
+#define RT3883_SYSCFG1_PCIE_RC_MODE BIT(8)
+#define RT3883_SYSCFG1_PCI_HOST_MODE BIT(7)
+#define RT3883_SYSCFG1_PCI_66M_MODE BIT(6)
+#define RT3883_SYSCFG1_GPIO2_AS_WDT_OUT BIT(2)
+
+#define RT3883_CLKCFG1_PCIE_CLK_EN BIT(21)
+#define RT3883_CLKCFG1_UPHY1_CLK_EN BIT(20)
+#define RT3883_CLKCFG1_PCI_CLK_EN BIT(19)
+#define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18)
+
+#define RT3883_GPIO_MODE_I2C BIT(0)
+#define RT3883_GPIO_MODE_SPI BIT(1)
+#define RT3883_GPIO_MODE_UART0_SHIFT 2
+#define RT3883_GPIO_MODE_UART0_MASK 0x7
+#define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT)
+#define RT3883_GPIO_MODE_UARTF 0x0
+#define RT3883_GPIO_MODE_PCM_UARTF 0x1
+#define RT3883_GPIO_MODE_PCM_I2S 0x2
+#define RT3883_GPIO_MODE_I2S_UARTF 0x3
+#define RT3883_GPIO_MODE_PCM_GPIO 0x4
+#define RT3883_GPIO_MODE_GPIO_UARTF 0x5
+#define RT3883_GPIO_MODE_GPIO_I2S 0x6
+#define RT3883_GPIO_MODE_GPIO 0x7
+#define RT3883_GPIO_MODE_UART1 BIT(5)
+#define RT3883_GPIO_MODE_JTAG BIT(6)
+#define RT3883_GPIO_MODE_MDIO BIT(7)
+#define RT3883_GPIO_MODE_GE1 BIT(9)
+#define RT3883_GPIO_MODE_GE2 BIT(10)
+#define RT3883_GPIO_MODE_PCI_SHIFT 11
+#define RT3883_GPIO_MODE_PCI_MASK 0x7
+#define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT)
+#define RT3883_GPIO_MODE_LNA_A_SHIFT 16
+#define RT3883_GPIO_MODE_LNA_A_MASK 0x3
+#define _RT3883_GPIO_MODE_LNA_A(_x) ((_x) << RT3883_GPIO_MODE_LNA_A_SHIFT)
+#define RT3883_GPIO_MODE_LNA_A_GPIO 0x3
+#define RT3883_GPIO_MODE_LNA_A _RT3883_GPIO_MODE_LNA_A(RT3883_GPIO_MODE_LNA_A_MASK)
+#define RT3883_GPIO_MODE_LNA_G_SHIFT 18
+#define RT3883_GPIO_MODE_LNA_G_MASK 0x3
+#define _RT3883_GPIO_MODE_LNA_G(_x) ((_x) << RT3883_GPIO_MODE_LNA_G_SHIFT)
+#define RT3883_GPIO_MODE_LNA_G_GPIO 0x3
+#define RT3883_GPIO_MODE_LNA_G _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
+
+#define RT3883_GPIO_I2C_SD 1
+#define RT3883_GPIO_I2C_SCLK 2
+#define RT3883_GPIO_SPI_CS0 3
+#define RT3883_GPIO_SPI_CLK 4
+#define RT3883_GPIO_SPI_MOSI 5
+#define RT3883_GPIO_SPI_MISO 6
+#define RT3883_GPIO_7 7
+#define RT3883_GPIO_10 10
+#define RT3883_GPIO_11 11
+#define RT3883_GPIO_14 14
+#define RT3883_GPIO_UART1_TXD 15
+#define RT3883_GPIO_UART1_RXD 16
+#define RT3883_GPIO_JTAG_TDO 17
+#define RT3883_GPIO_JTAG_TDI 18
+#define RT3883_GPIO_JTAG_TMS 19
+#define RT3883_GPIO_JTAG_TCLK 20
+#define RT3883_GPIO_JTAG_TRST_N 21
+#define RT3883_GPIO_MDIO_MDC 22
+#define RT3883_GPIO_MDIO_MDIO 23
+#define RT3883_GPIO_LNA_PE_A0 32
+#define RT3883_GPIO_LNA_PE_A1 33
+#define RT3883_GPIO_LNA_PE_A2 34
+#define RT3883_GPIO_LNA_PE_G0 35
+#define RT3883_GPIO_LNA_PE_G1 36
+#define RT3883_GPIO_LNA_PE_G2 37
+#define RT3883_GPIO_PCI_AD0 40
+#define RT3883_GPIO_PCI_AD31 71
+#define RT3883_GPIO_GE2_TXD0 72
+#define RT3883_GPIO_GE2_TXD1 73
+#define RT3883_GPIO_GE2_TXD2 74
+#define RT3883_GPIO_GE2_TXD3 75
+#define RT3883_GPIO_GE2_TXEN 76
+#define RT3883_GPIO_GE2_TXCLK 77
+#define RT3883_GPIO_GE2_RXD0 78
+#define RT3883_GPIO_GE2_RXD1 79
+#define RT3883_GPIO_GE2_RXD2 80
+#define RT3883_GPIO_GE2_RXD3 81
+#define RT3883_GPIO_GE2_RXDV 82
+#define RT3883_GPIO_GE2_RXCLK 83
+#define RT3883_GPIO_GE1_TXD0 84
+#define RT3883_GPIO_GE1_TXD1 85
+#define RT3883_GPIO_GE1_TXD2 86
+#define RT3883_GPIO_GE1_TXD3 87
+#define RT3883_GPIO_GE1_TXEN 88
+#define RT3883_GPIO_GE1_TXCLK 89
+#define RT3883_GPIO_GE1_RXD0 90
+#define RT3883_GPIO_GE1_RXD1 91
+#define RT3883_GPIO_GE1_RXD2 92
+#define RT3883_GPIO_GE1_RXD3 93
+#define RT3883_GPIO_GE1_RXDV 94
+#define RT3883_GPIO_GE1_RXCLK 95
+
+#define RT3883_RSTCTRL_PCIE_PCI_PDM BIT(27)
+#define RT3883_RSTCTRL_FLASH BIT(26)
+#define RT3883_RSTCTRL_UDEV BIT(25)
+#define RT3883_RSTCTRL_PCI BIT(24)
+#define RT3883_RSTCTRL_PCIE BIT(23)
+#define RT3883_RSTCTRL_UHST BIT(22)
+#define RT3883_RSTCTRL_FE BIT(21)
+#define RT3883_RSTCTRL_WLAN BIT(20)
+#define RT3883_RSTCTRL_UART1 BIT(29)
+#define RT3883_RSTCTRL_SPI BIT(18)
+#define RT3883_RSTCTRL_I2S BIT(17)
+#define RT3883_RSTCTRL_I2C BIT(16)
+#define RT3883_RSTCTRL_NAND BIT(15)
+#define RT3883_RSTCTRL_DMA BIT(14)
+#define RT3883_RSTCTRL_PIO BIT(13)
+#define RT3883_RSTCTRL_UART BIT(12)
+#define RT3883_RSTCTRL_PCM BIT(11)
+#define RT3883_RSTCTRL_MC BIT(10)
+#define RT3883_RSTCTRL_INTC BIT(9)
+#define RT3883_RSTCTRL_TIMER BIT(8)
+#define RT3883_RSTCTRL_SYS BIT(0)
+
+#define RT3883_INTC_INT_SYSCTL BIT(0)
+#define RT3883_INTC_INT_TIMER0 BIT(1)
+#define RT3883_INTC_INT_TIMER1 BIT(2)
+#define RT3883_INTC_INT_IA BIT(3)
+#define RT3883_INTC_INT_PCM BIT(4)
+#define RT3883_INTC_INT_UART0 BIT(5)
+#define RT3883_INTC_INT_PIO BIT(6)
+#define RT3883_INTC_INT_DMA BIT(7)
+#define RT3883_INTC_INT_NAND BIT(8)
+#define RT3883_INTC_INT_PERFC BIT(9)
+#define RT3883_INTC_INT_I2S BIT(10)
+#define RT3883_INTC_INT_UART1 BIT(12)
+#define RT3883_INTC_INT_UHST BIT(18)
+#define RT3883_INTC_INT_UDEV BIT(19)
+
+/* FLASH/SRAM/Codec Controller registers */
+#define RT3883_FSCC_REG_FLASH_CFG0 0x00
+#define RT3883_FSCC_REG_FLASH_CFG1 0x04
+#define RT3883_FSCC_REG_CODEC_CFG0 0x40
+#define RT3883_FSCC_REG_CODEC_CFG1 0x44
+
+#define RT3883_FLASH_CFG_WIDTH_SHIFT 26
+#define RT3883_FLASH_CFG_WIDTH_MASK 0x3
+#define RT3883_FLASH_CFG_WIDTH_8BIT 0x0
+#define RT3883_FLASH_CFG_WIDTH_16BIT 0x1
+#define RT3883_FLASH_CFG_WIDTH_32BIT 0x2
+
+#define RT3883_SDRAM_BASE 0x00000000
+#define RT3883_MEM_SIZE_MIN 2
+#define RT3883_MEM_SIZE_MAX 256
+
+#endif /* _RT3883_REGS_H_ */
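The GPIO_MODE field helpers in this header follow the usual shift/mask idiom; as a hedged sketch (not part of the patch), composing a pinmux value with them might look like the fragment below. The rt_sysc_r32()/rt_sysc_w32() accessors and the RT3883_SYSC_REG_GPIO_MODE offset are assumed from the surrounding Ralink support code, not shown in this hunk.

	/* Illustrative sketch only: switch the UARTF pin group to plain GPIO
	 * while leaving the I2C pins on their dedicated function. */
	u32 mode;

	mode = rt_sysc_r32(RT3883_SYSC_REG_GPIO_MODE);          /* assumed accessor */
	mode &= ~RT3883_GPIO_MODE_UART0(RT3883_GPIO_MODE_UART0_MASK);
	mode |= RT3883_GPIO_MODE_UART0(RT3883_GPIO_MODE_GPIO);   /* UARTF pins -> GPIO */
	mode &= ~RT3883_GPIO_MODE_I2C;                           /* 0 = keep I2C function */
	rt_sysc_w32(mode, RT3883_SYSC_REG_GPIO_MODE);            /* assumed accessor */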
diff --git a/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h
new file mode 100644
index 000000000000..181fbf4c976f
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h
@@ -0,0 +1,55 @@
+/*
+ * Ralink RT3662/RT3883 specific CPU feature overrides
+ *
+ * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT3883_CPU_FEATURE_OVERRIDES_H
+#define _RT3883_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+
+#define cpu_has_prefetch 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+
+#define cpu_has_mips16 1
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 1
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_dsp 1
+#define cpu_has_mipsmt 0
+
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+#define cpu_has_64bit_addresses 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#endif /* _RT3883_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
index 193c0912d38e..bfbd7035d4c5 100644
--- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
@@ -28,7 +28,11 @@
/* #define cpu_has_prefetch ? */
#define cpu_has_mcheck 1
/* #define cpu_has_ejtag ? */
+#ifdef CONFIG_CPU_MICROMIPS
+#define cpu_has_llsc 0
+#else
#define cpu_has_llsc 1
+#endif
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index 44a09a64160a..bd9746fbe4af 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -83,4 +83,7 @@ extern void mips_pcibios_init(void);
#define mips_pcibios_init() do { } while (0)
#endif
+extern void mips_scroll_message(void);
+extern void mips_display_message(const char *str);
+
#endif /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/mips-boards/prom.h b/arch/mips/include/asm/mips-boards/prom.h
deleted file mode 100644
index e7aed3e4ff58..000000000000
--- a/arch/mips/include/asm/mips-boards/prom.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
- *
- * ########################################################################
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- * MIPS boards bootprom interface for the Linux kernel.
- *
- */
-
-#ifndef _MIPS_PROM_H
-#define _MIPS_PROM_H
-
-extern char *prom_getcmdline(void);
-extern char *prom_getenv(char *name);
-extern void prom_init_cmdline(void);
-extern void prom_meminit(void);
-extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem);
-extern void mips_display_message(const char *str);
-extern void mips_display_word(unsigned int num);
-extern void mips_scroll_message(void);
-extern int get_ethernet_addr(char *ethernet_addr);
-
-/* Memory descriptor management. */
-#define PROM_MAX_PMEMBLOCKS 32
-struct prom_pmemblock {
- unsigned long base; /* Within KSEG0. */
- unsigned int size; /* In bytes. */
- unsigned int type; /* free or prom memory */
-};
-
-#endif /* !(_MIPS_PROM_H) */
diff --git a/arch/mips/include/asm/mips_machine.h b/arch/mips/include/asm/mips_machine.h
index 363bb352c7f7..9d00aebe9842 100644
--- a/arch/mips/include/asm/mips_machine.h
+++ b/arch/mips/include/asm/mips_machine.h
@@ -42,13 +42,9 @@ extern long __mips_machines_end;
#ifdef CONFIG_MIPS_MACHINE
int mips_machtype_setup(char *id) __init;
void mips_machine_setup(void) __init;
-void mips_set_machine_name(const char *name) __init;
-char *mips_get_machine_name(void);
#else
static inline int mips_machtype_setup(char *id) { return 1; }
static inline void mips_machine_setup(void) { }
-static inline void mips_set_machine_name(const char *name) { }
-static inline char *mips_get_machine_name(void) { return NULL; }
#endif /* CONFIG_MIPS_MACHINE */
#endif /* __ASM_MIPS_MACHINE_H */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 0da44d422f5b..87e6207b05e4 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -596,6 +596,7 @@
#define MIPS_CONF3_RXI (_ULCAST_(1) << 12)
#define MIPS_CONF3_ULRI (_ULCAST_(1) << 13)
#define MIPS_CONF3_ISA (_ULCAST_(3) << 14)
+#define MIPS_CONF3_ISA_OE (_ULCAST_(3) << 16)
#define MIPS_CONF3_VZ (_ULCAST_(1) << 23)
#define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0)
@@ -623,6 +624,24 @@
#ifndef __ASSEMBLY__
/*
+ * Macros for handling the ISA mode bit for microMIPS.
+ */
+#define get_isa16_mode(x) ((x) & 0x1)
+#define msk_isa16_mode(x) ((x) & ~0x1)
+#define set_isa16_mode(x) do { (x) |= 0x1; } while(0)
+
+/*
+ * microMIPS instructions can be 16-bit or 32-bit in length. This
+ * returns a 1 if the instruction is 16-bit and a 0 if 32-bit.
+ */
+static inline int mm_insn_16bit(u16 insn)
+{
+ u16 opcode = (insn >> 10) & 0x7;
+
+ return (opcode >= 1 && opcode <= 3) ? 1 : 0;
+}
+
+/*
* Functions to access the R10000 performance counters. These are basically
* mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit
* performance counter number encoded into bits 1 ... 5 of the instruction.
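A hedged sketch of how the new ISA-mode helpers and mm_insn_16bit() fit together when stepping over a microMIPS instruction; the regs pointer and the raw halfword fetch are illustrative assumptions, not part of this patch.

	/* Illustrative only: advance EPC over one microMIPS instruction. */
	unsigned long epc = regs->cp0_epc;

	if (get_isa16_mode(epc)) {
		/* Drop the ISA bit before dereferencing the address. */
		u16 half = *(u16 *)msk_isa16_mode(epc);

		epc += mm_insn_16bit(half) ? 2 : 4;
		set_isa16_mode(epc);	/* stay in microMIPS mode on return */
	} else {
		epc += 4;		/* classic 32-bit encoding */
	}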
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index e81d719efcd1..1554721e4808 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -26,10 +26,15 @@
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
- tlbmiss_handler_setup_pgd((unsigned long)(pgd))
-
-extern void tlbmiss_handler_setup_pgd(unsigned long pgd);
+#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
+do { \
+ void (*tlbmiss_handler_setup_pgd)(unsigned long); \
+ extern u32 tlbmiss_handler_setup_pgd_array[16]; \
+ \
+ tlbmiss_handler_setup_pgd = \
+ (__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \
+ tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \
+} while (0)
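The macro now branches into a runtime-generated code buffer rather than a normal C function, which is why it goes through a cast local function pointer; a stripped-down sketch of the same pattern, with invented names, is:

	/* Illustrative sketch only: calling uasm-generated code through a cast.
	 * 'generated_buf' stands in for tlbmiss_handler_setup_pgd_array. */
	static u32 generated_buf[16];	/* filled with instructions at boot */

	static void call_generated_handler(unsigned long pgd)
	{
		void (*fn)(unsigned long) = (void (*)(unsigned long))generated_buf;

		fn(pgd);		/* jump into the generated code */
	}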
#define TLBMISS_HANDLER_SETUP() \
do { \
@@ -62,59 +67,88 @@ extern unsigned long pgd_current[];
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-
-#define ASID_INC 0x40
-#define ASID_MASK 0xfc0
-
-#elif defined(CONFIG_CPU_R8000)
-
-#define ASID_INC 0x10
-#define ASID_MASK 0xff0
-#elif defined(CONFIG_MIPS_MT_SMTC)
-
-#define ASID_INC 0x1
-extern unsigned long smtc_asid_mask;
-#define ASID_MASK (smtc_asid_mask)
-#define HW_ASID_MASK 0xff
-/* End SMTC/34K debug hack */
-#else /* FIXME: not correct for R6000 */
-
-#define ASID_INC 0x1
-#define ASID_MASK 0xff
+#define ASID_INC(asid) \
+({ \
+ unsigned long __asid = asid; \
+ __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t" \
+ ".section\t__asid_inc,\"a\"\n\t" \
+ ".word\t1b\n\t" \
+ ".previous" \
+ :"=r" (__asid) \
+ :"0" (__asid)); \
+ __asid; \
+})
+#define ASID_MASK(asid) \
+({ \
+ unsigned long __asid = asid; \
+ __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t" \
+ ".section\t__asid_mask,\"a\"\n\t" \
+ ".word\t1b\n\t" \
+ ".previous" \
+ :"=r" (__asid) \
+ :"r" (__asid)); \
+ __asid; \
+})
+#define ASID_VERSION_MASK \
+({ \
+ unsigned long __asid; \
+ __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t" \
+ ".section\t__asid_version_mask,\"a\"\n\t" \
+ ".word\t1b\n\t" \
+ ".previous" \
+ :"=r" (__asid)); \
+ __asid; \
+})
+#define ASID_FIRST_VERSION \
+({ \
+ unsigned long __asid = asid; \
+ __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t" \
+ ".section\t__asid_first_version,\"a\"\n\t" \
+ ".word\t1b\n\t" \
+ ".previous" \
+ :"=r" (__asid)); \
+ __asid; \
+})
+
+#define ASID_FIRST_VERSION_R3000 0x1000
+#define ASID_FIRST_VERSION_R4000 0x100
+#define ASID_FIRST_VERSION_R8000 0x1000
+#define ASID_FIRST_VERSION_RM9000 0x1000
+#ifdef CONFIG_MIPS_MT_SMTC
+#define SMTC_HW_ASID_MASK 0xff
+extern unsigned int smtc_asid_mask;
#endif
#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
+#define cpu_asid(cpu, mm) ASID_MASK(cpu_context((cpu), (mm)))
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-/*
- * All unused by hardware upper bits will be considered
- * as a software asid extension.
- */
-#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
-#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
-
#ifndef CONFIG_MIPS_MT_SMTC
/* Normal, classic MIPS get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
+ extern void kvm_local_flush_tlb_all(void);
unsigned long asid = asid_cache(cpu);
- if (! ((asid += ASID_INC) & ASID_MASK) ) {
+ if (!ASID_MASK((asid = ASID_INC(asid)))) {
if (cpu_has_vtag_icache)
flush_icache_all();
+#ifdef CONFIG_VIRTUALIZATION
+ kvm_local_flush_tlb_all(); /* start new asid cycle */
+#else
local_flush_tlb_all(); /* start new asid cycle */
+#endif
if (!asid) /* fix version if needed */
asid = ASID_FIRST_VERSION;
}
+
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
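ASID_INC()/ASID_MASK() record the address of each patched instruction in a dedicated section so that boot code can rewrite the immediates once the CPU's real ASID width is known. A heavily hedged sketch of what such a patcher does follows; the section symbols and the icache flush call are assumptions made for illustration, not this patch's actual patching code.

	/* Illustrative only: rewrite the immediate of every recorded 'andi'. */
	extern u32 *__start___asid_mask[], *__stop___asid_mask[];	/* assumed linker symbols */

	static void patch_asid_mask(u16 asid_mask)
	{
		u32 **entry;

		for (entry = __start___asid_mask; entry < __stop___asid_mask; entry++) {
			u32 *insn = *entry;	/* points at the "andi %0,%1,0xfc0" */

			*insn = (*insn & ~0xffff) | asid_mask;
			flush_icache_range((unsigned long)insn,
					   (unsigned long)insn + sizeof(*insn));	/* assumed */
		}
	}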
@@ -133,7 +167,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int i;
- for_each_online_cpu(i)
+ for_each_possible_cpu(i)
cpu_context(i, mm) = 0;
return 0;
@@ -166,7 +200,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* free up the ASID value for use and flush any old
* instances of it from the TLB.
*/
- oldasid = (read_c0_entryhi() & ASID_MASK);
+ oldasid = ASID_MASK(read_c0_entryhi());
if(smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -177,7 +211,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* having ASID_MASK smaller than the hardware maximum,
* make sure no "soft" bits become "hard"...
*/
- write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
+ write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
cpu_asid(cpu, next));
ehb(); /* Make sure it propagates to TCStatus */
evpe(mtflags);
@@ -230,15 +264,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
#ifdef CONFIG_MIPS_MT_SMTC
/* See comments for similar code above */
mtflags = dvpe();
- oldasid = read_c0_entryhi() & ASID_MASK;
+ oldasid = ASID_MASK(read_c0_entryhi());
if(smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
smtc_flush_tlb_asid(oldasid);
}
/* See comments for similar code above */
- write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
- cpu_asid(cpu, next));
+ write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+ cpu_asid(cpu, next));
ehb(); /* Make sure it propagates to TCStatus */
evpe(mtflags);
#else
@@ -275,14 +309,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
#ifdef CONFIG_MIPS_MT_SMTC
/* See comments for similar code above */
prevvpe = dvpe();
- oldasid = (read_c0_entryhi() & ASID_MASK);
+ oldasid = ASID_MASK(read_c0_entryhi());
if (smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
smtc_flush_tlb_asid(oldasid);
}
/* See comments for similar code above */
- write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
+ write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
| cpu_asid(cpu, mm));
ehb(); /* Make sure it propagates to TCStatus */
evpe(prevvpe);
diff --git a/arch/mips/include/asm/netlogic/haldefs.h b/arch/mips/include/asm/netlogic/haldefs.h
index 419d8aef8569..79c7cccdc22c 100644
--- a/arch/mips/include/asm/netlogic/haldefs.h
+++ b/arch/mips/include/asm/netlogic/haldefs.h
@@ -35,42 +35,13 @@
#ifndef __NLM_HAL_HALDEFS_H__
#define __NLM_HAL_HALDEFS_H__
+#include <linux/irqflags.h> /* for local_irq_save()/restore() */
+
/*
* This file contains platform specific memory mapped IO implementation
* and will provide a way to read 32/64 bit memory mapped registers in
* all ABIs
*/
-#if !defined(CONFIG_64BIT) && defined(CONFIG_CPU_XLP)
-#error "o32 compile not supported on XLP yet"
-#endif
-/*
- * For o32 compilation, we have to disable interrupts and enable KX bit to
- * access 64 bit addresses or data.
- *
- * We need to disable interrupts because we save just the lower 32 bits of
- * registers in interrupt handling. So if we get hit by an interrupt while
- * using the upper 32 bits of a register, we lose.
- */
-static inline uint32_t nlm_save_flags_kx(void)
-{
- return change_c0_status(ST0_KX | ST0_IE, ST0_KX);
-}
-
-static inline uint32_t nlm_save_flags_cop2(void)
-{
- return change_c0_status(ST0_CU2 | ST0_IE, ST0_CU2);
-}
-
-static inline void nlm_restore_flags(uint32_t sr)
-{
- write_c0_status(sr);
-}
-
-/*
- * The n64 implementations are simple, the o32 implementations when they
- * are added, will have to disable interrupts and enable KX before doing
- * 64 bit ops.
- */
static inline uint32_t
nlm_read_reg(uint64_t base, uint32_t reg)
{
@@ -87,13 +58,40 @@ nlm_write_reg(uint64_t base, uint32_t reg, uint32_t val)
*addr = val;
}
+/*
+ * For o32 compilation, we have to disable interrupts to access 64-bit
+ * registers.
+ *
+ * We need to disable interrupts because we save just the lower 32 bits of
+ * registers in interrupt handling. So if we get hit by an interrupt while
+ * using the upper 32 bits of a register, we lose.
+ */
+
static inline uint64_t
nlm_read_reg64(uint64_t base, uint32_t reg)
{
uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
-
- return *ptr;
+ uint64_t val;
+
+ if (sizeof(unsigned long) == 4) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __asm__ __volatile__(
+ ".set push" "\n\t"
+ ".set mips64" "\n\t"
+ "ld %L0, %1" "\n\t"
+ "dsra32 %M0, %L0, 0" "\n\t"
+ "sll %L0, %L0, 0" "\n\t"
+ ".set pop" "\n"
+ : "=r" (val)
+ : "m" (*ptr));
+ local_irq_restore(flags);
+ } else
+ val = *ptr;
+
+ return val;
}
static inline void
@@ -102,7 +100,25 @@ nlm_write_reg64(uint64_t base, uint32_t reg, uint64_t val)
uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
- *ptr = val;
+ if (sizeof(unsigned long) == 4) {
+ unsigned long flags;
+ uint64_t tmp;
+
+ local_irq_save(flags);
+ __asm__ __volatile__(
+ ".set push" "\n\t"
+ ".set mips64" "\n\t"
+ "dsll32 %L0, %L0, 0" "\n\t"
+ "dsrl32 %L0, %L0, 0" "\n\t"
+ "dsll32 %M0, %M0, 0" "\n\t"
+ "or %L0, %L0, %M0" "\n\t"
+ "sd %L0, %2" "\n\t"
+ ".set pop" "\n"
+ : "=r" (tmp)
+ : "0" (val), "m" (*ptr));
+ local_irq_restore(flags);
+ } else
+ *ptr = val;
}
/*
@@ -143,14 +159,6 @@ nlm_pcicfg_base(uint32_t devoffset)
return nlm_io_base + devoffset;
}
-static inline uint64_t
-nlm_xkphys_map_pcibar0(uint64_t pcibase)
-{
- uint64_t paddr;
-
- paddr = nlm_read_reg(pcibase, 0x4) & ~0xfu;
- return (uint64_t)0x9000000000000000 | paddr;
-}
#elif defined(CONFIG_CPU_XLR)
static inline uint64_t
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index 8ad2e0f81719..f299d31d7c1a 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -38,21 +38,16 @@
/*
* XLR and XLP interrupt request and interrupt mask registers
*/
-#define read_c0_eirr() __read_64bit_c0_register($9, 6)
-#define read_c0_eimr() __read_64bit_c0_register($9, 7)
-#define write_c0_eirr(val) __write_64bit_c0_register($9, 6, val)
-
/*
- * Writing EIMR in 32 bit is a special case, the lower 8 bit of the
- * EIMR is shadowed in the status register, so we cannot save and
- * restore status register for split read.
+ * NOTE: Do not save/restore flags around write_c0_eimr().
+ * On non-R2 platforms the saved flags value contains the part of EIMR that is
+ * shadowed in the STATUS register, so restoring flags will overwrite the lower
+ * 8 bits of EIMR.
+ *
+ * Call with interrupts disabled.
*/
#define write_c0_eimr(val) \
do { \
if (sizeof(unsigned long) == 4) { \
- unsigned long __flags; \
- \
- local_irq_save(__flags); \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dsll\t%L0, %L0, 32\n\t" \
@@ -62,8 +57,6 @@ do { \
"dmtc0\t%L0, $9, 7\n\t" \
".set\tmips0" \
: : "r" (val)); \
- __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\
- local_irq_restore(__flags); \
} else \
__write_64bit_c0_register($9, 7, (val)); \
} while (0)
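With the save/restore removed, the burden moves to callers: they must already run with interrupts off when touching EIMR, since the low 8 bits live in STATUS and would otherwise be clobbered by a later local_irq_restore(). A hedged usage sketch, where the cached-mask variable is an illustrative assumption:

	/* Illustrative only: unmask one extended interrupt source. */
	static uint64_t cached_eimr;	/* assumed software copy of EIMR */

	static void nlm_unmask_source(int irq)
	{
		unsigned long flags;

		local_irq_save(flags);		/* required around write_c0_eimr() now */
		cached_eimr |= 1ULL << irq;
		write_c0_eimr(cached_eimr);
		local_irq_restore(flags);
	}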
@@ -128,7 +121,7 @@ static inline uint64_t read_c0_eirr_and_eimr(void)
uint64_t val;
#ifdef CONFIG_64BIT
- val = read_c0_eimr() & read_c0_eirr();
+ val = __read_64bit_c0_register($9, 6) & __read_64bit_c0_register($9, 7);
#else
__asm__ __volatile__(
".set push\n\t"
@@ -143,7 +136,6 @@ static inline uint64_t read_c0_eirr_and_eimr(void)
".set pop"
: "=r" (val));
#endif
-
return val;
}
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
index 3df53017fe51..a981f4681a15 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
@@ -191,59 +191,6 @@
#define PIC_IRT_PCIE_LINK_2_INDEX 80
#define PIC_IRT_PCIE_LINK_3_INDEX 81
#define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX)
-/* 78 to 81 */
-#define PIC_NUM_NA_IRTS 32
-/* 82 to 113 */
-#define PIC_IRT_NA_0_INDEX 82
-#define PIC_IRT_NA_INDEX(num) ((num) + PIC_IRT_NA_0_INDEX)
-#define PIC_IRT_POE_INDEX 114
-
-#define PIC_NUM_USB_IRTS 6
-#define PIC_IRT_USB_0_INDEX 115
-#define PIC_IRT_EHCI_0_INDEX 115
-#define PIC_IRT_OHCI_0_INDEX 116
-#define PIC_IRT_OHCI_1_INDEX 117
-#define PIC_IRT_EHCI_1_INDEX 118
-#define PIC_IRT_OHCI_2_INDEX 119
-#define PIC_IRT_OHCI_3_INDEX 120
-#define PIC_IRT_USB_INDEX(num) ((num) + PIC_IRT_USB_0_INDEX)
-/* 115 to 120 */
-#define PIC_IRT_GDX_INDEX 121
-#define PIC_IRT_SEC_INDEX 122
-#define PIC_IRT_RSA_INDEX 123
-
-#define PIC_NUM_COMP_IRTS 4
-#define PIC_IRT_COMP_0_INDEX 124
-#define PIC_IRT_COMP_INDEX(num) ((num) + PIC_IRT_COMP_0_INDEX)
-/* 124 to 127 */
-#define PIC_IRT_GBU_INDEX 128
-#define PIC_IRT_ICC_0_INDEX 129 /* ICC - Inter Chip Coherency */
-#define PIC_IRT_ICC_1_INDEX 130
-#define PIC_IRT_ICC_2_INDEX 131
-#define PIC_IRT_CAM_INDEX 132
-#define PIC_IRT_UART_0_INDEX 133
-#define PIC_IRT_UART_1_INDEX 134
-#define PIC_IRT_I2C_0_INDEX 135
-#define PIC_IRT_I2C_1_INDEX 136
-#define PIC_IRT_SYS_0_INDEX 137
-#define PIC_IRT_SYS_1_INDEX 138
-#define PIC_IRT_JTAG_INDEX 139
-#define PIC_IRT_PIC_INDEX 140
-#define PIC_IRT_NBU_INDEX 141
-#define PIC_IRT_TCU_INDEX 142
-#define PIC_IRT_GCU_INDEX 143 /* GBC - Global Coherency */
-#define PIC_IRT_DMC_0_INDEX 144
-#define PIC_IRT_DMC_1_INDEX 145
-
-#define PIC_NUM_GPIO_IRTS 4
-#define PIC_IRT_GPIO_0_INDEX 146
-#define PIC_IRT_GPIO_INDEX(num) ((num) + PIC_IRT_GPIO_0_INDEX)
-
-/* 146 to 149 */
-#define PIC_IRT_NOR_INDEX 150
-#define PIC_IRT_NAND_INDEX 151
-#define PIC_IRT_SPI_INDEX 152
-#define PIC_IRT_MMC_INDEX 153
#define PIC_CLOCK_TIMER 7
#define PIC_IRQ_BASE 8
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/usb.h b/arch/mips/include/asm/netlogic/xlp-hal/usb.h
deleted file mode 100644
index a9cd350dfb6c..000000000000
--- a/arch/mips/include/asm/netlogic/xlp-hal/usb.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __NLM_HAL_USB_H__
-#define __NLM_HAL_USB_H__
-
-#define USB_CTL_0 0x01
-#define USB_PHY_0 0x0A
-#define USB_PHY_RESET 0x01
-#define USB_PHY_PORT_RESET_0 0x10
-#define USB_PHY_PORT_RESET_1 0x20
-#define USB_CONTROLLER_RESET 0x01
-#define USB_INT_STATUS 0x0E
-#define USB_INT_EN 0x0F
-#define USB_PHY_INTERRUPT_EN 0x01
-#define USB_OHCI_INTERRUPT_EN 0x02
-#define USB_OHCI_INTERRUPT1_EN 0x04
-#define USB_OHCI_INTERRUPT2_EN 0x08
-#define USB_CTRL_INTERRUPT_EN 0x10
-
-#ifndef __ASSEMBLY__
-
-#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r)
-#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v)
-#define nlm_get_usb_pcibase(node, inst) \
- nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
-#define nlm_get_usb_hcd_base(node, inst) \
- nlm_xkphys_map_pcibar0(nlm_get_usb_pcibase(node, inst))
-#define nlm_get_usb_regbase(node, inst) \
- (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
-
-#endif
-#endif /* __NLM_HAL_USB_H__ */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index fdc62fb5630d..8b8f6b393363 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -8,6 +8,7 @@
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
+#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 2a5fa7abb346..71686c897dea 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -44,11 +44,16 @@ extern unsigned int vced_count, vcei_count;
#define SPECIAL_PAGES_SIZE PAGE_SIZE
#ifdef CONFIG_32BIT
+#ifdef CONFIG_KVM_GUEST
+/* User space process size is limited to 1GB in KVM Guest Mode */
+#define TASK_SIZE 0x3fff8000UL
+#else
/*
* User space process size: 2GB. This is hardcoded into a few places,
* so don't change it unless you know what you are doing.
*/
#define TASK_SIZE 0x7fff8000UL
+#endif
#ifdef __KERNEL__
#define STACK_TOP_MAX TASK_SIZE
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index 8808bf548b99..1e7e0961064b 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -48,4 +48,7 @@ extern void __dt_setup_arch(struct boot_param_header *bph);
static inline void device_tree_init(void) { }
#endif /* CONFIG_OF */
+extern char *mips_get_machine_name(void);
+extern void mips_set_machine_name(const char *name);
+
#endif /* __ASM_PROM_H */
diff --git a/arch/mips/include/asm/sn/sn_private.h b/arch/mips/include/asm/sn/sn_private.h
index 1a2c3025bf28..fdfae43d8b99 100644
--- a/arch/mips/include/asm/sn/sn_private.h
+++ b/arch/mips/include/asm/sn/sn_private.h
@@ -14,6 +14,6 @@ extern void install_cpu_nmi_handler(int slice);
extern void install_ipi(void);
extern void setup_replication_mask(void);
extern void replicate_kernel_text(void);
-extern pfn_t node_getfirstfree(cnodeid_t);
+extern unsigned long node_getfirstfree(cnodeid_t);
#endif /* __ASM_SN_SN_PRIVATE_H */
diff --git a/arch/mips/include/asm/sn/types.h b/arch/mips/include/asm/sn/types.h
index c4813d67aec3..6d24d4e8b9ed 100644
--- a/arch/mips/include/asm/sn/types.h
+++ b/arch/mips/include/asm/sn/types.h
@@ -19,7 +19,6 @@ typedef signed char partid_t; /* partition ID type */
typedef signed short moduleid_t; /* user-visible module number type */
typedef signed short cmoduleid_t; /* kernel compact module id type */
typedef unsigned char clusterid_t; /* Clusterid of the cell */
-typedef unsigned long pfn_t;
typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5130c88d6420..78d201fb6c87 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -71,7 +71,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
" nop \n"
" srl %[my_ticket], %[ticket], 16 \n"
" andi %[ticket], %[ticket], 0xffff \n"
- " andi %[my_ticket], %[my_ticket], 0xffff \n"
" bne %[ticket], %[my_ticket], 4f \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
"2: \n"
@@ -105,7 +104,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
" beqz %[my_ticket], 1b \n"
" srl %[my_ticket], %[ticket], 16 \n"
" andi %[ticket], %[ticket], 0xffff \n"
- " andi %[my_ticket], %[my_ticket], 0xffff \n"
" bne %[ticket], %[my_ticket], 4f \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
"2: \n"
@@ -153,7 +151,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" srl %[my_ticket], %[ticket], 16 \n"
- " andi %[my_ticket], %[my_ticket], 0xffff \n"
" andi %[now_serving], %[ticket], 0xffff \n"
" bne %[my_ticket], %[now_serving], 3f \n"
" addu %[ticket], %[ticket], %[inc] \n"
@@ -178,7 +175,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" srl %[my_ticket], %[ticket], 16 \n"
- " andi %[my_ticket], %[my_ticket], 0xffff \n"
" andi %[now_serving], %[ticket], 0xffff \n"
" bne %[my_ticket], %[now_serving], 3f \n"
" addu %[ticket], %[ticket], %[inc] \n"
@@ -242,25 +238,16 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
: "m" (rw->lock)
: "memory");
} else {
- __asm__ __volatile__(
- " .set noreorder # arch_read_lock \n"
- "1: ll %1, %2 \n"
- " bltz %1, 3f \n"
- " addu %1, 1 \n"
- "2: sc %1, %0 \n"
- " beqz %1, 1b \n"
- " nop \n"
- " .subsection 2 \n"
- "3: ll %1, %2 \n"
- " bltz %1, 3b \n"
- " addu %1, 1 \n"
- " b 2b \n"
- " nop \n"
- " .previous \n"
- " .set reorder \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ "1: ll %1, %2 # arch_read_lock \n"
+ " bltz %1, 1b \n"
+ " addu %1, 1 \n"
+ "2: sc %1, %0 \n"
+ : "=m" (rw->lock), "=&r" (tmp)
+ : "m" (rw->lock)
+ : "memory");
+ } while (unlikely(!tmp));
}
smp_llsc_mb();
@@ -285,21 +272,15 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
: "m" (rw->lock)
: "memory");
} else {
- __asm__ __volatile__(
- " .set noreorder # arch_read_unlock \n"
- "1: ll %1, %2 \n"
- " sub %1, 1 \n"
- " sc %1, %0 \n"
- " beqz %1, 2f \n"
- " nop \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " nop \n"
- " .previous \n"
- " .set reorder \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ "1: ll %1, %2 # arch_read_unlock \n"
+ " sub %1, 1 \n"
+ " sc %1, %0 \n"
+ : "=m" (rw->lock), "=&r" (tmp)
+ : "m" (rw->lock)
+ : "memory");
+ } while (unlikely(!tmp));
}
}
@@ -321,25 +302,16 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
: "m" (rw->lock)
: "memory");
} else {
- __asm__ __volatile__(
- " .set noreorder # arch_write_lock \n"
- "1: ll %1, %2 \n"
- " bnez %1, 3f \n"
- " lui %1, 0x8000 \n"
- "2: sc %1, %0 \n"
- " beqz %1, 3f \n"
- " nop \n"
- " .subsection 2 \n"
- "3: ll %1, %2 \n"
- " bnez %1, 3b \n"
- " lui %1, 0x8000 \n"
- " b 2b \n"
- " nop \n"
- " .previous \n"
- " .set reorder \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ "1: ll %1, %2 # arch_write_lock \n"
+ " bnez %1, 1b \n"
+ " lui %1, 0x8000 \n"
+ "2: sc %1, %0 \n"
+ : "=m" (rw->lock), "=&r" (tmp)
+ : "m" (rw->lock)
+ : "memory");
+ } while (unlikely(!tmp));
}
smp_llsc_mb();
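The rework keeps only the LL/SC body in assembly and moves the retry into a C do/while on the SC result. The same control-flow shape in portable C11 atomics, purely as an illustration (the kernel does not build its locks this way):

	#include <stdatomic.h>

	/* Illustrative only: "retry in C, keep the atomic body minimal". */
	static void read_lock_sketch(atomic_int *lock)
	{
		int old;

		for (;;) {
			old = atomic_load_explicit(lock, memory_order_relaxed);
			if (old < 0)
				continue;	/* write-locked: try again */
			if (atomic_compare_exchange_weak_explicit(lock, &old, old + 1,
					memory_order_acquire, memory_order_relaxed))
				break;		/* reader count bumped, lock taken */
		}
	}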
@@ -424,25 +396,21 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
: "m" (rw->lock)
: "memory");
} else {
- __asm__ __volatile__(
- " .set noreorder # arch_write_trylock \n"
- " li %2, 0 \n"
- "1: ll %1, %3 \n"
- " bnez %1, 2f \n"
- " lui %1, 0x8000 \n"
- " sc %1, %0 \n"
- " beqz %1, 3f \n"
- " li %2, 1 \n"
- "2: \n"
- __WEAK_LLSC_MB
- " .subsection 2 \n"
- "3: b 1b \n"
- " li %2, 0 \n"
- " .previous \n"
- " .set reorder \n"
- : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
- : "m" (rw->lock)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " ll %1, %3 # arch_write_trylock \n"
+ " li %2, 0 \n"
+ " bnez %1, 2f \n"
+ " lui %1, 0x8000 \n"
+ " sc %1, %0 \n"
+ " li %2, 1 \n"
+ "2: \n"
+ : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
+ : "m" (rw->lock)
+ : "memory");
+ } while (unlikely(!tmp));
+
+ smp_llsc_mb();
}
return ret;
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index c99384018161..a89d1b10d027 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -139,7 +139,7 @@
1: move ra, k0
li k0, 3
mtc0 k0, $22
-#endif /* CONFIG_CPU_LOONGSON2F */
+#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
lui k1, %hi(kernelsp)
#else
@@ -189,6 +189,7 @@
LONG_S $0, PT_R0(sp)
mfc0 v1, CP0_STATUS
LONG_S $2, PT_R2(sp)
+ LONG_S v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Ideally, these instructions would be shuffled in
@@ -200,21 +201,20 @@
LONG_S k0, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_S $4, PT_R4(sp)
- LONG_S $5, PT_R5(sp)
- LONG_S v1, PT_STATUS(sp)
mfc0 v1, CP0_CAUSE
- LONG_S $6, PT_R6(sp)
- LONG_S $7, PT_R7(sp)
+ LONG_S $5, PT_R5(sp)
LONG_S v1, PT_CAUSE(sp)
+ LONG_S $6, PT_R6(sp)
MFC0 v1, CP0_EPC
+ LONG_S $7, PT_R7(sp)
#ifdef CONFIG_64BIT
LONG_S $8, PT_R8(sp)
LONG_S $9, PT_R9(sp)
#endif
+ LONG_S v1, PT_EPC(sp)
LONG_S $25, PT_R25(sp)
LONG_S $28, PT_R28(sp)
LONG_S $31, PT_R31(sp)
- LONG_S v1, PT_EPC(sp)
ori $28, sp, _THREAD_MASK
xori $28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 178f7924149a..895320e25662 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -58,8 +58,12 @@ struct thread_info {
#define init_stack (init_thread_union.stack)
/* How to get the thread information struct from C. */
-register struct thread_info *__current_thread_info __asm__("$28");
-#define current_thread_info() __current_thread_info
+static inline struct thread_info *current_thread_info(void)
+{
+ register struct thread_info *__current_thread_info __asm__("$28");
+
+ return __current_thread_info;
+}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index debc8009bd58..2d7b9df4542d 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -52,13 +52,15 @@ extern int (*perf_irq)(void);
*/
extern unsigned int __weak get_c0_compare_int(void);
extern int r4k_clockevent_init(void);
+extern int smtc_clockevent_init(void);
+extern int gic_clockevent_init(void);
static inline int mips_clockevent_init(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
- extern int smtc_clockevent_init(void);
-
return smtc_clockevent_init();
+#elif defined(CONFIG_CEVT_GIC)
+ return (gic_clockevent_init() | r4k_clockevent_init());
#elif defined(CONFIG_CEVT_R4K)
return r4k_clockevent_init();
#else
@@ -69,9 +71,7 @@ static inline int mips_clockevent_init(void)
/*
* Initialize the count register as a clocksource
*/
-#ifdef CONFIG_CSRC_R4K
extern int init_r4k_clocksource(void);
-#endif
static inline int init_mips_clocksource(void)
{
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index bd87e36bf26a..f3fa3750f577 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -23,7 +23,11 @@
*/
#ifdef CONFIG_32BIT
-#define __UA_LIMIT 0x80000000UL
+#ifdef CONFIG_KVM_GUEST
+#define __UA_LIMIT 0x40000000UL
+#else
+#define __UA_LIMIT 0x80000000UL
+#endif
#define __UA_ADDR ".word"
#define __UA_LA "la"
@@ -55,8 +59,13 @@ extern u64 __ua_limit;
* address in this range it's the process's problem, not ours :-)
*/
+#ifdef CONFIG_KVM_GUEST
+#define KERNEL_DS ((mm_segment_t) { 0x80000000UL })
+#define USER_DS ((mm_segment_t) { 0xC0000000UL })
+#else
#define KERNEL_DS ((mm_segment_t) { 0UL })
#define USER_DS ((mm_segment_t) { __UA_LIMIT })
+#endif
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -261,6 +270,7 @@ do { \
__asm__ __volatile__( \
"1: " insn " %1, %3 \n" \
"2: \n" \
+ " .insn \n" \
" .section .fixup,\"ax\" \n" \
"3: li %0, %4 \n" \
" j 2b \n" \
@@ -287,7 +297,9 @@ do { \
__asm__ __volatile__( \
"1: lw %1, (%3) \n" \
"2: lw %D1, 4(%3) \n" \
- "3: .section .fixup,\"ax\" \n" \
+ "3: \n" \
+ " .insn \n" \
+ " .section .fixup,\"ax\" \n" \
"4: li %0, %4 \n" \
" move %1, $0 \n" \
" move %D1, $0 \n" \
@@ -355,6 +367,7 @@ do { \
__asm__ __volatile__( \
"1: " insn " %z2, %3 # __put_user_asm\n" \
"2: \n" \
+ " .insn \n" \
" .section .fixup,\"ax\" \n" \
"3: li %0, %4 \n" \
" j 2b \n" \
@@ -373,6 +386,7 @@ do { \
"1: sw %2, (%3) # __put_user_asm_ll32 \n" \
"2: sw %D2, 4(%3) \n" \
"3: \n" \
+ " .insn \n" \
" .section .fixup,\"ax\" \n" \
"4: li %0, %4 \n" \
" j 3b \n" \
@@ -524,6 +538,7 @@ do { \
__asm__ __volatile__( \
"1: " insn " %1, %3 \n" \
"2: \n" \
+ " .insn \n" \
" .section .fixup,\"ax\" \n" \
"3: li %0, %4 \n" \
" j 2b \n" \
@@ -549,7 +564,9 @@ do { \
"1: ulw %1, (%3) \n" \
"2: ulw %D1, 4(%3) \n" \
" move %0, $0 \n" \
- "3: .section .fixup,\"ax\" \n" \
+ "3: \n" \
+ " .insn \n" \
+ " .section .fixup,\"ax\" \n" \
"4: li %0, %4 \n" \
" move %1, $0 \n" \
" move %D1, $0 \n" \
@@ -616,6 +633,7 @@ do { \
__asm__ __volatile__( \
"1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \
"2: \n" \
+ " .insn \n" \
" .section .fixup,\"ax\" \n" \
"3: li %0, %4 \n" \
" j 2b \n" \
@@ -634,6 +652,7 @@ do { \
"1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
"2: sw %D2, 4(%3) \n" \
"3: \n" \
+ " .insn \n" \
" .section .fixup,\"ax\" \n" \
"4: li %0, %4 \n" \
" j 3b \n" \
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 058e941626a6..370d967725c2 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -6,7 +6,7 @@
* Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2012 MIPS Technologies, Inc.
+ * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -22,44 +22,75 @@
#define UASM_EXPORT_SYMBOL(sym)
#endif
+#define _UASM_ISA_CLASSIC 0
+#define _UASM_ISA_MICROMIPS 1
+
+#ifndef UASM_ISA
+#ifdef CONFIG_CPU_MICROMIPS
+#define UASM_ISA _UASM_ISA_MICROMIPS
+#else
+#define UASM_ISA _UASM_ISA_CLASSIC
+#endif
+#endif
+
+#if (UASM_ISA == _UASM_ISA_CLASSIC)
+#ifdef CONFIG_CPU_MICROMIPS
+#define ISAOPC(op) CL_uasm_i##op
+#define ISAFUNC(x) CL_##x
+#else
+#define ISAOPC(op) uasm_i##op
+#define ISAFUNC(x) x
+#endif
+#elif (UASM_ISA == _UASM_ISA_MICROMIPS)
+#ifdef CONFIG_CPU_MICROMIPS
+#define ISAOPC(op) uasm_i##op
+#define ISAFUNC(x) x
+#else
+#define ISAOPC(op) MM_uasm_i##op
+#define ISAFUNC(x) MM_##x
+#endif
+#else
+#error Unsupported micro-assembler ISA!!!
+#endif
+
#define Ip_u1u2u3(op) \
void __uasminit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u2u1u3(op) \
void __uasminit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u3u1u2(op) \
void __uasminit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u1u2s3(op) \
void __uasminit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2s3u1(op) \
void __uasminit \
-uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
#define Ip_u2u1s3(op) \
void __uasminit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2u1msbu3(op) \
void __uasminit \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
unsigned int d)
#define Ip_u1u2(op) \
-void __uasminit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
+void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
#define Ip_u1s2(op) \
-void __uasminit uasm_i##op(u32 **buf, unsigned int a, signed int b)
+void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
-#define Ip_u1(op) void __uasminit uasm_i##op(u32 **buf, unsigned int a)
+#define Ip_u1(op) void __uasminit ISAOPC(op)(u32 **buf, unsigned int a)
-#define Ip_0(op) void __uasminit uasm_i##op(u32 **buf)
+#define Ip_0(op) void __uasminit ISAOPC(op)(u32 **buf)
Ip_u2u1s3(_addiu);
Ip_u3u1u2(_addu);
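The ISAOPC()/ISAFUNC() wrappers let one copy of the emitter source produce both the classic and the microMIPS instruction flavours; roughly, and assuming CONFIG_CPU_MICROMIPS=y for the illustration:

	/*
	 * Illustrative expansion only (CONFIG_CPU_MICROMIPS=y assumed):
	 *
	 *   UASM_ISA == _UASM_ISA_CLASSIC:
	 *	Ip_u2u1s3(_addiu)  ->  void __uasminit
	 *	CL_uasm_i_addiu(u32 **buf, unsigned int a, unsigned int b, signed int c)
	 *
	 *   UASM_ISA == _UASM_ISA_MICROMIPS:
	 *	Ip_u2u1s3(_addiu)  ->  void __uasminit
	 *	uasm_i_addiu(u32 **buf, unsigned int a, unsigned int b, signed int c)
	 *
	 * so existing callers keep using the plain uasm_i_* names and transparently
	 * get the encoding that matches the kernel's own ISA.
	 */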
@@ -132,19 +163,20 @@ struct uasm_label {
int lab;
};
-void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
+void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
+ int lid);
#ifdef CONFIG_64BIT
-int uasm_in_compat_space_p(long addr);
+int ISAFUNC(uasm_in_compat_space_p)(long addr);
#endif
-int uasm_rel_hi(long val);
-int uasm_rel_lo(long val);
-void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
-void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
+int ISAFUNC(uasm_rel_hi)(long val);
+int ISAFUNC(uasm_rel_lo)(long val);
+void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
+void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
#define UASM_L_LA(lb) \
-static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
+static inline void __uasminit ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
{ \
- uasm_build_label(lab, addr, label##lb); \
+ ISAFUNC(uasm_build_label)(lab, addr, label##lb); \
}
/* convenience macros for instructions */
@@ -196,27 +228,27 @@ static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
{
if (a3 < 32)
- uasm_i_drotr(p, a1, a2, a3);
+ ISAOPC(_drotr)(p, a1, a2, a3);
else
- uasm_i_drotr32(p, a1, a2, a3 - 32);
+ ISAOPC(_drotr32)(p, a1, a2, a3 - 32);
}
static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
{
if (a3 < 32)
- uasm_i_dsll(p, a1, a2, a3);
+ ISAOPC(_dsll)(p, a1, a2, a3);
else
- uasm_i_dsll32(p, a1, a2, a3 - 32);
+ ISAOPC(_dsll32)(p, a1, a2, a3 - 32);
}
static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
{
if (a3 < 32)
- uasm_i_dsrl(p, a1, a2, a3);
+ ISAOPC(_dsrl)(p, a1, a2, a3);
else
- uasm_i_dsrl32(p, a1, a2, a3 - 32);
+ ISAOPC(_dsrl32)(p, a1, a2, a3 - 32);
}
/* Handle relocations. */
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 4d078815eaa5..0f4aec2ad1e6 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -7,6 +7,7 @@
*
* Copyright (C) 1996, 2000 by Ralf Baechle
* Copyright (C) 2006 by Thiemo Seufer
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
#ifndef _UAPI_ASM_INST_H
#define _UAPI_ASM_INST_H
@@ -193,6 +194,282 @@ enum lx_func {
};
/*
+ * (microMIPS) Major opcodes.
+ */
+enum mm_major_op {
+ mm_pool32a_op, mm_pool16a_op, mm_lbu16_op, mm_move16_op,
+ mm_addi32_op, mm_lbu32_op, mm_sb32_op, mm_lb32_op,
+ mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op,
+ mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op,
+ mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op,
+ mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op,
+ mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op,
+ mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op,
+ mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op,
+ mm_slti32_op, mm_beq32_op, mm_swc132_op, mm_lwc132_op,
+ mm_reserved5_op, mm_reserved6_op, mm_sh16_op, mm_bnez16_op,
+ mm_sltiu32_op, mm_bne32_op, mm_sdc132_op, mm_ldc132_op,
+ mm_reserved7_op, mm_reserved8_op, mm_swsp16_op, mm_b16_op,
+ mm_andi32_op, mm_j32_op, mm_sd32_op, mm_ld32_op,
+ mm_reserved11_op, mm_reserved12_op, mm_sw16_op, mm_li16_op,
+ mm_jalx32_op, mm_jal32_op, mm_sw32_op, mm_lw32_op,
+};
+
+/*
+ * (microMIPS) POOL32I minor opcodes.
+ */
+enum mm_32i_minor_op {
+ mm_bltz_op, mm_bltzal_op, mm_bgez_op, mm_bgezal_op,
+ mm_blez_op, mm_bnezc_op, mm_bgtz_op, mm_beqzc_op,
+ mm_tlti_op, mm_tgei_op, mm_tltiu_op, mm_tgeiu_op,
+ mm_tnei_op, mm_lui_op, mm_teqi_op, mm_reserved13_op,
+ mm_synci_op, mm_bltzals_op, mm_reserved14_op, mm_bgezals_op,
+ mm_bc2f_op, mm_bc2t_op, mm_reserved15_op, mm_reserved16_op,
+ mm_reserved17_op, mm_reserved18_op, mm_bposge64_op, mm_bposge32_op,
+ mm_bc1f_op, mm_bc1t_op, mm_reserved19_op, mm_reserved20_op,
+ mm_bc1any2f_op, mm_bc1any2t_op, mm_bc1any4f_op, mm_bc1any4t_op,
+};
+
+/*
+ * (microMIPS) POOL32A minor opcodes.
+ */
+enum mm_32a_minor_op {
+ mm_sll32_op = 0x000,
+ mm_ins_op = 0x00c,
+ mm_ext_op = 0x02c,
+ mm_pool32axf_op = 0x03c,
+ mm_srl32_op = 0x040,
+ mm_sra_op = 0x080,
+ mm_rotr_op = 0x0c0,
+ mm_lwxs_op = 0x118,
+ mm_addu32_op = 0x150,
+ mm_subu32_op = 0x1d0,
+ mm_and_op = 0x250,
+ mm_or32_op = 0x290,
+ mm_xor32_op = 0x310,
+};
+
+/*
+ * (microMIPS) POOL32B functions.
+ */
+enum mm_32b_func {
+ mm_lwc2_func = 0x0,
+ mm_lwp_func = 0x1,
+ mm_ldc2_func = 0x2,
+ mm_ldp_func = 0x4,
+ mm_lwm32_func = 0x5,
+ mm_cache_func = 0x6,
+ mm_ldm_func = 0x7,
+ mm_swc2_func = 0x8,
+ mm_swp_func = 0x9,
+ mm_sdc2_func = 0xa,
+ mm_sdp_func = 0xc,
+ mm_swm32_func = 0xd,
+ mm_sdm_func = 0xf,
+};
+
+/*
+ * (microMIPS) POOL32C functions.
+ */
+enum mm_32c_func {
+ mm_pref_func = 0x2,
+ mm_ll_func = 0x3,
+ mm_swr_func = 0x9,
+ mm_sc_func = 0xb,
+ mm_lwu_func = 0xe,
+};
+
+/*
+ * (microMIPS) POOL32AXF minor opcodes.
+ */
+enum mm_32axf_minor_op {
+ mm_mfc0_op = 0x003,
+ mm_mtc0_op = 0x00b,
+ mm_tlbp_op = 0x00d,
+ mm_jalr_op = 0x03c,
+ mm_tlbr_op = 0x04d,
+ mm_jalrhb_op = 0x07c,
+ mm_tlbwi_op = 0x08d,
+ mm_tlbwr_op = 0x0cd,
+ mm_jalrs_op = 0x13c,
+ mm_jalrshb_op = 0x17c,
+ mm_syscall_op = 0x22d,
+ mm_eret_op = 0x3cd,
+};
+
+/*
+ * (microMIPS) POOL32F minor opcodes.
+ */
+enum mm_32f_minor_op {
+ mm_32f_00_op = 0x00,
+ mm_32f_01_op = 0x01,
+ mm_32f_02_op = 0x02,
+ mm_32f_10_op = 0x08,
+ mm_32f_11_op = 0x09,
+ mm_32f_12_op = 0x0a,
+ mm_32f_20_op = 0x10,
+ mm_32f_30_op = 0x18,
+ mm_32f_40_op = 0x20,
+ mm_32f_41_op = 0x21,
+ mm_32f_42_op = 0x22,
+ mm_32f_50_op = 0x28,
+ mm_32f_51_op = 0x29,
+ mm_32f_52_op = 0x2a,
+ mm_32f_60_op = 0x30,
+ mm_32f_70_op = 0x38,
+ mm_32f_73_op = 0x3b,
+ mm_32f_74_op = 0x3c,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_10_minor_op {
+ mm_lwxc1_op = 0x1,
+ mm_swxc1_op,
+ mm_ldxc1_op,
+ mm_sdxc1_op,
+ mm_luxc1_op,
+ mm_suxc1_op,
+};
+
+enum mm_32f_func {
+ mm_lwxc1_func = 0x048,
+ mm_swxc1_func = 0x088,
+ mm_ldxc1_func = 0x0c8,
+ mm_sdxc1_func = 0x108,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_40_minor_op {
+ mm_fmovf_op,
+ mm_fmovt_op,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_60_minor_op {
+ mm_fadd_op,
+ mm_fsub_op,
+ mm_fmul_op,
+ mm_fdiv_op,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_70_minor_op {
+ mm_fmovn_op,
+ mm_fmovz_op,
+};
+
+/*
+ * (microMIPS) POOL32FXF secondary minor opcodes for POOL32F.
+ */
+enum mm_32f_73_minor_op {
+ mm_fmov0_op = 0x01,
+ mm_fcvtl_op = 0x04,
+ mm_movf0_op = 0x05,
+ mm_frsqrt_op = 0x08,
+ mm_ffloorl_op = 0x0c,
+ mm_fabs0_op = 0x0d,
+ mm_fcvtw_op = 0x24,
+ mm_movt0_op = 0x25,
+ mm_fsqrt_op = 0x28,
+ mm_ffloorw_op = 0x2c,
+ mm_fneg0_op = 0x2d,
+ mm_cfc1_op = 0x40,
+ mm_frecip_op = 0x48,
+ mm_fceill_op = 0x4c,
+ mm_fcvtd0_op = 0x4d,
+ mm_ctc1_op = 0x60,
+ mm_fceilw_op = 0x6c,
+ mm_fcvts0_op = 0x6d,
+ mm_mfc1_op = 0x80,
+ mm_fmov1_op = 0x81,
+ mm_movf1_op = 0x85,
+ mm_ftruncl_op = 0x8c,
+ mm_fabs1_op = 0x8d,
+ mm_mtc1_op = 0xa0,
+ mm_movt1_op = 0xa5,
+ mm_ftruncw_op = 0xac,
+ mm_fneg1_op = 0xad,
+ mm_froundl_op = 0xcc,
+ mm_fcvtd1_op = 0xcd,
+ mm_froundw_op = 0xec,
+ mm_fcvts1_op = 0xed,
+};
+
+/*
+ * (microMIPS) POOL16C minor opcodes.
+ */
+enum mm_16c_minor_op {
+ mm_lwm16_op = 0x04,
+ mm_swm16_op = 0x05,
+ mm_jr16_op = 0x18,
+ mm_jrc_op = 0x1a,
+ mm_jalr16_op = 0x1c,
+ mm_jalrs16_op = 0x1e,
+};
+
+/*
+ * (microMIPS) POOL16D minor opcodes.
+ */
+enum mm_16d_minor_op {
+ mm_addius5_func,
+ mm_addiusp_func,
+};
+
+/*
+ * (MIPS16e) opcodes.
+ */
+enum MIPS16e_ops {
+ MIPS16e_jal_op = 003,
+ MIPS16e_ld_op = 007,
+ MIPS16e_i8_op = 014,
+ MIPS16e_sd_op = 017,
+ MIPS16e_lb_op = 020,
+ MIPS16e_lh_op = 021,
+ MIPS16e_lwsp_op = 022,
+ MIPS16e_lw_op = 023,
+ MIPS16e_lbu_op = 024,
+ MIPS16e_lhu_op = 025,
+ MIPS16e_lwpc_op = 026,
+ MIPS16e_lwu_op = 027,
+ MIPS16e_sb_op = 030,
+ MIPS16e_sh_op = 031,
+ MIPS16e_swsp_op = 032,
+ MIPS16e_sw_op = 033,
+ MIPS16e_rr_op = 035,
+ MIPS16e_extend_op = 036,
+ MIPS16e_i64_op = 037,
+};
+
+enum MIPS16e_i64_func {
+ MIPS16e_ldsp_func,
+ MIPS16e_sdsp_func,
+ MIPS16e_sdrasp_func,
+ MIPS16e_dadjsp_func,
+ MIPS16e_ldpc_func,
+};
+
+enum MIPS16e_rr_func {
+ MIPS16e_jr_func,
+};
+
+enum MIPS6e_i8_func {
+ MIPS16e_swrasp_func = 02,
+};
+
+/*
+ * (microMIPS & MIPS16e) NOP instruction.
+ */
+#define MM_NOP16 0x0c00
+
+/*
* Damn ... bitfields depend from byteorder :-(
*/
#ifdef __MIPSEB__
@@ -311,6 +588,262 @@ struct v_format { /* MDMX vector format */
;)))))))
};
+/*
+ * microMIPS instruction formats (32-bit length)
+ *
+ * NOTE:
+ * Parentheses denote whether the format is a microMIPS instruction or
+ * a MIPS32 instruction re-encoded for use in the microMIPS ASE.
+ */
+struct fb_format { /* FPU branch format (MIPS32) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int bc : 5,
+ BITFIELD_FIELD(unsigned int cc : 3,
+ BITFIELD_FIELD(unsigned int flag : 2,
+ BITFIELD_FIELD(signed int simmediate : 16,
+ ;)))))
+};
+
+struct fp0_format { /* FPU multiply and add format (MIPS32) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int fmt : 5,
+ BITFIELD_FIELD(unsigned int ft : 5,
+ BITFIELD_FIELD(unsigned int fs : 5,
+ BITFIELD_FIELD(unsigned int fd : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct mm_fp0_format { /* FPU multiply and add format (microMIPS) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int ft : 5,
+ BITFIELD_FIELD(unsigned int fs : 5,
+ BITFIELD_FIELD(unsigned int fd : 5,
+ BITFIELD_FIELD(unsigned int fmt : 3,
+ BITFIELD_FIELD(unsigned int op : 2,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;)))))))
+};
+
+struct fp1_format { /* FPU mfc1 and cfc1 format (MIPS32) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int op : 5,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(unsigned int fs : 5,
+ BITFIELD_FIELD(unsigned int fd : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct mm_fp1_format { /* FPU mfc1 and cfc1 format (microMIPS) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(unsigned int fs : 5,
+ BITFIELD_FIELD(unsigned int fmt : 2,
+ BITFIELD_FIELD(unsigned int op : 8,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct mm_fp2_format { /* FPU movt and movf format (microMIPS) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int fd : 5,
+ BITFIELD_FIELD(unsigned int fs : 5,
+ BITFIELD_FIELD(unsigned int cc : 3,
+ BITFIELD_FIELD(unsigned int zero : 2,
+ BITFIELD_FIELD(unsigned int fmt : 2,
+ BITFIELD_FIELD(unsigned int op : 3,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))))
+};
+
+struct mm_fp3_format { /* FPU abs and neg format (microMIPS) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(unsigned int fs : 5,
+ BITFIELD_FIELD(unsigned int fmt : 3,
+ BITFIELD_FIELD(unsigned int op : 7,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct mm_fp4_format { /* FPU c.cond format (microMIPS) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(unsigned int fs : 5,
+ BITFIELD_FIELD(unsigned int cc : 3,
+ BITFIELD_FIELD(unsigned int fmt : 3,
+ BITFIELD_FIELD(unsigned int cond : 4,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;)))))))
+};
+
+struct mm_fp5_format { /* FPU lwxc1 and swxc1 format (microMIPS) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int index : 5,
+ BITFIELD_FIELD(unsigned int base : 5,
+ BITFIELD_FIELD(unsigned int fd : 5,
+ BITFIELD_FIELD(unsigned int op : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct fp6_format { /* FPU madd and msub format (MIPS IV) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int fr : 5,
+ BITFIELD_FIELD(unsigned int ft : 5,
+ BITFIELD_FIELD(unsigned int fs : 5,
+ BITFIELD_FIELD(unsigned int fd : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct mm_fp6_format { /* FPU madd and msub format (microMIPS) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int ft : 5,
+ BITFIELD_FIELD(unsigned int fs : 5,
+ BITFIELD_FIELD(unsigned int fd : 5,
+ BITFIELD_FIELD(unsigned int fr : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct mm_i_format { /* Immediate format (microMIPS) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(signed int simmediate : 16,
+ ;))))
+};
+
+struct mm_m_format { /* Multi-word load/store format (microMIPS) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rd : 5,
+ BITFIELD_FIELD(unsigned int base : 5,
+ BITFIELD_FIELD(unsigned int func : 4,
+ BITFIELD_FIELD(signed int simmediate : 12,
+ ;)))))
+};
+
+struct mm_x_format { /* Scaled indexed load format (microMIPS) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int index : 5,
+ BITFIELD_FIELD(unsigned int base : 5,
+ BITFIELD_FIELD(unsigned int rd : 5,
+ BITFIELD_FIELD(unsigned int func : 11,
+ ;)))))
+};
+
+/*
+ * microMIPS instruction formats (16-bit length)
+ */
+struct mm_b0_format { /* Unconditional branch format (microMIPS) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(signed int simmediate : 10,
+ BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ ;)))
+};
+
+struct mm_b1_format { /* Conditional branch format (microMIPS) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 3,
+ BITFIELD_FIELD(signed int simmediate : 7,
+ BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ ;))))
+};
+
+struct mm16_m_format { /* Multi-word load/store format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int func : 4,
+ BITFIELD_FIELD(unsigned int rlist : 2,
+ BITFIELD_FIELD(unsigned int imm : 4,
+ BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ ;)))))
+};
+
+struct mm16_rb_format { /* Signed immediate format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rt : 3,
+ BITFIELD_FIELD(unsigned int base : 3,
+ BITFIELD_FIELD(signed int simmediate : 4,
+ BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ ;)))))
+};
+
+struct mm16_r3_format { /* Load from global pointer format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rt : 3,
+ BITFIELD_FIELD(signed int simmediate : 7,
+ BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ ;))))
+};
+
+struct mm16_r5_format { /* Load/store from stack pointer format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(signed int simmediate : 5,
+ BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ ;))))
+};
+
+/*
+ * MIPS16e instruction formats (16-bit length)
+ */
+struct m16e_rr {
+ BITFIELD_FIELD(unsigned int opcode : 5,
+ BITFIELD_FIELD(unsigned int rx : 3,
+ BITFIELD_FIELD(unsigned int nd : 1,
+ BITFIELD_FIELD(unsigned int l : 1,
+ BITFIELD_FIELD(unsigned int ra : 1,
+ BITFIELD_FIELD(unsigned int func : 5,
+ ;))))))
+};
+
+struct m16e_jal {
+ BITFIELD_FIELD(unsigned int opcode : 5,
+ BITFIELD_FIELD(unsigned int x : 1,
+ BITFIELD_FIELD(unsigned int imm20_16 : 5,
+ BITFIELD_FIELD(signed int imm25_21 : 5,
+ ;))))
+};
+
+struct m16e_i64 {
+ BITFIELD_FIELD(unsigned int opcode : 5,
+ BITFIELD_FIELD(unsigned int func : 3,
+ BITFIELD_FIELD(unsigned int imm : 8,
+ ;)))
+};
+
+struct m16e_ri64 {
+ BITFIELD_FIELD(unsigned int opcode : 5,
+ BITFIELD_FIELD(unsigned int func : 3,
+ BITFIELD_FIELD(unsigned int ry : 3,
+ BITFIELD_FIELD(unsigned int imm : 5,
+ ;))))
+};
+
+struct m16e_ri {
+ BITFIELD_FIELD(unsigned int opcode : 5,
+ BITFIELD_FIELD(unsigned int rx : 3,
+ BITFIELD_FIELD(unsigned int imm : 8,
+ ;)))
+};
+
+struct m16e_rri {
+ BITFIELD_FIELD(unsigned int opcode : 5,
+ BITFIELD_FIELD(unsigned int rx : 3,
+ BITFIELD_FIELD(unsigned int ry : 3,
+ BITFIELD_FIELD(unsigned int imm : 5,
+ ;))))
+};
+
+struct m16e_i8 {
+ BITFIELD_FIELD(unsigned int opcode : 5,
+ BITFIELD_FIELD(unsigned int func : 3,
+ BITFIELD_FIELD(unsigned int imm : 8,
+ ;)))
+};
+
union mips_instruction {
unsigned int word;
unsigned short halfword[2];
@@ -326,6 +859,37 @@ union mips_instruction {
struct b_format b_format;
struct ps_format ps_format;
struct v_format v_format;
+ struct fb_format fb_format;
+ struct fp0_format fp0_format;
+ struct mm_fp0_format mm_fp0_format;
+ struct fp1_format fp1_format;
+ struct mm_fp1_format mm_fp1_format;
+ struct mm_fp2_format mm_fp2_format;
+ struct mm_fp3_format mm_fp3_format;
+ struct mm_fp4_format mm_fp4_format;
+ struct mm_fp5_format mm_fp5_format;
+ struct fp6_format fp6_format;
+ struct mm_fp6_format mm_fp6_format;
+ struct mm_i_format mm_i_format;
+ struct mm_m_format mm_m_format;
+ struct mm_x_format mm_x_format;
+ struct mm_b0_format mm_b0_format;
+ struct mm_b1_format mm_b1_format;
+	struct mm16_m_format mm16_m_format;
+ struct mm16_rb_format mm16_rb_format;
+ struct mm16_r3_format mm16_r3_format;
+ struct mm16_r5_format mm16_r5_format;
+};
+
+union mips16e_instruction {
+ unsigned int full : 16;
+ struct m16e_rr rr;
+ struct m16e_jal jal;
+ struct m16e_i64 i64;
+ struct m16e_ri64 ri64;
+ struct m16e_ri ri;
+ struct m16e_rri rri;
+ struct m16e_i8 i8;
};
#endif /* _UAPI_ASM_INST_H */
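
The instruction-format structures above are only useful through the union that follows them: software reads one 32-bit (or 16-bit) word and then picks the view that matches the major opcode. A minimal user-space sketch of that idiom, assuming a big-endian bitfield layout (the kernel's BITFIELD_FIELD macro is what hides the endianness flip); the struct and field names here are illustrative, not the kernel's:

#include <stdio.h>

struct j_fmt {				/* simplified 32-bit J/JAL format */
	unsigned int opcode : 6;
	unsigned int target : 26;
};

union insn {
	unsigned int word;
	struct j_fmt j_format;
};

int main(void)
{
	union insn i = { .word = 0x0c000123 };	/* jal with target 0x123 */

	printf("opcode=%u target=0x%x\n", i.j_format.opcode, i.j_format.target);
	return 0;
}
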
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 520a908d45d6..6ad9e04bdf62 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -5,7 +5,7 @@
extra-y := head.o vmlinux.lds
obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
- ptrace.o reset.o setup.o signal.o syscall.o \
+ prom.o ptrace.o reset.o setup.o signal.o syscall.o \
time.o topology.o traps.o unaligned.o watch.o vdso.o
ifdef CONFIG_FUNCTION_TRACER
@@ -19,15 +19,16 @@ obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
+obj-$(CONFIG_CEVT_GIC) += cevt-gic.o
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
+obj-$(CONFIG_CSRC_GIC) += csrc-gic.o
obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o
obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
-obj-$(CONFIG_CSRC_GIC) += csrc-gic.o
obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -86,8 +87,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
-obj-$(CONFIG_OF) += prom.o
-
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 50285b2c7ffe..0845091ba480 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -17,6 +17,8 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
+#include <linux/kvm_host.h>
+
void output_ptreg_defines(void)
{
COMMENT("MIPS pt_regs offsets.");
@@ -328,3 +330,67 @@ void output_pbe_defines(void)
BLANK();
}
#endif
+
+void output_kvm_defines(void)
+{
+	COMMENT(" KVM/MIPS Specific offsets. ");
+ DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
+ OFFSET(VCPU_RUN, kvm_vcpu, run);
+ OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
+
+ OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
+ OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
+
+ OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
+ OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
+
+ OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
+ OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
+ OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
+ OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
+
+ OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
+
+ OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
+ OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
+ OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
+ OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
+ OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
+ OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
+ OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
+ OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
+ OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
+ OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
+ OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
+ OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
+ OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
+ OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
+ OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
+ OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
+ OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
+ OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
+ OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
+ OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
+ OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
+ OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
+ OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
+ OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
+ OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
+ OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
+ OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
+ OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
+ OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
+ OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
+ OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
+ OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
+ OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
+ OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
+ OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
+ OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
+ OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
+ OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
+
+ OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
+ OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
+ BLANK();
+}
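
output_kvm_defines() above does not run at boot; it is compiled once so that a build step can scrape the DEFINE/OFFSET values into an assembler-visible header. A rough user-space analogue, purely for illustration (the struct below is a made-up stand-in, and the real mechanism emits the values through inline-asm markers rather than printf):

#include <stdio.h>
#include <stddef.h>

struct kvm_vcpu_arch_demo {		/* hypothetical stand-in, not the real struct */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long gprs[32];
	unsigned long lo, hi, pc;
};

int main(void)
{
	printf("VCPU_HOST_STACK = %zu\n", offsetof(struct kvm_vcpu_arch_demo, host_stack));
	printf("VCPU_R0         = %zu\n", offsetof(struct kvm_vcpu_arch_demo, gprs[0]));
	printf("VCPU_PC         = %zu\n", offsetof(struct kvm_vcpu_arch_demo, pc));
	return 0;
}
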
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 556a4357d7fc..97c5a1668e53 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -48,7 +48,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
__res; \
})
+#ifdef CONFIG_KVM_GUEST
+#define TASK32_SIZE 0x3fff8000UL
+#else
#define TASK32_SIZE 0x7fff8000UL
+#endif
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 83ffe950f710..46c2ad0703a0 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -14,10 +14,186 @@
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
+/*
+ * Calculate and return exception PC in case of branch delay slot
+ * for microMIPS and MIPS16e. It does not clear the ISA mode bit.
+ */
+int __isa_exception_epc(struct pt_regs *regs)
+{
+ unsigned short inst;
+ long epc = regs->cp0_epc;
+
+ /* Calculate exception PC in branch delay slot. */
+ if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) {
+ /* This should never happen because delay slot was checked. */
+ force_sig(SIGSEGV, current);
+ return epc;
+ }
+ if (cpu_has_mips16) {
+ if (((union mips16e_instruction)inst).ri.opcode
+ == MIPS16e_jal_op)
+ epc += 4;
+ else
+ epc += 2;
+ } else if (mm_insn_16bit(inst))
+ epc += 2;
+ else
+ epc += 4;
+
+ return epc;
+}
+
+/*
+ * Compute return address and emulate branch in microMIPS mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __microMIPS_compute_return_epc(struct pt_regs *regs)
+{
+ u16 __user *pc16;
+ u16 halfword;
+ unsigned int word;
+ unsigned long contpc;
+ struct mm_decoded_insn mminsn = { 0 };
+
+ mminsn.micro_mips_mode = 1;
+
+ /* This load never faults. */
+ pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 2;
+ word = ((unsigned int)halfword << 16);
+ mminsn.pc_inc = 2;
+
+ if (!mm_insn_16bit(halfword)) {
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 4;
+ mminsn.pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.insn = word;
+
+ if (get_user(halfword, pc16))
+ goto sigsegv;
+ mminsn.next_pc_inc = 2;
+ word = ((unsigned int)halfword << 16);
+
+ if (!mm_insn_16bit(halfword)) {
+ pc16++;
+ if (get_user(halfword, pc16))
+ goto sigsegv;
+ mminsn.next_pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.next_insn = word;
+
+ mm_isBranchInstr(regs, mminsn, &contpc);
+
+ regs->cp0_epc = contpc;
+
+ return 0;
+
+sigsegv:
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
+}
+
+/*
+ * Compute return address and emulate branch in MIPS16e mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __MIPS16e_compute_return_epc(struct pt_regs *regs)
+{
+ u16 __user *addr;
+ union mips16e_instruction inst;
+ u16 inst2;
+ u32 fullinst;
+ long epc;
+
+ epc = regs->cp0_epc;
+
+ /* Read the instruction. */
+ addr = (u16 __user *)msk_isa16_mode(epc);
+ if (__get_user(inst.full, addr)) {
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
+ }
+
+ switch (inst.ri.opcode) {
+ case MIPS16e_extend_op:
+ regs->cp0_epc += 4;
+ return 0;
+
+ /*
+ * JAL and JALX in MIPS16e mode
+ */
+ case MIPS16e_jal_op:
+ addr += 1;
+ if (__get_user(inst2, addr)) {
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
+ }
+ fullinst = ((unsigned)inst.full << 16) | inst2;
+ regs->regs[31] = epc + 6;
+ epc += 4;
+ epc >>= 28;
+ epc <<= 28;
+ /*
+		 * JAL:5 X:1 TARGET[20:16]:5 TARGET[25:21]:5 TARGET[15:0]:16
+ *
+ * ......TARGET[15:0].................TARGET[20:16]...........
+ * ......TARGET[25:21]
+ */
+ epc |=
+ ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) |
+ ((fullinst & 0x1f0000) << 7);
+ if (!inst.jal.x)
+ set_isa16_mode(epc); /* Set ISA mode bit. */
+ regs->cp0_epc = epc;
+ return 0;
+
+ /*
+ * J(AL)R(C)
+ */
+ case MIPS16e_rr_op:
+ if (inst.rr.func == MIPS16e_jr_func) {
+
+ if (inst.rr.ra)
+ regs->cp0_epc = regs->regs[31];
+ else
+ regs->cp0_epc =
+ regs->regs[reg16to32[inst.rr.rx]];
+
+ if (inst.rr.l) {
+ if (inst.rr.nd)
+ regs->regs[31] = epc + 2;
+ else
+ regs->regs[31] = epc + 4;
+ }
+ return 0;
+ }
+ break;
+ }
+
+ /*
+ * All other cases have no branch delay slot and are 16-bits.
+ * Branches do not cause an exception.
+ */
+ regs->cp0_epc += 2;
+
+ return 0;
+}
+
/**
* __compute_return_epc_for_insn - Computes the return address and do emulate
* branch simulation, if required.
@@ -129,6 +305,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
epc <<= 28;
epc |= (insn.j_format.target << 2);
regs->cp0_epc = epc;
+ if (insn.i_format.opcode == jalx_op)
+ set_isa16_mode(regs->cp0_epc);
break;
/*
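
The JAL/JALX case in __MIPS16e_compute_return_epc() above reassembles a 26-bit jump target from three immediate fields that the 32-bit extended instruction stores out of order. The same bit shuffle, restated as a stand-alone helper with the masks written out (illustrative only, not part of the patch):

static unsigned long mips16e_jal_target(unsigned long epc, unsigned int fullinst)
{
	unsigned long target;

	target  = ((epc + 4) >> 28) << 28;	/* stay in the same 256MB segment */
	target |= (fullinst & 0x0000ffff) << 2;	/* TARGET[15:0]  -> addr[17:2]  */
	target |= (fullinst & 0x03e00000) >> 3;	/* TARGET[25:21] -> addr[22:18] */
	target |= (fullinst & 0x001f0000) << 7;	/* TARGET[20:16] -> addr[27:23] */

	return target;
}

The ISA mode bit is then set on the result for JAL (but not JALX), exactly as the code above does with set_isa16_mode().
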
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
new file mode 100644
index 000000000000..730eaf92c018
--- /dev/null
+++ b/arch/mips/kernel/cevt-gic.c
@@ -0,0 +1,104 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+#include <asm/gic.h>
+#include <asm/mips-boards/maltaint.h>
+
+DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
+int gic_timer_irq_installed;
+
+
+static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ u64 cnt;
+ int res;
+
+ cnt = gic_read_count();
+ cnt += (u64)delta;
+ gic_write_compare(cnt);
+ res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
+ return res;
+}
+
+void gic_set_clock_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do ... */
+}
+
+irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
+
+ gic_write_compare(gic_read_compare());
+ cd = &per_cpu(gic_clockevent_device, cpu);
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+}
+
+struct irqaction gic_compare_irqaction = {
+ .handler = gic_compare_interrupt,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+ .name = "timer",
+};
+
+
+void gic_event_handler(struct clock_event_device *dev)
+{
+}
+
+int __cpuinit gic_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+ unsigned int irq;
+
+ if (!cpu_has_counter || !gic_frequency)
+ return -ENXIO;
+
+ irq = MIPS_GIC_IRQ_BASE;
+
+ cd = &per_cpu(gic_clockevent_device, cpu);
+
+ cd->name = "MIPS GIC";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+
+ clockevent_set_clock(cd, gic_frequency);
+
+ /* Calculate the min / max delta */
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+
+ cd->rating = 300;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = gic_next_event;
+ cd->set_mode = gic_set_clock_mode;
+ cd->event_handler = gic_event_handler;
+
+ clockevents_register_device(cd);
+
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002);
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK);
+
+ if (gic_timer_irq_installed)
+ return 0;
+
+ gic_timer_irq_installed = 1;
+
+ setup_irq(irq, &gic_compare_irqaction);
+ irq_set_handler(irq, handle_percpu_irq);
+ return 0;
+}
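
gic_next_event() above follows the usual one-shot clockevent pattern: program the comparator relative to the current count, then re-read the count to detect a deadline that has already passed. A generic restatement of that check (read_count()/write_compare() are placeholders for the GIC accessors, and -1 stands in for -ETIME):

#include <stdint.h>

extern uint64_t read_count(void);		/* placeholder accessor */
extern void write_compare(uint64_t cnt);	/* placeholder accessor */

static int next_event(unsigned long delta)
{
	uint64_t cnt = read_count() + delta;

	write_compare(cnt);
	/* If the counter already passed the compare value the interrupt may
	 * never fire, so report failure and let the core pick a new delta. */
	return ((int64_t)(read_count() - cnt) >= 0) ? -1 : 0;
}
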
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 07b847d77f5d..02033eaf8825 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -23,7 +23,6 @@
*/
#ifndef CONFIG_MIPS_MT_SMTC
-
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
{
@@ -49,7 +48,6 @@ DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;
#ifndef CONFIG_MIPS_MT_SMTC
-
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
const int r2 = cpu_has_mips_r2;
@@ -74,6 +72,9 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
/* Clear Count/Compare Interrupt */
write_c0_compare(read_c0_compare());
cd = &per_cpu(mips_clockevent_device, cpu);
+#ifdef CONFIG_CEVT_GIC
+ if (!gic_present)
+#endif
cd->event_handler(cd);
}
@@ -118,6 +119,10 @@ int c0_compare_int_usable(void)
unsigned int delta;
unsigned int cnt;
+#ifdef CONFIG_KVM_GUEST
+ return 1;
+#endif
+
/*
* IP7 already pending? Try to clear it by acking the timer.
*/
@@ -166,7 +171,6 @@ int c0_compare_int_usable(void)
}
#ifndef CONFIG_MIPS_MT_SMTC
-
int __cpuinit r4k_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
@@ -206,6 +210,9 @@ int __cpuinit r4k_clockevent_init(void)
cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
+#ifdef CONFIG_CEVT_GIC
+ if (!gic_present)
+#endif
clockevents_register_device(cd);
if (cp0_timer_irq_installed)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5fe66a0c3224..4bbffdb9024f 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -470,6 +470,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
c->options |= MIPS_CPU_ULRI;
if (config3 & MIPS_CONF3_ISA)
c->options |= MIPS_CPU_MICROMIPS;
+#ifdef CONFIG_CPU_MICROMIPS
+ write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE);
+#endif
if (config3 & MIPS_CONF3_VZ)
c->ases |= MIPS_ASE_VZ;
diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c
index 5dca24bce51b..e02620901117 100644
--- a/arch/mips/kernel/csrc-gic.c
+++ b/arch/mips/kernel/csrc-gic.c
@@ -5,23 +5,14 @@
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
-#include <linux/clocksource.h>
#include <linux/init.h>
+#include <linux/time.h>
-#include <asm/time.h>
#include <asm/gic.h>
static cycle_t gic_hpt_read(struct clocksource *cs)
{
- unsigned int hi, hi2, lo;
-
- do {
- GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
- GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
- GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
- } while (hi2 != hi);
-
- return (((cycle_t) hi) << 32) + lo;
+ return gic_read_count();
}
static struct clocksource gic_clocksource = {
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index ecb347ce1b3d..5c2ba9f08a80 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -5,8 +5,8 @@
*
* Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2002, 2007 Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/init.h>
@@ -21,8 +21,10 @@
#include <asm/war.h>
#include <asm/thread_info.h>
+#ifdef CONFIG_MIPS_MT_SMTC
#define PANIC_PIC(msg) \
- .set push; \
+ .set push; \
+ .set nomicromips; \
.set reorder; \
PTR_LA a0,8f; \
.set noat; \
@@ -31,17 +33,10 @@
9: b 9b; \
.set pop; \
TEXT(msg)
+#endif
__INIT
-NESTED(except_vec0_generic, 0, sp)
- PANIC_PIC("Exception vector 0 called")
- END(except_vec0_generic)
-
-NESTED(except_vec1_generic, 0, sp)
- PANIC_PIC("Exception vector 1 called")
- END(except_vec1_generic)
-
/*
* General exception vector for all other CPUs.
*
@@ -138,12 +133,19 @@ LEAF(r4k_wait)
nop
nop
nop
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+ nop
+ nop
+ nop
+#endif
.set mips3
wait
/* end of rollback region (the region size must be power of two) */
- .set pop
1:
jr ra
+ nop
+ .set pop
END(r4k_wait)
.macro BUILD_ROLLBACK_PROLOGUE handler
@@ -201,7 +203,11 @@ NESTED(handle_int, PT_SIZE, sp)
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
PTR_LA ra, ret_from_irq
- j plat_irq_dispatch
+ PTR_LA v0, plat_irq_dispatch
+ jr v0
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
END(handle_int)
__INIT
@@ -222,11 +228,14 @@ NESTED(except_vec4, 0, sp)
/*
* EJTAG debug exception handler.
* The EJTAG debug exception entry point is 0xbfc00480, which
- * normally is in the boot PROM, so the boot PROM must do a
+ * normally is in the boot PROM, so the boot PROM must do an
* unconditional jump to this vector.
*/
NESTED(except_vec_ejtag_debug, 0, sp)
j ejtag_debug_handler
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
END(except_vec_ejtag_debug)
__FINIT
@@ -251,9 +260,10 @@ NESTED(except_vec_vi, 0, sp)
FEXPORT(except_vec_vi_mori)
ori a0, $0, 0
#endif /* CONFIG_MIPS_MT_SMTC */
+ PTR_LA v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
lui v0, 0 /* Patched */
- j except_vec_vi_handler
+ jr v1
FEXPORT(except_vec_vi_ori)
ori v0, 0 /* Patched */
.set pop
@@ -354,6 +364,9 @@ EXPORT(ejtag_debug_buffer)
*/
NESTED(except_vec_nmi, 0, sp)
j nmi_handler
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
END(except_vec_nmi)
__FINIT
@@ -480,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set noreorder
/* check if TLB contains a entry for EPC */
MFC0 k1, CP0_ENTRYHI
- andi k1, 0xff /* ASID_MASK */
+ andi k1, 0xff /* ASID_MASK patched at run-time!! */
MFC0 k0, CP0_EPC
PTR_SRL k0, _PAGE_SHIFT + 1
PTR_SLL k0, _PAGE_SHIFT + 1
@@ -500,13 +513,35 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set push
.set noat
.set noreorder
- /* 0x7c03e83b: rdhwr v1,$29 */
+ /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */
+ /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
MFC0 k1, CP0_EPC
- lui k0, 0x7c03
- lw k1, (k1)
- ori k0, 0xe83b
- .set reorder
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
+ and k0, k1, 1
+ beqz k0, 1f
+ xor k1, k0
+ lhu k0, (k1)
+ lhu k1, 2(k1)
+ ins k1, k0, 16, 16
+ lui k0, 0x007d
+ b docheck
+ ori k0, 0x6b3c
+1:
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
+#else
+ andi k0, k1, 1
+ bnez k0, handle_ri
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
+#endif
+ .set reorder
+docheck:
bne k0, k1, handle_ri /* if not ours */
+
+isrdhwr:
/* The insn is rdhwr. No need to check CAUSE.BD here. */
get_saved_sp /* k1 := current_thread_info */
.set noreorder
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 485e6a961b31..c01b307317a9 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/irq.h>
+#include <linux/clocksource.h>
#include <asm/io.h>
#include <asm/gic.h>
@@ -19,6 +20,8 @@
#include <linux/hardirq.h>
#include <asm-generic/bitops/find.h>
+unsigned int gic_frequency;
+unsigned int gic_present;
unsigned long _gic_base;
unsigned int gic_irq_base;
unsigned int gic_irq_flags[GIC_NUM_INTRS];
@@ -30,6 +33,39 @@ static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static struct gic_pending_regs pending_regs[NR_CPUS];
static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
+#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
+cycle_t gic_read_count(void)
+{
+ unsigned int hi, hi2, lo;
+
+ do {
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
+ } while (hi2 != hi);
+
+ return (((cycle_t) hi) << 32) + lo;
+}
+
+void gic_write_compare(cycle_t cnt)
+{
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
+ (int)(cnt >> 32));
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
+ (int)(cnt & 0xffffffff));
+}
+
+cycle_t gic_read_compare(void)
+{
+ unsigned int hi, lo;
+
+ GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
+ GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
+
+ return (((cycle_t) hi) << 32) + lo;
+}
+#endif
+
unsigned int gic_get_timer_pending(void)
{
unsigned int vpe_pending;
@@ -116,6 +152,17 @@ static void __init vpe_local_setup(unsigned int numvpes)
}
}
+unsigned int gic_compare_int(void)
+{
+ unsigned int pending;
+
+ GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
+ if (pending & GIC_VPE_PEND_CMP_MSK)
+ return 1;
+ else
+ return 0;
+}
+
unsigned int gic_get_int(void)
{
unsigned int i;
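
gic_read_count() above uses the classic high/low/high sequence for sampling a 64-bit counter that is only accessible as two 32-bit registers. The same idiom in isolation (read_hi()/read_lo() are placeholders for the two GICREAD accesses):

#include <stdint.h>

extern uint32_t read_hi(void);	/* placeholder for GIC_SH_COUNTER_63_32 */
extern uint32_t read_lo(void);	/* placeholder for GIC_SH_COUNTER_31_00 */

static uint64_t read_count64(void)
{
	uint32_t hi, hi2, lo;

	do {
		hi  = read_hi();
		lo  = read_lo();
		hi2 = read_hi();
		/* If the high word changed, the low word wrapped between the
		 * two reads; retry so hi and lo describe the same instant. */
	} while (hi2 != hi);

	return ((uint64_t)hi << 32) | lo;
}
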
diff --git a/arch/mips/kernel/mips_machine.c b/arch/mips/kernel/mips_machine.c
index 411a058d2c53..876097529697 100644
--- a/arch/mips/kernel/mips_machine.c
+++ b/arch/mips/kernel/mips_machine.c
@@ -11,9 +11,9 @@
#include <linux/slab.h>
#include <asm/mips_machine.h>
+#include <asm/prom.h>
static struct mips_machine *mips_machine __initdata;
-static char *mips_machine_name = "Unknown";
#define for_each_machine(mach) \
for ((mach) = (struct mips_machine *)&__mips_machines_start; \
@@ -21,25 +21,6 @@ static char *mips_machine_name = "Unknown";
(unsigned long)(mach) < (unsigned long)&__mips_machines_end; \
(mach)++)
-__init void mips_set_machine_name(const char *name)
-{
- char *p;
-
- if (name == NULL)
- return;
-
- p = kstrdup(name, GFP_KERNEL);
- if (!p)
- pr_err("MIPS: no memory for machine_name\n");
-
- mips_machine_name = p;
-}
-
-char *mips_get_machine_name(void)
-{
- return mips_machine_name;
-}
-
__init int mips_machtype_setup(char *id)
{
struct mips_machine *mach;
@@ -79,7 +60,6 @@ __init void mips_machine_setup(void)
return;
mips_set_machine_name(mips_machine->mach_name);
- pr_info("MIPS: machine is %s\n", mips_machine_name);
if (mips_machine->mach_setup)
mips_machine->mach_setup();
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 7a54f74b7818..a3e461408b7e 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -12,7 +12,7 @@
#include <asm/cpu-features.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
-#include <asm/mips_machine.h>
+#include <asm/prom.h>
unsigned int vced_count, vcei_count;
@@ -99,6 +99,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_vz) seq_printf(m, "%s", " vz");
seq_printf(m, "\n");
+ if (cpu_has_mmips) {
+ seq_printf(m, "micromips kernel\t: %s\n",
+ (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no");
+ }
seq_printf(m, "shadow register sets\t: %d\n",
cpu_data[n].srsets);
seq_printf(m, "kscratch registers\t: %d\n",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index cfc742d75b7f..eb902c1f0cad 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -7,6 +7,7 @@
* Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/errno.h>
#include <linux/sched.h>
@@ -225,34 +226,115 @@ struct mips_frame_info {
static inline int is_ra_save_ins(union mips_instruction *ip)
{
+#ifdef CONFIG_CPU_MICROMIPS
+ union mips_instruction mmi;
+
+ /*
+ * swsp ra,offset
+ * swm16 reglist,offset(sp)
+ * swm32 reglist,offset(sp)
+ * sw32 ra,offset(sp)
+	 * jraddiusp - NOT SUPPORTED
+ *
+ * microMIPS is way more fun...
+ */
+ if (mm_insn_16bit(ip->halfword[0])) {
+ mmi.word = (ip->halfword[0] << 16);
+ return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
+ mmi.mm16_r5_format.rt == 31) ||
+ (mmi.mm16_m_format.opcode == mm_pool16c_op &&
+ mmi.mm16_m_format.func == mm_swm16_op));
+	} else {
+ mmi.halfword[0] = ip->halfword[1];
+ mmi.halfword[1] = ip->halfword[0];
+ return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
+ mmi.mm_m_format.rd > 9 &&
+ mmi.mm_m_format.base == 29 &&
+ mmi.mm_m_format.func == mm_swm32_func) ||
+ (mmi.i_format.opcode == mm_sw32_op &&
+ mmi.i_format.rs == 29 &&
+ mmi.i_format.rt == 31));
+ }
+#else
/* sw / sd $ra, offset($sp) */
return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
ip->i_format.rs == 29 &&
ip->i_format.rt == 31;
+#endif
}
static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
{
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * jr16,jrc,jalr16,jalr16
+ * jal
+ * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
+ * jraddiusp - NOT SUPPORTED
+ *
+ * microMIPS is kind of more fun...
+ */
+ union mips_instruction mmi;
+
+ mmi.word = (ip->halfword[0] << 16);
+
+ if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
+ (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
+ ip->j_format.opcode == mm_jal32_op)
+ return 1;
+ if (ip->r_format.opcode != mm_pool32a_op ||
+ ip->r_format.func != mm_pool32axf_op)
+ return 0;
+ return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
+#else
if (ip->j_format.opcode == jal_op)
return 1;
if (ip->r_format.opcode != spec_op)
return 0;
return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
+#endif
}
static inline int is_sp_move_ins(union mips_instruction *ip)
{
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * addiusp -imm
+ * addius5 sp,-imm
+ * addiu32 sp,sp,-imm
+	 * jraddiusp - NOT SUPPORTED
+ *
+ * microMIPS is not more fun...
+ */
+ if (mm_insn_16bit(ip->halfword[0])) {
+ union mips_instruction mmi;
+
+ mmi.word = (ip->halfword[0] << 16);
+ return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
+			mmi.mm16_r3_format.simmediate & mm_addiusp_func) ||
+ (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
+ mmi.mm16_r5_format.rt == 29));
+ }
+ return (ip->mm_i_format.opcode == mm_addiu32_op &&
+ ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
+#else
/* addiu/daddiu sp,sp,-imm */
if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
return 0;
if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
return 1;
+#endif
return 0;
}
static int get_frame_info(struct mips_frame_info *info)
{
+#ifdef CONFIG_CPU_MICROMIPS
+ union mips_instruction *ip = (void *) (((char *) info->func) - 1);
+#else
union mips_instruction *ip = info->func;
+#endif
unsigned max_insns = info->func_size / sizeof(union mips_instruction);
unsigned i;
@@ -272,7 +354,26 @@ static int get_frame_info(struct mips_frame_info *info)
break;
if (!info->frame_size) {
if (is_sp_move_ins(ip))
+ {
+#ifdef CONFIG_CPU_MICROMIPS
+		if (mm_insn_16bit(ip->halfword[0])) {
+ unsigned short tmp;
+
+			if (ip->halfword[0] & mm_addiusp_func) {
+ tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
+ info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
+ } else {
+ tmp = (ip->halfword[0] >> 1);
+ info->frame_size = -(signed short)(tmp & 0xf);
+ }
+ ip = (void *) &ip->halfword[1];
+ ip--;
+ } else
+#endif
info->frame_size = - ip->i_format.simmediate;
+ }
continue;
}
if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 028f6f837ef9..5712bb532245 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -23,6 +23,23 @@
#include <asm/page.h>
#include <asm/prom.h>
+static char mips_machine_name[64] = "Unknown";
+
+__init void mips_set_machine_name(const char *name)
+{
+ if (name == NULL)
+ return;
+
+	strlcpy(mips_machine_name, name, sizeof(mips_machine_name));
+ pr_info("MIPS: machine is %s\n", mips_get_machine_name());
+}
+
+char *mips_get_machine_name(void)
+{
+ return mips_machine_name;
+}
+
+#ifdef CONFIG_OF
int __init early_init_dt_scan_memory_arch(unsigned long node,
const char *uname, int depth,
void *data)
@@ -50,6 +67,18 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start,
}
#endif
+int __init early_init_dt_scan_model(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ if (!depth) {
+ char *model = of_get_flat_dt_prop(node, "model", NULL);
+
+ if (model)
+ mips_set_machine_name(model);
+ }
+ return 0;
+}
+
void __init early_init_devtree(void *params)
{
/* Setup flat device-tree pointer */
@@ -65,6 +94,9 @@ void __init early_init_devtree(void *params)
/* Scan memory nodes */
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
+
+ /* try to load the mips machine name */
+ of_scan_flat_dt(early_init_dt_scan_model, NULL);
}
void __init __dt_setup_arch(struct boot_param_header *bph)
@@ -79,3 +111,4 @@ void __init __dt_setup_arch(struct boot_param_header *bph)
early_init_devtree(initial_boot_params);
}
+#endif
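
The machine-name copy above goes into a fixed 64-byte mips_machine_name[] buffer, so it must be bounded and leave a NUL even when the device-tree model string is longer than the buffer. A bounded copy with that guarantee spelled out by hand, for illustration (the kernel's strlcpy() gives the same behaviour):

#include <stddef.h>

static void copy_name(char *dst, const char *src, size_t dstsize)
{
	size_t i;

	for (i = 0; i + 1 < dstsize && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';			/* always NUL-terminate, even on truncation */
}
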
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 9ea29649fc28..9b36424b03c5 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -138,9 +138,18 @@ stackargs:
5: jr t1
sw t5, 16(sp) # argument #5 to ksp
+#ifdef CONFIG_CPU_MICROMIPS
sw t8, 28(sp) # argument #8 to ksp
+ nop
sw t7, 24(sp) # argument #7 to ksp
+ nop
sw t6, 20(sp) # argument #6 to ksp
+ nop
+#else
+ sw t8, 28(sp) # argument #8 to ksp
+ sw t7, 24(sp) # argument #7 to ksp
+ sw t6, 20(sp) # argument #6 to ksp
+#endif
6: j stack_done # go back
nop
.set pop
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 4c774d5d5087..c7f90519e58c 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -23,6 +23,7 @@
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
+#include <linux/sizes.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
@@ -77,6 +78,8 @@ EXPORT_SYMBOL(mips_io_port_base);
static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
+static void *detect_magic __initdata = detect_memory_region;
+
void __init add_memory_region(phys_t start, phys_t size, long type)
{
int x = boot_mem_map.nr_map;
@@ -122,6 +125,25 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
boot_mem_map.nr_map++;
}
+void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max)
+{
+ void *dm = &detect_magic;
+ phys_t size;
+
+ for (size = sz_min; size < sz_max; size <<= 1) {
+ if (!memcmp(dm, dm + size, sizeof(detect_magic)))
+ break;
+ }
+
+ pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
+ ((unsigned long long) size) / SZ_1M,
+ (unsigned long long) start,
+ ((unsigned long long) sz_min) / SZ_1M,
+ ((unsigned long long) sz_max) / SZ_1M);
+
+ add_memory_region(start, size, BOOT_MEM_RAM);
+}
+
static void __init print_memory_map(void)
{
int i;
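
detect_memory_region() above sizes RAM by exploiting address aliasing: it compares a marker in the kernel image with the same offset `size` bytes higher, doubling `size` until the two locations read back identically, i.e. until the address space wrapped back onto the marker. A user-space style restatement of the probe (the base pointer, bounds and marker value are illustrative):

#include <stdint.h>
#include <string.h>

static size_t probe_ram_size(uint8_t *base, size_t sz_min, size_t sz_max)
{
	static const uint8_t magic[4] = { 0xde, 0xad, 0xbe, 0xef };
	size_t size;

	memcpy(base, magic, sizeof(magic));
	for (size = sz_min; size < sz_max; size <<= 1) {
		/* when base + size aliases base, the marker reads back */
		if (!memcmp(base, base + size, sizeof(magic)))
			break;
	}
	return size;
}
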
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index b5e88fd83277..fd3ef2c2afbc 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -35,6 +35,7 @@
#include <asm/war.h>
#include <asm/vdso.h>
#include <asm/dsp.h>
+#include <asm/inst.h>
#include "signal-common.h"
@@ -480,7 +481,15 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
sigset_t *oldset = sigmask_to_save();
int ret;
struct mips_abi *abi = current->thread.abi;
+#ifdef CONFIG_CPU_MICROMIPS
+ void *vdso;
+ unsigned int tmp = (unsigned int)current->mm->context.vdso;
+
+ set_isa16_mode(tmp);
+ vdso = (void *)tmp;
+#else
void *vdso = current->mm->context.vdso;
+#endif
if (regs->regs[0]) {
switch(regs->regs[2]) {
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index bfede063d96a..3e5164c11cac 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -34,6 +34,7 @@
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
+#include <asm/gic.h>
static void __init smvp_copy_vpe_config(void)
{
@@ -151,8 +152,6 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
static void __cpuinit vsmp_init_secondary(void)
{
#ifdef CONFIG_IRQ_GIC
- extern int gic_present;
-
/* This is Malta specific: IPI,performance and timer interrupts */
if (gic_present)
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index aee04af213c5..c17619fe18e3 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -83,6 +83,7 @@ static inline void set_cpu_sibling_map(int cpu)
}
struct plat_smp_ops *mp_ops;
+EXPORT_SYMBOL(mp_ops);
__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
index 76016ac0a9c8..2866863a39df 100644
--- a/arch/mips/kernel/smtc-asm.S
+++ b/arch/mips/kernel/smtc-asm.S
@@ -49,6 +49,9 @@ CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
.text
.align 5
FEXPORT(__smtc_ipi_vector)
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
.set noat
/* Disable thread scheduling to make Status update atomic */
DMT 27 # dmt k1
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 7186222dc5bb..31d22f3121c9 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -111,7 +111,7 @@ static int vpe0limit;
static int ipibuffers;
static int nostlb;
static int asidmask;
-unsigned long smtc_asid_mask = 0xff;
+unsigned int smtc_asid_mask = 0xff;
static int __init vpe0tcs(char *str)
{
@@ -1395,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
asid = asid_cache(cpu);
do {
- if (!((asid += ASID_INC) & ASID_MASK) ) {
+ if (!ASID_MASK(ASID_INC(asid))) {
if (cpu_has_vtag_icache)
flush_icache_all();
/* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
mips_ihb();
}
tcstat = read_tc_c0_tcstatus();
- smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
+ smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
if (!prevhalt)
write_tc_c0_tchalt(0);
}
@@ -1423,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
asid = ASID_FIRST_VERSION;
local_flush_tlb_all(); /* start new asid cycle */
}
- } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
+ } while (smtc_live_asid[tlb][ASID_MASK(asid)]);
/*
* SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
tlb_read();
ehb();
ehi = read_c0_entryhi();
- if ((ehi & ASID_MASK) == asid) {
+ if (ASID_MASK(ehi) == asid) {
/*
* Invalidate only entries with specified ASID,
 			 * making sure all entries differ.
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 25225515451f..77cff1f6d050 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -8,8 +8,8 @@
* Copyright (C) 1998 Ulf Carlsson
* Copyright (C) 1999 Silicon Graphics, Inc.
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000, 01 MIPS Technologies, Inc.
* Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/bug.h>
#include <linux/compiler.h>
@@ -60,9 +60,9 @@ extern void check_wait(void);
extern asmlinkage void r4k_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
-extern asmlinkage void handle_tlbm(void);
-extern asmlinkage void handle_tlbl(void);
-extern asmlinkage void handle_tlbs(void);
+extern u32 handle_tlbl[];
+extern u32 handle_tlbs[];
+extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
@@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
-extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
- struct mips_fpu_struct *ctx, int has_fpu,
- void *__user *fault_addr);
-
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
@@ -482,6 +478,12 @@ asmlinkage void do_be(struct pt_regs *regs)
#define SYNC 0x0000000f
#define RDHWR 0x0000003b
+/* microMIPS definitions */
+#define MM_POOL32A_FUNC 0xfc00ffff
+#define MM_RDHWR 0x00006b3c
+#define MM_RS 0x001f0000
+#define MM_RT 0x03e00000
+
/*
* The ll_bit is cleared by r*_switch.S
*/
@@ -596,42 +598,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
* Simulate trapping 'rdhwr' instructions to provide user accessible
* registers not implemented in hardware.
*/
-static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
+static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
struct thread_info *ti = task_thread_info(current);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, regs, 0);
+ switch (rd) {
+ case 0: /* CPU number */
+ regs->regs[rt] = smp_processor_id();
+ return 0;
+ case 1: /* SYNCI length */
+ regs->regs[rt] = min(current_cpu_data.dcache.linesz,
+ current_cpu_data.icache.linesz);
+ return 0;
+ case 2: /* Read count register */
+ regs->regs[rt] = read_c0_count();
+ return 0;
+ case 3: /* Count register resolution */
+ switch (current_cpu_data.cputype) {
+ case CPU_20KC:
+ case CPU_25KF:
+ regs->regs[rt] = 1;
+ break;
+ default:
+ regs->regs[rt] = 2;
+ }
+ return 0;
+ case 29:
+ regs->regs[rt] = ti->tp_value;
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
+{
if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
int rd = (opcode & RD) >> 11;
int rt = (opcode & RT) >> 16;
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
- 1, regs, 0);
- switch (rd) {
- case 0: /* CPU number */
- regs->regs[rt] = smp_processor_id();
- return 0;
- case 1: /* SYNCI length */
- regs->regs[rt] = min(current_cpu_data.dcache.linesz,
- current_cpu_data.icache.linesz);
- return 0;
- case 2: /* Read count register */
- regs->regs[rt] = read_c0_count();
- return 0;
- case 3: /* Count register resolution */
- switch (current_cpu_data.cputype) {
- case CPU_20KC:
- case CPU_25KF:
- regs->regs[rt] = 1;
- break;
- default:
- regs->regs[rt] = 2;
- }
- return 0;
- case 29:
- regs->regs[rt] = ti->tp_value;
- return 0;
- default:
- return -1;
- }
+
+ simulate_rdhwr(regs, rd, rt);
+ return 0;
+ }
+
+ /* Not ours. */
+ return -1;
+}
+
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
+{
+ if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
+ int rd = (opcode & MM_RS) >> 16;
+ int rt = (opcode & MM_RT) >> 21;
+ simulate_rdhwr(regs, rd, rt);
+ return 0;
}
/* Not ours. */
@@ -662,7 +684,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
force_sig_info(SIGFPE, &info, current);
}
-static int process_fpemu_return(int sig, void __user *fault_addr)
+int process_fpemu_return(int sig, void __user *fault_addr)
{
if (sig == SIGSEGV || sig == SIGBUS) {
struct siginfo si = {0};
@@ -813,9 +835,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
asmlinkage void do_bp(struct pt_regs *regs)
{
unsigned int opcode, bcode;
-
- if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
- goto out_sigsegv;
+ unsigned long epc;
+ u16 instr[2];
+
+ if (get_isa16_mode(regs->cp0_epc)) {
+ /* Calculate EPC. */
+ epc = exception_epc(regs);
+ if (cpu_has_mmips) {
+ if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
+ (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
+ goto out_sigsegv;
+ opcode = (instr[0] << 16) | instr[1];
+ } else {
+ /* MIPS16e mode */
+ if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
+ goto out_sigsegv;
+ bcode = (instr[0] >> 6) & 0x3f;
+ do_trap_or_bp(regs, bcode, "Break");
+ return;
+ }
+ } else {
+ if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ goto out_sigsegv;
+ }
/*
* There is the ancient bug in the MIPS assemblers that the break
@@ -856,13 +898,22 @@ out_sigsegv:
asmlinkage void do_tr(struct pt_regs *regs)
{
unsigned int opcode, tcode = 0;
+ u16 instr[2];
+ unsigned long epc = exception_epc(regs);
- if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
- goto out_sigsegv;
+ if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
+ (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
+ goto out_sigsegv;
+ opcode = (instr[0] << 16) | instr[1];
/* Immediate versions don't provide a code. */
- if (!(opcode & OPCODE))
- tcode = ((opcode >> 6) & ((1 << 10) - 1));
+ if (!(opcode & OPCODE)) {
+ if (get_isa16_mode(regs->cp0_epc))
+ /* microMIPS */
+ tcode = (opcode >> 12) & 0x1f;
+ else
+ tcode = ((opcode >> 6) & ((1 << 10) - 1));
+ }
do_trap_or_bp(regs, tcode, "Trap");
return;
@@ -875,6 +926,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
{
unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
unsigned long old_epc = regs->cp0_epc;
+ unsigned long old31 = regs->regs[31];
unsigned int opcode = 0;
int status = -1;
@@ -887,23 +939,37 @@ asmlinkage void do_ri(struct pt_regs *regs)
if (unlikely(compute_return_epc(regs) < 0))
return;
- if (unlikely(get_user(opcode, epc) < 0))
- status = SIGSEGV;
+ if (get_isa16_mode(regs->cp0_epc)) {
+ unsigned short mmop[2] = { 0 };
- if (!cpu_has_llsc && status < 0)
- status = simulate_llsc(regs, opcode);
+ if (unlikely(get_user(mmop[0], epc) < 0))
+ status = SIGSEGV;
+ if (unlikely(get_user(mmop[1], epc) < 0))
+ status = SIGSEGV;
+ opcode = (mmop[0] << 16) | mmop[1];
- if (status < 0)
- status = simulate_rdhwr(regs, opcode);
+ if (status < 0)
+ status = simulate_rdhwr_mm(regs, opcode);
+ } else {
+ if (unlikely(get_user(opcode, epc) < 0))
+ status = SIGSEGV;
- if (status < 0)
- status = simulate_sync(regs, opcode);
+ if (!cpu_has_llsc && status < 0)
+ status = simulate_llsc(regs, opcode);
+
+ if (status < 0)
+ status = simulate_rdhwr_normal(regs, opcode);
+
+ if (status < 0)
+ status = simulate_sync(regs, opcode);
+ }
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
+ regs->regs[31] = old31;
force_sig(status, current);
}
}
@@ -973,7 +1039,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
asmlinkage void do_cpu(struct pt_regs *regs)
{
unsigned int __user *epc;
- unsigned long old_epc;
+ unsigned long old_epc, old31;
unsigned int opcode;
unsigned int cpid;
int status;
@@ -987,26 +1053,41 @@ asmlinkage void do_cpu(struct pt_regs *regs)
case 0:
epc = (unsigned int __user *)exception_epc(regs);
old_epc = regs->cp0_epc;
+ old31 = regs->regs[31];
opcode = 0;
status = -1;
if (unlikely(compute_return_epc(regs) < 0))
return;
- if (unlikely(get_user(opcode, epc) < 0))
- status = SIGSEGV;
+ if (get_isa16_mode(regs->cp0_epc)) {
+ unsigned short mmop[2] = { 0 };
- if (!cpu_has_llsc && status < 0)
- status = simulate_llsc(regs, opcode);
+ if (unlikely(get_user(mmop[0], epc) < 0))
+ status = SIGSEGV;
+ if (unlikely(get_user(mmop[1], epc) < 0))
+ status = SIGSEGV;
+ opcode = (mmop[0] << 16) | mmop[1];
- if (status < 0)
- status = simulate_rdhwr(regs, opcode);
+ if (status < 0)
+ status = simulate_rdhwr_mm(regs, opcode);
+ } else {
+ if (unlikely(get_user(opcode, epc) < 0))
+ status = SIGSEGV;
+
+ if (!cpu_has_llsc && status < 0)
+ status = simulate_llsc(regs, opcode);
+
+ if (status < 0)
+ status = simulate_rdhwr_normal(regs, opcode);
+ }
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
+ regs->regs[31] = old31;
force_sig(status, current);
}
@@ -1320,7 +1401,7 @@ asmlinkage void cache_parity_error(void)
void ejtag_exception_handler(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
- unsigned long depc, old_epc;
+ unsigned long depc, old_epc, old_ra;
unsigned int debug;
printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
@@ -1335,10 +1416,12 @@ void ejtag_exception_handler(struct pt_regs *regs)
* calculation.
*/
old_epc = regs->cp0_epc;
+ old_ra = regs->regs[31];
regs->cp0_epc = depc;
- __compute_return_epc(regs);
+ compute_return_epc(regs);
depc = regs->cp0_epc;
regs->cp0_epc = old_epc;
+ regs->regs[31] = old_ra;
} else
depc += 4;
write_c0_depc(depc);
@@ -1377,11 +1460,27 @@ unsigned long vi_handlers[64];
void __init *set_except_vector(int n, void *addr)
{
unsigned long handler = (unsigned long) addr;
- unsigned long old_handler = exception_handlers[n];
+ unsigned long old_handler;
+
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * Only the TLB handlers are cache aligned with an even
+ * address. All other handlers are on an odd address and
+ * require no modification. Otherwise, MIPS32 mode will
+ * be entered when handling any TLB exceptions. That
+ * would be bad...since we must stay in microMIPS mode.
+ */
+ if (!(handler & 0x1))
+ handler |= 1;
+#endif
+ old_handler = xchg(&exception_handlers[n], handler);
- exception_handlers[n] = handler;
if (n == 0 && cpu_has_divec) {
+#ifdef CONFIG_CPU_MICROMIPS
+ unsigned long jump_mask = ~((1 << 27) - 1);
+#else
unsigned long jump_mask = ~((1 << 28) - 1);
+#endif
u32 *buf = (u32 *)(ebase + 0x200);
unsigned int k0 = 26;
if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
@@ -1397,7 +1496,7 @@ void __init *set_except_vector(int n, void *addr)
return (void *)old_handler;
}
-static asmlinkage void do_default_vi(void)
+static void do_default_vi(void)
{
show_regs(get_irq_regs());
panic("Caught unexpected vectored interrupt.");
@@ -1408,17 +1507,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
int srssets = current_cpu_data.srsets;
- u32 *w;
+ u16 *h;
unsigned char *b;
BUG_ON(!cpu_has_veic && !cpu_has_vint);
+	BUG_ON((n < 0) || (n > 9));
if (addr == NULL) {
handler = (unsigned long) do_default_vi;
srs = 0;
} else
handler = (unsigned long) addr;
- vi_handlers[n] = (unsigned long) addr;
+ vi_handlers[n] = handler;
b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
@@ -1437,9 +1537,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
if (srs == 0) {
/*
* If no shadow set is selected then use the default handler
- * that does normal register saving and a standard interrupt exit
+ * that does normal register saving and standard interrupt exit
*/
-
extern char except_vec_vi, except_vec_vi_lui;
extern char except_vec_vi_ori, except_vec_vi_end;
extern char rollback_except_vec_vi;
@@ -1452,11 +1551,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
* Status.IM bit to be masked before going there.
*/
extern char except_vec_vi_mori;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+ const int mori_offset = &except_vec_vi_mori - vec_start + 2;
+#else
const int mori_offset = &except_vec_vi_mori - vec_start;
+#endif
#endif /* CONFIG_MIPS_MT_SMTC */
- const int handler_len = &except_vec_vi_end - vec_start;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+ const int lui_offset = &except_vec_vi_lui - vec_start + 2;
+ const int ori_offset = &except_vec_vi_ori - vec_start + 2;
+#else
const int lui_offset = &except_vec_vi_lui - vec_start;
const int ori_offset = &except_vec_vi_ori - vec_start;
+#endif
+ const int handler_len = &except_vec_vi_end - vec_start;
if (handler_len > VECTORSPACING) {
/*
@@ -1466,30 +1574,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
panic("VECTORSPACING too small");
}
- memcpy(b, vec_start, handler_len);
+ set_handler(((unsigned long)b - ebase), vec_start,
+#ifdef CONFIG_CPU_MICROMIPS
+ (handler_len - 1));
+#else
+ handler_len);
+#endif
#ifdef CONFIG_MIPS_MT_SMTC
BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
- w = (u32 *)(b + mori_offset);
- *w = (*w & 0xffff0000) | (0x100 << n);
+ h = (u16 *)(b + mori_offset);
+ *h = (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
- w = (u32 *)(b + lui_offset);
- *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
- w = (u32 *)(b + ori_offset);
- *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
+ h = (u16 *)(b + lui_offset);
+ *h = (handler >> 16) & 0xffff;
+ h = (u16 *)(b + ori_offset);
+ *h = (handler & 0xffff);
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+handler_len));
}
else {
/*
- * In other cases jump directly to the interrupt handler
- *
- * It is the handlers responsibility to save registers if required
- * (eg hi/lo) and return from the exception using "eret"
+ * In other cases jump directly to the interrupt handler. It
+ * is the handler's responsibility to save registers if required
+ * (eg hi/lo) and return from the exception using "eret".
*/
- w = (u32 *)b;
- *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
- *w = 0;
+ u32 insn;
+
+ h = (u16 *)b;
+ /* j handler */
+#ifdef CONFIG_CPU_MICROMIPS
+ insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
+#else
+ insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
+#endif
+ h[0] = (insn >> 16) & 0xffff;
+ h[1] = insn & 0xffff;
+ h[2] = 0;
+ h[3] = 0;
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+8));
}
@@ -1534,6 +1656,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
unsigned int hwrena = cpu_hwrena_impl_bits;
+ unsigned long asid = 0;
#ifdef CONFIG_MIPS_MT_SMTC
int secondaryTC = 0;
int bootTC = (cpu == 0);
@@ -1617,8 +1740,9 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
}
#endif /* CONFIG_MIPS_MT_SMTC */
- if (!cpu_data[cpu].asid_cache)
- cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
+ asid = ASID_FIRST_VERSION;
+ cpu_data[cpu].asid_cache = asid;
+ TLBMISS_HANDLER_SETUP();
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
@@ -1648,7 +1772,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
/* Install CPU exception handler */
void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
{
+#ifdef CONFIG_CPU_MICROMIPS
+ memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
+#else
memcpy((void *)(ebase + offset), addr, size);
+#endif
local_flush_icache_range(ebase + offset, ebase + offset + size);
}
@@ -1682,8 +1810,9 @@ __setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void)
{
- extern char except_vec3_generic, except_vec3_r4000;
+ extern char except_vec3_generic;
extern char except_vec4;
+ extern char except_vec3_r4000;
unsigned long i;
int rollback;
@@ -1700,7 +1829,12 @@ void __init trap_init(void)
ebase = (unsigned long)
__alloc_bootmem(size, 1 << fls(size), 0);
} else {
- ebase = CKSEG0;
+#ifdef CONFIG_KVM_GUEST
+#define KVM_GUEST_KSEG0 0x40000000
+ ebase = KVM_GUEST_KSEG0;
+#else
+ ebase = CKSEG0;
+#endif
if (cpu_has_mips_r2)
ebase += (read_c0_ebase() & 0x3ffff000);
}
@@ -1816,11 +1950,11 @@ void __init trap_init(void)
if (cpu_has_vce)
/* Special exception: R4[04]00 uses also the divec space. */
- memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
+ set_handler(0x180, &except_vec3_r4000, 0x100);
else if (cpu_has_4kex)
- memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
+ set_handler(0x180, &except_vec3_generic, 0x80);
else
- memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
+ set_handler(0x080, &except_vec3_generic, 0x80);
local_flush_icache_range(ebase, ebase + 0x400);
flush_tlb_handlers();
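
set_vi_srs_handler() above now patches the vectored-interrupt stub with 16-bit stores: the handler address is split across the immediates of the lui/ori pair (with a +2 byte offset on big-endian and microMIPS so the store lands on the immediate halfword). The split itself as a tiny helper, with the offset calculation and cache flushing omitted (illustrative only):

#include <stdint.h>

static void patch_lui_ori(uint16_t *lui_imm, uint16_t *ori_imm, uint32_t handler)
{
	*lui_imm = (handler >> 16) & 0xffff;	/* upper half -> lui immediate */
	*ori_imm = handler & 0xffff;		/* lower half -> ori immediate */
	/* the real code then flushes the icache over the patched range */
}
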
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 6087a54c86a0..203d8857070d 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -83,8 +83,12 @@
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
#define STR(x) __STR(x)
#define __STR(x) #x
@@ -102,12 +106,332 @@ static u32 unaligned_action;
#endif
extern void show_registers(struct pt_regs *regs);
+#ifdef __BIG_ENDIAN
+#define LoadHW(addr, value, res) \
+ __asm__ __volatile__ (".set\tnoat\n" \
+ "1:\tlb\t%0, 0(%2)\n" \
+ "2:\tlbu\t$1, 1(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\t.set\tat\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tlwl\t%0, (%2)\n" \
+ "2:\tlwr\t%0, 3(%2)\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadHWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbu\t%0, 0(%2)\n" \
+ "2:\tlbu\t$1, 1(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".set\tat\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tlwl\t%0, (%2)\n" \
+ "2:\tlwr\t%0, 3(%2)\n\t" \
+ "dsll\t%0, %0, 32\n\t" \
+ "dsrl\t%0, %0, 32\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tldl\t%0, (%2)\n" \
+ "2:\tldr\t%0, 7(%2)\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define StoreHW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 1(%2)\n\t" \
+ "srl\t$1, %1, 0x8\n" \
+ "2:\tsb\t$1, 0(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define StoreW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tswl\t%1,(%2)\n" \
+ "2:\tswr\t%1, 3(%2)\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define StoreDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tsdl\t%1,(%2)\n" \
+ "2:\tsdr\t%1, 7(%2)\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));
+#endif
+
+#ifdef __LITTLE_ENDIAN
+#define LoadHW(addr, value, res) \
+ __asm__ __volatile__ (".set\tnoat\n" \
+ "1:\tlb\t%0, 1(%2)\n" \
+ "2:\tlbu\t$1, 0(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\t.set\tat\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tlwl\t%0, 3(%2)\n" \
+ "2:\tlwr\t%0, (%2)\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadHWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbu\t%0, 1(%2)\n" \
+ "2:\tlbu\t$1, 0(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".set\tat\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tlwl\t%0, 3(%2)\n" \
+ "2:\tlwr\t%0, (%2)\n\t" \
+ "dsll\t%0, %0, 32\n\t" \
+ "dsrl\t%0, %0, 32\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tldl\t%0, 7(%2)\n" \
+ "2:\tldr\t%0, (%2)\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define StoreHW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 0(%2)\n\t" \
+ "srl\t$1,%1, 0x8\n" \
+ "2:\tsb\t$1, 1(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define StoreW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tswl\t%1, 3(%2)\n" \
+ "2:\tswr\t%1, (%2)\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define StoreDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tsdl\t%1, 7(%2)\n" \
+ "2:\tsdr\t%1, (%2)\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));
+#endif
+
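The Load*/Store* macros above replace the per-opcode inline assembly removed later in this diff: each performs the unaligned access as two partial accesses, writes 0 or -EFAULT into "res" through the .fixup/__ex_table machinery, and leaves the assembled value in "value". A minimal user-space sketch of the byte assembly LoadHW performs on a little-endian CPU (helper name is illustrative; the exception-table handling is omitted):

    #include <stdint.h>

    /* Mirrors LoadHW's little-endian sequence: "lb 1(addr); lbu 0(addr); sll; or". */
    static int16_t load_hw_le_sketch(const uint8_t *addr)
    {
            int32_t hi = (int8_t)addr[1];           /* sign-extending byte load */
            return (int16_t)((hi << 8) | addr[0]);  /* merge with the low byte  */
    }
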
static void emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int __user *pc)
{
union mips_instruction insn;
unsigned long value;
unsigned int res;
+ unsigned long origpc;
+ unsigned long orig31;
+ void __user *fault_addr = NULL;
+
+ origpc = (unsigned long)pc;
+ orig31 = regs->regs[31];
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
@@ -117,22 +441,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
__get_user(insn.word, pc);
switch (insn.i_format.opcode) {
- /*
- * These are instructions that a compiler doesn't generate. We
- * can assume therefore that the code is MIPS-aware and
- * really buggy. Emulating these instructions would break the
- * semantics anyway.
- */
+ /*
+ * These are instructions that a compiler doesn't generate. We
+ * can assume therefore that the code is MIPS-aware and
+ * really buggy. Emulating these instructions would break the
+ * semantics anyway.
+ */
case ll_op:
case lld_op:
case sc_op:
case scd_op:
- /*
- * For these instructions the only way to create an address
- * error is an attempted access to kernel/supervisor address
- * space.
- */
+ /*
+ * For these instructions the only way to create an address
+ * error is an attempted access to kernel/supervisor address
+ * space.
+ */
case ldl_op:
case ldr_op:
case lwl_op:
@@ -146,36 +470,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
case sb_op:
goto sigbus;
- /*
- * The remaining opcodes are the ones that are really of interest.
- */
+ /*
+ * The remaining opcodes are the ones that are really of
+ * interest.
+ */
case lh_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- __asm__ __volatile__ (".set\tnoat\n"
-#ifdef __BIG_ENDIAN
- "1:\tlb\t%0, 0(%2)\n"
- "2:\tlbu\t$1, 1(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tlb\t%0, 1(%2)\n"
- "2:\tlbu\t$1, 0(%2)\n\t"
-#endif
- "sll\t%0, 0x8\n\t"
- "or\t%0, $1\n\t"
- "li\t%1, 0\n"
- "3:\t.set\tat\n\t"
- ".section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%1, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=&r" (value), "=r" (res)
- : "r" (addr), "i" (-EFAULT));
+ LoadHW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
@@ -186,26 +489,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
- __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
- "1:\tlwl\t%0, (%2)\n"
- "2:\tlwr\t%0, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tlwl\t%0, 3(%2)\n"
- "2:\tlwr\t%0, (%2)\n\t"
-#endif
- "li\t%1, 0\n"
- "3:\t.section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%1, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=&r" (value), "=r" (res)
- : "r" (addr), "i" (-EFAULT));
+ LoadW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
@@ -216,30 +500,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- __asm__ __volatile__ (
- ".set\tnoat\n"
-#ifdef __BIG_ENDIAN
- "1:\tlbu\t%0, 0(%2)\n"
- "2:\tlbu\t$1, 1(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tlbu\t%0, 1(%2)\n"
- "2:\tlbu\t$1, 0(%2)\n\t"
-#endif
- "sll\t%0, 0x8\n\t"
- "or\t%0, $1\n\t"
- "li\t%1, 0\n"
- "3:\t.set\tat\n\t"
- ".section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%1, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=&r" (value), "=r" (res)
- : "r" (addr), "i" (-EFAULT));
+ LoadHWU(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
@@ -258,28 +519,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
- __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
- "1:\tlwl\t%0, (%2)\n"
- "2:\tlwr\t%0, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tlwl\t%0, 3(%2)\n"
- "2:\tlwr\t%0, (%2)\n\t"
-#endif
- "dsll\t%0, %0, 32\n\t"
- "dsrl\t%0, %0, 32\n\t"
- "li\t%1, 0\n"
- "3:\t.section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%1, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=&r" (value), "=r" (res)
- : "r" (addr), "i" (-EFAULT));
+ LoadWU(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
@@ -302,26 +542,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 8))
goto sigbus;
- __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
- "1:\tldl\t%0, (%2)\n"
- "2:\tldr\t%0, 7(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tldl\t%0, 7(%2)\n"
- "2:\tldr\t%0, (%2)\n\t"
-#endif
- "li\t%1, 0\n"
- "3:\t.section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%1, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=&r" (value), "=r" (res)
- : "r" (addr), "i" (-EFAULT));
+ LoadDW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
@@ -336,68 +557,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_WRITE, addr, 2))
goto sigbus;
+ compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
- ".set\tnoat\n"
- "1:\tsb\t%1, 1(%2)\n\t"
- "srl\t$1, %1, 0x8\n"
- "2:\tsb\t$1, 0(%2)\n\t"
- ".set\tat\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- ".set\tnoat\n"
- "1:\tsb\t%1, 0(%2)\n\t"
- "srl\t$1,%1, 0x8\n"
- "2:\tsb\t$1, 1(%2)\n\t"
- ".set\tat\n\t"
-#endif
- "li\t%0, 0\n"
- "3:\n\t"
- ".section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%0, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=r" (res)
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ StoreHW(addr, value, res);
if (res)
goto fault;
- compute_return_epc(regs);
break;
case sw_op:
if (!access_ok(VERIFY_WRITE, addr, 4))
goto sigbus;
+ compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
- "1:\tswl\t%1,(%2)\n"
- "2:\tswr\t%1, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tswl\t%1, 3(%2)\n"
- "2:\tswr\t%1, (%2)\n\t"
-#endif
- "li\t%0, 0\n"
- "3:\n\t"
- ".section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%0, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=r" (res)
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ StoreW(addr, value, res);
if (res)
goto fault;
- compute_return_epc(regs);
break;
case sd_op:
@@ -412,31 +587,11 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_WRITE, addr, 8))
goto sigbus;
+ compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
- "1:\tsdl\t%1,(%2)\n"
- "2:\tsdr\t%1, 7(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tsdl\t%1, 7(%2)\n"
- "2:\tsdr\t%1, (%2)\n\t"
-#endif
- "li\t%0, 0\n"
- "3:\n\t"
- ".section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%0, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=r" (res)
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ StoreDW(addr, value, res);
if (res)
goto fault;
- compute_return_epc(regs);
break;
#endif /* CONFIG_64BIT */
@@ -447,10 +602,21 @@ static void emulate_load_store_insn(struct pt_regs *regs,
case ldc1_op:
case swc1_op:
case sdc1_op:
- /*
- * I herewith declare: this does not happen. So send SIGBUS.
- */
- goto sigbus;
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+ BUG_ON(!is_fpu_owner());
+
+ lose_fpu(1); /* Save FPU state for the emulator. */
+ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
+ own_fpu(1); /* Restore FPU state. */
+
+ /* Signal if something went wrong. */
+ process_fpemu_return(res, fault_addr);
+
+ if (res == 0)
+ break;
+ return;
/*
* COP2 is available to implementor for application specific use.
@@ -488,6 +654,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
return;
fault:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
return;
@@ -504,10 +673,881 @@ sigbus:
return;
sigill:
- die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
+ die_if_kernel
+ ("Unhandled kernel unaligned access or invalid instruction", regs);
force_sig(SIGILL, current);
}
+/* Recode table from 16-bit register notation to 32-bit GPR. */
+const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
+const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
+
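The two tables translate the 3-bit register fields of the compressed instruction formats: encodings 0 and 1 select $16/$17 and 2-7 select $2-$7, while the store table maps 0 to $0 so a zero source can be stored. Typical use in the decoders below, for illustration:

    /* e.g. a 16-bit LW: translate the 3-bit rt field into a full GPR number */
    unsigned int gpr = reg16to32[insn.mm16_rb_format.rt];  /* 0 -> $16, 1 -> $17, 2 -> $2, ... */
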
+void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr)
+{
+ unsigned long value;
+ unsigned int res;
+ int i;
+ unsigned int reg = 0, rvar;
+ unsigned long orig31;
+ u16 __user *pc16;
+ u16 halfword;
+ unsigned int word;
+ unsigned long origpc, contpc;
+ union mips_instruction insn;
+ struct mm_decoded_insn mminsn;
+ void __user *fault_addr = NULL;
+
+ origpc = regs->cp0_epc;
+ orig31 = regs->regs[31];
+
+ mminsn.micro_mips_mode = 1;
+
+ /*
+ * This load never faults.
+ */
+ pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 2;
+ word = ((unsigned int)halfword << 16);
+ mminsn.pc_inc = 2;
+
+ if (!mm_insn_16bit(halfword)) {
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 4;
+ mminsn.pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.insn = word;
+
+ if (get_user(halfword, pc16))
+ goto fault;
+ mminsn.next_pc_inc = 2;
+ word = ((unsigned int)halfword << 16);
+
+ if (!mm_insn_16bit(halfword)) {
+ pc16++;
+ if (get_user(halfword, pc16))
+ goto fault;
+ mminsn.next_pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.next_insn = word;
+
+ insn = (union mips_instruction)(mminsn.insn);
+ if (mm_isBranchInstr(regs, mminsn, &contpc))
+ insn = (union mips_instruction)(mminsn.next_insn);
+
+ /* Parse instruction to find what to do */
+
+ switch (insn.mm_i_format.opcode) {
+
+ case mm_pool32a_op:
+ switch (insn.mm_x_format.func) {
+ case mm_lwxs_op:
+ reg = insn.mm_x_format.rd;
+ goto loadW;
+ }
+
+ goto sigbus;
+
+ case mm_pool32b_op:
+ switch (insn.mm_m_format.func) {
+ case mm_lwp_func:
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (!access_ok(VERIFY_READ, addr, 8))
+ goto sigbus;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ addr += 4;
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg + 1] = value;
+ goto success;
+
+ case mm_swp_func:
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (!access_ok(VERIFY_WRITE, addr, 8))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ value = regs->regs[reg + 1];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+
+ case mm_ldp_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (!access_ok(VERIFY_READ, addr, 16))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ addr += 8;
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg + 1] = value;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ case mm_sdp_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (!access_ok(VERIFY_WRITE, addr, 16))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ value = regs->regs[reg + 1];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ case mm_lwm32_func:
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+ if (!access_ok
+ (VERIFY_READ, addr, 4 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (!access_ok(VERIFY_READ, addr, 4 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+ for (i = 16; rvar; rvar--, i++) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[i] = value;
+ }
+ if ((reg & 0xf) == 9) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[30] = value;
+ }
+ if (reg & 0x10) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[31] = value;
+ }
+ goto success;
+
+ case mm_swm32_func:
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+ if (!access_ok
+ (VERIFY_WRITE, addr, 4 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+ for (i = 16; rvar; rvar--, i++) {
+ value = regs->regs[i];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ }
+ if ((reg & 0xf) == 9) {
+ value = regs->regs[30];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ }
+ if (reg & 0x10) {
+ value = regs->regs[31];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ }
+ goto success;
+
+ case mm_ldm_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+ if (!access_ok
+ (VERIFY_READ, addr, 8 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (!access_ok(VERIFY_READ, addr, 8 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+
+ for (i = 16; rvar; rvar--, i++) {
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[i] = value;
+ }
+ if ((reg & 0xf) == 9) {
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ regs->regs[30] = value;
+ }
+ if (reg & 0x10) {
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[31] = value;
+ }
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ case mm_sdm_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+ if (!access_ok
+ (VERIFY_WRITE, addr, 8 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+
+ for (i = 16; rvar; rvar--, i++) {
+ value = regs->regs[i];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ }
+ if ((reg & 0xf) == 9) {
+ value = regs->regs[30];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ }
+ if (reg & 0x10) {
+ value = regs->regs[31];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ }
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ /* LWC2, SWC2, LDC2, SDC2 are not serviced */
+ }
+
+ goto sigbus;
+
+ case mm_pool32c_op:
+ switch (insn.mm_m_format.func) {
+ case mm_lwu_func:
+ reg = insn.mm_m_format.rd;
+ goto loadWU;
+ }
+
+ /* LL,SC,LLD,SCD are not serviced */
+ goto sigbus;
+
+ case mm_pool32f_op:
+ switch (insn.mm_x_format.func) {
+ case mm_lwxc1_func:
+ case mm_swxc1_func:
+ case mm_ldxc1_func:
+ case mm_sdxc1_func:
+ goto fpu_emul;
+ }
+
+ goto sigbus;
+
+ case mm_ldc132_op:
+ case mm_sdc132_op:
+ case mm_lwc132_op:
+ case mm_swc132_op:
+fpu_emul:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+ BUG_ON(!is_fpu_owner());
+
+ lose_fpu(1); /* save the FPU state for the emulator */
+ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
+ own_fpu(1); /* restore FPU state */
+
+ /* If something went wrong, signal */
+ process_fpemu_return(res, fault_addr);
+
+ if (res == 0)
+ goto success;
+ return;
+
+ case mm_lh32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadHW;
+
+ case mm_lhu32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadHWU;
+
+ case mm_lw32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadW;
+
+ case mm_sh32_op:
+ reg = insn.mm_i_format.rt;
+ goto storeHW;
+
+ case mm_sw32_op:
+ reg = insn.mm_i_format.rt;
+ goto storeW;
+
+ case mm_ld32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadDW;
+
+ case mm_sd32_op:
+ reg = insn.mm_i_format.rt;
+ goto storeDW;
+
+ case mm_pool16c_op:
+ switch (insn.mm16_m_format.func) {
+ case mm_lwm16_op:
+ reg = insn.mm16_m_format.rlist;
+ rvar = reg + 1;
+ if (!access_ok(VERIFY_READ, addr, 4 * rvar))
+ goto sigbus;
+
+ for (i = 16; rvar; rvar--, i++) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[i] = value;
+ }
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[31] = value;
+
+ goto success;
+
+ case mm_swm16_op:
+ reg = insn.mm16_m_format.rlist;
+ rvar = reg + 1;
+ if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
+ goto sigbus;
+
+ for (i = 16; rvar; rvar--, i++) {
+ value = regs->regs[i];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ }
+ value = regs->regs[31];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+
+ goto success;
+
+ }
+
+ goto sigbus;
+
+ case mm_lhu16_op:
+ reg = reg16to32[insn.mm16_rb_format.rt];
+ goto loadHWU;
+
+ case mm_lw16_op:
+ reg = reg16to32[insn.mm16_rb_format.rt];
+ goto loadW;
+
+ case mm_sh16_op:
+ reg = reg16to32st[insn.mm16_rb_format.rt];
+ goto storeHW;
+
+ case mm_sw16_op:
+ reg = reg16to32st[insn.mm16_rb_format.rt];
+ goto storeW;
+
+ case mm_lwsp16_op:
+ reg = insn.mm16_r5_format.rt;
+ goto loadW;
+
+ case mm_swsp16_op:
+ reg = insn.mm16_r5_format.rt;
+ goto storeW;
+
+ case mm_lwgp16_op:
+ reg = reg16to32[insn.mm16_r3_format.rt];
+ goto loadW;
+
+ default:
+ goto sigill;
+ }
+
+loadHW:
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+
+ LoadHW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+
+loadHWU:
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+
+ LoadHWU(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+
+loadW:
+ if (!access_ok(VERIFY_READ, addr, 4))
+ goto sigbus;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+
+loadWU:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(VERIFY_READ, addr, 4))
+ goto sigbus;
+
+ LoadWU(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+loadDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(VERIFY_READ, addr, 8))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+storeHW:
+ if (!access_ok(VERIFY_WRITE, addr, 2))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreHW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+
+storeW:
+ if (!access_ok(VERIFY_WRITE, addr, 4))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+
+storeDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(VERIFY_WRITE, addr, 8))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+success:
+ regs->cp0_epc = contpc; /* advance or branch */
+
+#ifdef CONFIG_DEBUG_FS
+ unaligned_instructions++;
+#endif
+ return;
+
+fault:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+ /* Did we have an exception handler installed? */
+ if (fixup_exception(regs))
+ return;
+
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGSEGV, current);
+
+ return;
+
+sigbus:
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGBUS, current);
+
+ return;
+
+sigill:
+ die_if_kernel
+ ("Unhandled kernel unaligned access or invalid instruction", regs);
+ force_sig(SIGILL, current);
+}
+
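In the mm_lwm32/mm_swm32 (and the 64-bit ldm/sdm) cases above, the rd field is really a register list: the low four bits give a count of registers starting at $16 (a value of 9 meaning $16-$23 plus $30), and bit 4 adds $31. A sketch of that decode as a standalone helper (name and array size are mine, not kernel code):

    /* Expands the rd field of LWM32/SWM32 into the GPR numbers transferred.
     * Returns the register count, or -1 for encodings the emulator rejects. */
    static int mm_reglist_decode(unsigned int rd_field, int list[10])
    {
            unsigned int rvar = rd_field & 0xf;
            int i, n = 0;

            if (rvar > 9 || !rd_field)
                    return -1;                      /* same checks as the sigill paths */
            for (i = 0; i < (int)(rvar == 9 ? 8 : rvar); i++)
                    list[n++] = 16 + i;             /* $16 .. at most $23 */
            if (rvar == 9)
                    list[n++] = 30;                 /* encoding 9 also transfers $30 */
            if (rd_field & 0x10)
                    list[n++] = 31;                 /* bit 4 adds $31 (ra) */
            return n;
    }
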
+static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
+{
+ unsigned long value;
+ unsigned int res;
+ int reg;
+ unsigned long orig31;
+ u16 __user *pc16;
+ unsigned long origpc;
+ union mips16e_instruction mips16inst, oldinst;
+
+ origpc = regs->cp0_epc;
+ orig31 = regs->regs[31];
+ pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
+ /*
+ * This load never faults.
+ */
+ __get_user(mips16inst.full, pc16);
+ oldinst = mips16inst;
+
+ /* skip EXTEND instruction */
+ if (mips16inst.ri.opcode == MIPS16e_extend_op) {
+ pc16++;
+ __get_user(mips16inst.full, pc16);
+ } else if (delay_slot(regs)) {
+ /* skip jump instructions */
+ /* JAL/JALX are 32 bits but have OPCODE in first short int */
+ if (mips16inst.ri.opcode == MIPS16e_jal_op)
+ pc16++;
+ pc16++;
+ if (get_user(mips16inst.full, pc16))
+ goto sigbus;
+ }
+
+ switch (mips16inst.ri.opcode) {
+ case MIPS16e_i64_op: /* I64 or RI64 instruction */
+ switch (mips16inst.i64.func) { /* I64/RI64 func field check */
+ case MIPS16e_ldpc_func:
+ case MIPS16e_ldsp_func:
+ reg = reg16to32[mips16inst.ri64.ry];
+ goto loadDW;
+
+ case MIPS16e_sdsp_func:
+ reg = reg16to32[mips16inst.ri64.ry];
+ goto writeDW;
+
+ case MIPS16e_sdrasp_func:
+ reg = 29; /* GPRSP */
+ goto writeDW;
+ }
+
+ goto sigbus;
+
+ case MIPS16e_swsp_op:
+ case MIPS16e_lwpc_op:
+ case MIPS16e_lwsp_op:
+ reg = reg16to32[mips16inst.ri.rx];
+ break;
+
+ case MIPS16e_i8_op:
+ if (mips16inst.i8.func != MIPS16e_swrasp_func)
+ goto sigbus;
+ reg = 29; /* GPRSP */
+ break;
+
+ default:
+ reg = reg16to32[mips16inst.rri.ry];
+ break;
+ }
+
+ switch (mips16inst.ri.opcode) {
+
+ case MIPS16e_lb_op:
+ case MIPS16e_lbu_op:
+ case MIPS16e_sb_op:
+ goto sigbus;
+
+ case MIPS16e_lh_op:
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+
+ LoadHW(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+
+ case MIPS16e_lhu_op:
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+
+ LoadHWU(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+
+ case MIPS16e_lw_op:
+ case MIPS16e_lwpc_op:
+ case MIPS16e_lwsp_op:
+ if (!access_ok(VERIFY_READ, addr, 4))
+ goto sigbus;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+
+ case MIPS16e_lwu_op:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(VERIFY_READ, addr, 4))
+ goto sigbus;
+
+ LoadWU(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ case MIPS16e_ld_op:
+loadDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(VERIFY_READ, addr, 8))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ case MIPS16e_sh_op:
+ if (!access_ok(VERIFY_WRITE, addr, 2))
+ goto sigbus;
+
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ value = regs->regs[reg];
+ StoreHW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+
+ case MIPS16e_sw_op:
+ case MIPS16e_swsp_op:
+ case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
+ if (!access_ok(VERIFY_WRITE, addr, 4))
+ goto sigbus;
+
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ value = regs->regs[reg];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+
+ case MIPS16e_sd_op:
+writeDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(VERIFY_WRITE, addr, 8))
+ goto sigbus;
+
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ value = regs->regs[reg];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ default:
+ /*
+ * Pheeee... We encountered a yet unknown instruction or
+ * cache coherence problem. Die sucker, die ...
+ */
+ goto sigill;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ unaligned_instructions++;
+#endif
+
+ return;
+
+fault:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+ /* Did we have an exception handler installed? */
+ if (fixup_exception(regs))
+ return;
+
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGSEGV, current);
+
+ return;
+
+sigbus:
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGBUS, current);
+
+ return;
+
+sigill:
+ die_if_kernel
+ ("Unhandled kernel unaligned access or invalid instruction", regs);
+ force_sig(SIGILL, current);
+}
asmlinkage void do_ade(struct pt_regs *regs)
{
unsigned int __user *pc;
@@ -517,23 +1557,62 @@ asmlinkage void do_ade(struct pt_regs *regs)
1, regs, regs->cp0_badvaddr);
/*
* Did we catch a fault trying to load an instruction?
- * Or are we running in MIPS16 mode?
*/
- if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
+ if (regs->cp0_badvaddr == regs->cp0_epc)
goto sigbus;
- pc = (unsigned int __user *) exception_epc(regs);
if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
goto sigbus;
if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
goto sigbus;
- else if (unaligned_action == UNALIGNED_ACTION_SHOW)
- show_registers(regs);
/*
* Do branch emulation only if we didn't forward the exception.
* This is all so but ugly ...
*/
+
+ /*
+ * Are we running in microMIPS mode?
+ */
+ if (get_isa16_mode(regs->cp0_epc)) {
+ /*
+ * Did we catch a fault trying to load an instruction in
+ * 16-bit mode?
+ */
+ if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
+ goto sigbus;
+ if (unaligned_action == UNALIGNED_ACTION_SHOW)
+ show_registers(regs);
+
+ if (cpu_has_mmips) {
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+ emulate_load_store_microMIPS(regs,
+ (void __user *)regs->cp0_badvaddr);
+ set_fs(seg);
+
+ return;
+ }
+
+ if (cpu_has_mips16) {
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+ emulate_load_store_MIPS16e(regs,
+ (void __user *)regs->cp0_badvaddr);
+ set_fs(seg);
+
+ return;
+ }
+
+ goto sigbus;
+ }
+
+ if (unaligned_action == UNALIGNED_ACTION_SHOW)
+ show_registers(regs);
+ pc = (unsigned int __user *)exception_epc(regs);
+
seg = get_fs();
if (!user_mode(regs))
set_fs(KERNEL_DS);
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
new file mode 100644
index 000000000000..51617e481aa3
--- /dev/null
+++ b/arch/mips/kvm/00README.txt
@@ -0,0 +1,31 @@
+KVM/MIPS Trap & Emulate Release Notes
+=====================================
+
+(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
+ Malta Board with FPGA based 34K
+ Sigma Designs TangoX board with a 24K based 8654 SoC.
+ Malta Board with 74K @ 1GHz
+
+(2) Both Guest kernel and Guest Userspace execute in UM.
+ Guest User address space: 0x00000000 -> 0x40000000
+ Guest Kernel Unmapped: 0x40000000 -> 0x60000000
+ Guest Kernel Mapped: 0x60000000 -> 0x80000000
+
+ Guest Usermode virtual memory is limited to 1GB.
+
+(3) 16K Page Sizes: Both the Host kernel and the Guest kernel should have the same page size, currently at least 16K.
+ Note that due to cache aliasing issues, 4K page sizes are NOT supported.
+
+(4) No HugeTLB Support
+ Both the host kernel and the Guest kernel should have the page size set to 16K.
+ This will be implemented in a future release.
+
+(5) KVM/MIPS does not have support for SMP Guests
+ A Linux-3.7-rc2 based SMP guest hangs due to the following code sequence in the generated TLB handlers:
+ LL/TLBP/SC. Since the TLBP instruction causes a trap, the reservation gets cleared
+ when we ERET back to the guest. This causes the guest to hang in an infinite loop.
+ This will be fixed in a future release.
+
+(6) Use Host FPU
+ Currently KVM/MIPS emulates a 24K CPU without an FPU.
+ This will be fixed in a future release.
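
The address ranges in item (2) partition the 2GB of guest virtual space that the trap-and-emulate scheme manages while the guest runs in user mode. As a small illustration (names are mine, not part of the KVM/MIPS code):

    /* Classify a guest virtual address against the map in item (2). */
    enum guest_seg { GUEST_USER, GUEST_KSEG_UNMAPPED, GUEST_KSEG_MAPPED, GUEST_INVALID };

    static enum guest_seg classify_gva(unsigned long gva)
    {
            if (gva < 0x40000000UL)
                    return GUEST_USER;           /* 0x00000000 - 0x40000000 */
            if (gva < 0x60000000UL)
                    return GUEST_KSEG_UNMAPPED;  /* 0x40000000 - 0x60000000 */
            if (gva < 0x80000000UL)
                    return GUEST_KSEG_MAPPED;    /* 0x60000000 - 0x80000000 */
            return GUEST_INVALID;                /* not guest-visible */
    }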
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644
index 000000000000..2c15590e55f7
--- /dev/null
+++ b/arch/mips/kvm/Kconfig
@@ -0,0 +1,49 @@
+#
+# KVM configuration
+#
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ depends on HAVE_KVM
+ ---help---
+ Say Y here to get to see options for using your Linux host to run
+ other operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ tristate "Kernel-based Virtual Machine (KVM) support"
+ depends on HAVE_KVM
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
+ select KVM_MMIO
+ ---help---
+ Support for hosting Guest kernels.
+ Currently supported on MIPS32 processors.
+
+config KVM_MIPS_DYN_TRANS
+ bool "KVM/MIPS: Dynamic binary translation to reduce traps"
+ depends on KVM
+ ---help---
+ When running in Trap & Emulate mode, patch privileged
+ instructions to reduce the number of traps.
+
+ If unsure, say Y.
+
+config KVM_MIPS_DEBUG_COP0_COUNTERS
+ bool "Maintain counters for COP0 accesses"
+ depends on KVM
+ ---help---
+ Maintain statistics for Guest COP0 accesses.
+ A histogram of COP0 accesses is printed when the VM is
+ shutdown.
+
+ If unsure, say N.
+
+source drivers/vhost/Kconfig
+
+endif # VIRTUALIZATION
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644
index 000000000000..78d87bbc99db
--- /dev/null
+++ b/arch/mips/kvm/Makefile
@@ -0,0 +1,13 @@
+# Makefile for KVM support for MIPS
+#
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
+
+kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
+ kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
+ kvm_mips_dyntrans.o kvm_trap_emul.o
+
+obj-$(CONFIG_KVM) += kvm.o
+obj-y += kvm_cb.o kvm_tlb.o
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c
new file mode 100644
index 000000000000..313c2e37b978
--- /dev/null
+++ b/arch/mips/kvm/kvm_cb.c
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kvm_host.h>
+
+struct kvm_mips_callbacks *kvm_mips_callbacks;
+EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
new file mode 100644
index 000000000000..dca2aa665993
--- /dev/null
+++ b/arch/mips/kvm/kvm_locore.S
@@ -0,0 +1,650 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Main entry point for the guest, exception handling.
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asm-offsets.h>
+
+
+#define _C_LABEL(x) x
+#define MIPSX(name) mips32_ ## name
+#define CALLFRAME_SIZ 32
+
+/*
+ * VECTOR
+ * exception vector entrypoint
+ */
+#define VECTOR(x, regmask) \
+ .ent _C_LABEL(x),0; \
+ EXPORT(x);
+
+#define VECTOR_END(x) \
+ EXPORT(x);
+
+/* Overload, Danger Will Robinson!! */
+#define PT_HOST_ASID PT_BVADDR
+#define PT_HOST_USERLOCAL PT_EPC
+
+#define CP0_DDATA_LO $28,3
+#define CP0_EBASE $15,1
+
+#define CP0_INTCTL $12,1
+#define CP0_SRSCTL $12,2
+#define CP0_SRSMAP $12,3
+#define CP0_HWRENA $7,0
+
+/* Resume Flags */
+#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
+
+#define RESUME_GUEST 0
+#define RESUME_HOST RESUME_FLAG_HOST
+
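These flags come back from kvm_mips_handle_exit() packed together with a return code: the exit path below tests RESUME_HOST with an andi and recovers the code with an arithmetic shift by 2. A hedged sketch of that convention (my reading of the assembly, not a declared kernel API):

    /* Packing implied by "andi t0, v0, RESUME_HOST" and "sra k0, v0, 2" below. */
    static inline int pack_resume(int err, int flags) { return (err << 2) | flags; }
    static inline int resume_err(int packed)          { return packed >> 2; }
    static inline int resume_to_host(int packed)      { return packed & RESUME_HOST; }
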
+/*
+ * __kvm_mips_vcpu_run: entry point to the guest
+ * a0: run
+ * a1: vcpu
+ */
+
+FEXPORT(__kvm_mips_vcpu_run)
+ .set push
+ .set noreorder
+ .set noat
+
+ /* k0/k1 not being used in host kernel context */
+ addiu k1,sp, -PT_SIZE
+ LONG_S $0, PT_R0(k1)
+ LONG_S $1, PT_R1(k1)
+ LONG_S $2, PT_R2(k1)
+ LONG_S $3, PT_R3(k1)
+
+ LONG_S $4, PT_R4(k1)
+ LONG_S $5, PT_R5(k1)
+ LONG_S $6, PT_R6(k1)
+ LONG_S $7, PT_R7(k1)
+
+ LONG_S $8, PT_R8(k1)
+ LONG_S $9, PT_R9(k1)
+ LONG_S $10, PT_R10(k1)
+ LONG_S $11, PT_R11(k1)
+ LONG_S $12, PT_R12(k1)
+ LONG_S $13, PT_R13(k1)
+ LONG_S $14, PT_R14(k1)
+ LONG_S $15, PT_R15(k1)
+ LONG_S $16, PT_R16(k1)
+ LONG_S $17, PT_R17(k1)
+
+ LONG_S $18, PT_R18(k1)
+ LONG_S $19, PT_R19(k1)
+ LONG_S $20, PT_R20(k1)
+ LONG_S $21, PT_R21(k1)
+ LONG_S $22, PT_R22(k1)
+ LONG_S $23, PT_R23(k1)
+ LONG_S $24, PT_R24(k1)
+ LONG_S $25, PT_R25(k1)
+
+ /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
+
+ LONG_S $28, PT_R28(k1)
+ LONG_S $29, PT_R29(k1)
+ LONG_S $30, PT_R30(k1)
+ LONG_S $31, PT_R31(k1)
+
+ /* Save hi/lo */
+ mflo v0
+ LONG_S v0, PT_LO(k1)
+ mfhi v1
+ LONG_S v1, PT_HI(k1)
+
+ /* Save host status */
+ mfc0 v0, CP0_STATUS
+ LONG_S v0, PT_STATUS(k1)
+
+ /* Save host ASID, shove it into the BVADDR location */
+ mfc0 v1,CP0_ENTRYHI
+ andi v1, 0xff
+ LONG_S v1, PT_HOST_ASID(k1)
+
+ /* Save DDATA_LO, will be used to store pointer to vcpu */
+ mfc0 v1, CP0_DDATA_LO
+ LONG_S v1, PT_HOST_USERLOCAL(k1)
+
+ /* DDATA_LO has pointer to vcpu */
+ mtc0 a1,CP0_DDATA_LO
+
+ /* Offset into vcpu->arch */
+ addiu k1, a1, VCPU_HOST_ARCH
+
+ /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
+ LONG_S sp, VCPU_HOST_STACK(k1)
+
+ /* Save the kernel gp as well */
+ LONG_S gp, VCPU_HOST_GP(k1)
+
+ /* Setup status register for running the guest in UM, interrupts are disabled */
+ li k0,(ST0_EXL | KSU_USER| ST0_BEV)
+ mtc0 k0,CP0_STATUS
+ ehb
+
+ /* load up the new EBASE */
+ LONG_L k0, VCPU_GUEST_EBASE(k1)
+ mtc0 k0,CP0_EBASE
+
+ /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
+ * but make sure that timer interrupts are enabled
+ */
+ li k0,(ST0_EXL | KSU_USER | ST0_IE)
+ andi v0, v0, ST0_IM
+ or k0, k0, v0
+ mtc0 k0,CP0_STATUS
+ ehb
+
+
+ /* Set Guest EPC */
+ LONG_L t0, VCPU_PC(k1)
+ mtc0 t0, CP0_EPC
+
+FEXPORT(__kvm_mips_load_asid)
+ /* Set the ASID for the Guest Kernel */
+ sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
+ /* addresses shift to 0x80000000 */
+ bltz t0, 1f /* If kernel */
+ addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
+ addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
+1:
+ /* t1: contains the base of the ASID array, need to get the cpu id */
+ LONG_L t2, TI_CPU($28) /* smp_processor_id */
+ sll t2, t2, 2 /* x4 */
+ addu t3, t1, t2
+ LONG_L k0, (t3)
+ andi k0, k0, 0xff
+ mtc0 k0,CP0_ENTRYHI
+ ehb
+
+ /* Disable RDHWR access */
+ mtc0 zero, CP0_HWRENA
+
+ /* Now load up the Guest Context from VCPU */
+ LONG_L $1, VCPU_R1(k1)
+ LONG_L $2, VCPU_R2(k1)
+ LONG_L $3, VCPU_R3(k1)
+
+ LONG_L $4, VCPU_R4(k1)
+ LONG_L $5, VCPU_R5(k1)
+ LONG_L $6, VCPU_R6(k1)
+ LONG_L $7, VCPU_R7(k1)
+
+ LONG_L $8, VCPU_R8(k1)
+ LONG_L $9, VCPU_R9(k1)
+ LONG_L $10, VCPU_R10(k1)
+ LONG_L $11, VCPU_R11(k1)
+ LONG_L $12, VCPU_R12(k1)
+ LONG_L $13, VCPU_R13(k1)
+ LONG_L $14, VCPU_R14(k1)
+ LONG_L $15, VCPU_R15(k1)
+ LONG_L $16, VCPU_R16(k1)
+ LONG_L $17, VCPU_R17(k1)
+ LONG_L $18, VCPU_R18(k1)
+ LONG_L $19, VCPU_R19(k1)
+ LONG_L $20, VCPU_R20(k1)
+ LONG_L $21, VCPU_R21(k1)
+ LONG_L $22, VCPU_R22(k1)
+ LONG_L $23, VCPU_R23(k1)
+ LONG_L $24, VCPU_R24(k1)
+ LONG_L $25, VCPU_R25(k1)
+
+ /* k0/k1 loaded up later */
+
+ LONG_L $28, VCPU_R28(k1)
+ LONG_L $29, VCPU_R29(k1)
+ LONG_L $30, VCPU_R30(k1)
+ LONG_L $31, VCPU_R31(k1)
+
+ /* Restore hi/lo */
+ LONG_L k0, VCPU_LO(k1)
+ mtlo k0
+
+ LONG_L k0, VCPU_HI(k1)
+ mthi k0
+
+FEXPORT(__kvm_mips_load_k0k1)
+ /* Restore the guest's k0/k1 registers */
+ LONG_L k0, VCPU_R26(k1)
+ LONG_L k1, VCPU_R27(k1)
+
+ /* Jump to guest */
+ eret
+ .set pop
+
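The __kvm_mips_load_asid sequence above selects between the guest kernel and guest user ASID arrays by testing bit 30 of the guest EPC (the sll/bltz pair), matching the guest layout where kseg0 starts at 0x40000000. In C terms, roughly (field names follow the VCPU_GUEST_*_ASID offsets; this is a sketch, not kernel code):

    static unsigned long pick_guest_asid(struct kvm_vcpu_arch *arch,
                                         unsigned long guest_pc, int cpu)
    {
            /* bit 30 set => guest kernel address space */
            if (guest_pc & 0x40000000UL)
                    return arch->guest_kernel_asid[cpu] & 0xff;
            return arch->guest_user_asid[cpu] & 0xff;
    }
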
+VECTOR(MIPSX(exception), unknown)
+/*
+ * Find out what mode we came from and jump to the proper handler.
+ */
+ .set push
+ .set noat
+ .set noreorder
+ mtc0 k0, CP0_ERROREPC #01: Save guest k0
+ ehb #02:
+
+ mfc0 k0, CP0_EBASE #02: Get EBASE
+ srl k0, k0, 10 #03: Get rid of CPUNum
+ sll k0, k0, 10 #04
+ LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
+ addiu k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000
+ j k0 #07: jump to the function
+ nop #08: branch delay slot
+ .set pop
+VECTOR_END(MIPSX(exceptionEnd))
+.end MIPSX(exception)
+
+/*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ *
+ */
+NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
+ .set push
+ .set noat
+ .set noreorder
+
+ /* Get the VCPU pointer from DDATA_LO */
+ mfc0 k1, CP0_DDATA_LO
+ addiu k1, k1, VCPU_HOST_ARCH
+
+ /* Start saving Guest context to VCPU */
+ LONG_S $0, VCPU_R0(k1)
+ LONG_S $1, VCPU_R1(k1)
+ LONG_S $2, VCPU_R2(k1)
+ LONG_S $3, VCPU_R3(k1)
+ LONG_S $4, VCPU_R4(k1)
+ LONG_S $5, VCPU_R5(k1)
+ LONG_S $6, VCPU_R6(k1)
+ LONG_S $7, VCPU_R7(k1)
+ LONG_S $8, VCPU_R8(k1)
+ LONG_S $9, VCPU_R9(k1)
+ LONG_S $10, VCPU_R10(k1)
+ LONG_S $11, VCPU_R11(k1)
+ LONG_S $12, VCPU_R12(k1)
+ LONG_S $13, VCPU_R13(k1)
+ LONG_S $14, VCPU_R14(k1)
+ LONG_S $15, VCPU_R15(k1)
+ LONG_S $16, VCPU_R16(k1)
+ LONG_S $17, VCPU_R17(k1)
+ LONG_S $18, VCPU_R18(k1)
+ LONG_S $19, VCPU_R19(k1)
+ LONG_S $20, VCPU_R20(k1)
+ LONG_S $21, VCPU_R21(k1)
+ LONG_S $22, VCPU_R22(k1)
+ LONG_S $23, VCPU_R23(k1)
+ LONG_S $24, VCPU_R24(k1)
+ LONG_S $25, VCPU_R25(k1)
+
+ /* Guest k0/k1 saved later */
+
+ LONG_S $28, VCPU_R28(k1)
+ LONG_S $29, VCPU_R29(k1)
+ LONG_S $30, VCPU_R30(k1)
+ LONG_S $31, VCPU_R31(k1)
+
+ /* We need to save hi/lo and restore them on
+ * the way out
+ */
+ mfhi t0
+ LONG_S t0, VCPU_HI(k1)
+
+ mflo t0
+ LONG_S t0, VCPU_LO(k1)
+
+ /* Finally save guest k0/k1 to VCPU */
+ mfc0 t0, CP0_ERROREPC
+ LONG_S t0, VCPU_R26(k1)
+
+ /* Get GUEST k1 and save it in VCPU */
+ la t1, ~0x2ff
+ mfc0 t0, CP0_EBASE
+ and t0, t0, t1
+ LONG_L t0, 0x3000(t0)
+ LONG_S t0, VCPU_R27(k1)
+
+ /* Now that context has been saved, we can use other registers */
+
+ /* Restore vcpu */
+ mfc0 a1, CP0_DDATA_LO
+ move s1, a1
+
+ /* Restore run (vcpu->run) */
+ LONG_L a0, VCPU_RUN(a1)
+ /* Save pointer to run in s0, will be saved by the compiler */
+ move s0, a0
+
+
+ /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
+ mfc0 k0,CP0_EPC
+ LONG_S k0, VCPU_PC(k1)
+
+ mfc0 k0, CP0_BADVADDR
+ LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
+
+ mfc0 k0, CP0_CAUSE
+ LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
+
+ mfc0 k0, CP0_ENTRYHI
+ LONG_S k0, VCPU_HOST_ENTRYHI(k1)
+
+ /* Now restore the host state just enough to run the handlers */
+
+ /* Switch EBASE to the one used by Linux */
+ /* load up the host EBASE */
+ mfc0 v0, CP0_STATUS
+
+ .set at
+ or k0, v0, ST0_BEV
+ .set noat
+
+ mtc0 k0, CP0_STATUS
+ ehb
+
+ LONG_L k0, VCPU_HOST_EBASE(k1)
+ mtc0 k0,CP0_EBASE
+
+
+ /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+ .set at
+ and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
+ or v0, v0, ST0_CU0
+ .set noat
+ mtc0 v0, CP0_STATUS
+ ehb
+
+ /* Load up host GP */
+ LONG_L gp, VCPU_HOST_GP(k1)
+
+ /* Need a stack before we can jump to "C" */
+ LONG_L sp, VCPU_HOST_STACK(k1)
+
+ /* Saved host state */
+ addiu sp,sp, -PT_SIZE
+
+ /* XXXKYMA do we need to load the host ASID, maybe not because the
+ * kernel entries are marked GLOBAL, need to verify
+ */
+
+ /* Restore host DDATA_LO */
+ LONG_L k0, PT_HOST_USERLOCAL(sp)
+ mtc0 k0, CP0_DDATA_LO
+
+ /* Restore RDHWR access */
+ la k0, 0x2000000F
+ mtc0 k0, CP0_HWRENA
+
+ /* Jump to handler */
+FEXPORT(__kvm_mips_jump_to_handler)
+ /* XXXKYMA: not sure if this is safe, how large is the stack?? */
+ /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
+ la t9,kvm_mips_handle_exit
+ jalr.hb t9
+ addiu sp,sp, -CALLFRAME_SIZ /* BD Slot */
+
+ /* Return from handler Make sure interrupts are disabled */
+ di
+ ehb
+
+ /* XXXKYMA: k0/k1 could have been blown away if we processed an exception
+ * while we were handling the exception from the guest, reload k1
+ */
+ move k1, s1
+ addiu k1, k1, VCPU_HOST_ARCH
+
+ /* Check return value, should tell us if we are returning to the host (handle I/O etc)
+ * or resuming the guest
+ */
+ andi t0, v0, RESUME_HOST
+ bnez t0, __kvm_mips_return_to_host
+ nop
+
+__kvm_mips_return_to_guest:
+ /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
+ mtc0 s1, CP0_DDATA_LO
+
+ /* Load up the Guest EBASE to minimize the window where BEV is set */
+ LONG_L t0, VCPU_GUEST_EBASE(k1)
+
+ /* Switch EBASE back to the one used by KVM */
+ mfc0 v1, CP0_STATUS
+ .set at
+ or k0, v1, ST0_BEV
+ .set noat
+ mtc0 k0, CP0_STATUS
+ ehb
+ mtc0 t0,CP0_EBASE
+
+ /* Setup status register for running guest in UM */
+ .set at
+ or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+ and v1, v1, ~ST0_CU0
+ .set noat
+ mtc0 v1, CP0_STATUS
+ ehb
+
+
+ /* Set Guest EPC */
+ LONG_L t0, VCPU_PC(k1)
+ mtc0 t0, CP0_EPC
+
+ /* Set the ASID for the Guest Kernel */
+ sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
+ /* addresses shift to 0x80000000 */
+ bltz t0, 1f /* If kernel */
+ addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
+ addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
+1:
+ /* t1: contains the base of the ASID array, need to get the cpu id */
+ LONG_L t2, TI_CPU($28) /* smp_processor_id */
+ sll t2, t2, 2 /* x4 */
+ addu t3, t1, t2
+ LONG_L k0, (t3)
+ andi k0, k0, 0xff
+ mtc0 k0,CP0_ENTRYHI
+ ehb
+
+ /* Disable RDHWR access */
+ mtc0 zero, CP0_HWRENA
+
+ /* load the guest context from VCPU and return */
+ LONG_L $0, VCPU_R0(k1)
+ LONG_L $1, VCPU_R1(k1)
+ LONG_L $2, VCPU_R2(k1)
+ LONG_L $3, VCPU_R3(k1)
+ LONG_L $4, VCPU_R4(k1)
+ LONG_L $5, VCPU_R5(k1)
+ LONG_L $6, VCPU_R6(k1)
+ LONG_L $7, VCPU_R7(k1)
+ LONG_L $8, VCPU_R8(k1)
+ LONG_L $9, VCPU_R9(k1)
+ LONG_L $10, VCPU_R10(k1)
+ LONG_L $11, VCPU_R11(k1)
+ LONG_L $12, VCPU_R12(k1)
+ LONG_L $13, VCPU_R13(k1)
+ LONG_L $14, VCPU_R14(k1)
+ LONG_L $15, VCPU_R15(k1)
+ LONG_L $16, VCPU_R16(k1)
+ LONG_L $17, VCPU_R17(k1)
+ LONG_L $18, VCPU_R18(k1)
+ LONG_L $19, VCPU_R19(k1)
+ LONG_L $20, VCPU_R20(k1)
+ LONG_L $21, VCPU_R21(k1)
+ LONG_L $22, VCPU_R22(k1)
+ LONG_L $23, VCPU_R23(k1)
+ LONG_L $24, VCPU_R24(k1)
+ LONG_L $25, VCPU_R25(k1)
+
+ /* k0/k1 loaded later */
+ LONG_L $28, VCPU_R28(k1)
+ LONG_L $29, VCPU_R29(k1)
+ LONG_L $30, VCPU_R30(k1)
+ LONG_L $31, VCPU_R31(k1)
+
+FEXPORT(__kvm_mips_skip_guest_restore)
+ LONG_L k0, VCPU_HI(k1)
+ mthi k0
+
+ LONG_L k0, VCPU_LO(k1)
+ mtlo k0
+
+ LONG_L k0, VCPU_R26(k1)
+ LONG_L k1, VCPU_R27(k1)
+
+ eret
+
+__kvm_mips_return_to_host:
+ /* EBASE is already pointing to Linux */
+ LONG_L k1, VCPU_HOST_STACK(k1)
+ addiu k1,k1, -PT_SIZE
+
+ /* Restore host DDATA_LO */
+ LONG_L k0, PT_HOST_USERLOCAL(k1)
+ mtc0 k0, CP0_DDATA_LO
+
+ /* Restore host ASID */
+ LONG_L k0, PT_HOST_ASID(sp)
+ andi k0, 0xff
+ mtc0 k0,CP0_ENTRYHI
+ ehb
+
+ /* Load context saved on the host stack */
+ LONG_L $0, PT_R0(k1)
+ LONG_L $1, PT_R1(k1)
+
+ /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code */
+ sra k0, v0, 2
+ move $2, k0
+
+ LONG_L $3, PT_R3(k1)
+ LONG_L $4, PT_R4(k1)
+ LONG_L $5, PT_R5(k1)
+ LONG_L $6, PT_R6(k1)
+ LONG_L $7, PT_R7(k1)
+ LONG_L $8, PT_R8(k1)
+ LONG_L $9, PT_R9(k1)
+ LONG_L $10, PT_R10(k1)
+ LONG_L $11, PT_R11(k1)
+ LONG_L $12, PT_R12(k1)
+ LONG_L $13, PT_R13(k1)
+ LONG_L $14, PT_R14(k1)
+ LONG_L $15, PT_R15(k1)
+ LONG_L $16, PT_R16(k1)
+ LONG_L $17, PT_R17(k1)
+ LONG_L $18, PT_R18(k1)
+ LONG_L $19, PT_R19(k1)
+ LONG_L $20, PT_R20(k1)
+ LONG_L $21, PT_R21(k1)
+ LONG_L $22, PT_R22(k1)
+ LONG_L $23, PT_R23(k1)
+ LONG_L $24, PT_R24(k1)
+ LONG_L $25, PT_R25(k1)
+
+ /* Host k0/k1 were not saved */
+
+ LONG_L $28, PT_R28(k1)
+ LONG_L $29, PT_R29(k1)
+ LONG_L $30, PT_R30(k1)
+
+ LONG_L k0, PT_HI(k1)
+ mthi k0
+
+ LONG_L k0, PT_LO(k1)
+ mtlo k0
+
+ /* Restore RDHWR access */
+ la k0, 0x2000000F
+ mtc0 k0, CP0_HWRENA
+
+
+ /* Restore RA, which is the address we will return to */
+ LONG_L ra, PT_R31(k1)
+ j ra
+ nop
+
+ .set pop
+VECTOR_END(MIPSX(GuestExceptionEnd))
+.end MIPSX(GuestException)
+
+MIPSX(exceptions):
+ #####
+ ##### The exception handlers.
+ #####
+ .word _C_LABEL(MIPSX(GuestException)) # 0
+ .word _C_LABEL(MIPSX(GuestException)) # 1
+ .word _C_LABEL(MIPSX(GuestException)) # 2
+ .word _C_LABEL(MIPSX(GuestException)) # 3
+ .word _C_LABEL(MIPSX(GuestException)) # 4
+ .word _C_LABEL(MIPSX(GuestException)) # 5
+ .word _C_LABEL(MIPSX(GuestException)) # 6
+ .word _C_LABEL(MIPSX(GuestException)) # 7
+ .word _C_LABEL(MIPSX(GuestException)) # 8
+ .word _C_LABEL(MIPSX(GuestException)) # 9
+ .word _C_LABEL(MIPSX(GuestException)) # 10
+ .word _C_LABEL(MIPSX(GuestException)) # 11
+ .word _C_LABEL(MIPSX(GuestException)) # 12
+ .word _C_LABEL(MIPSX(GuestException)) # 13
+ .word _C_LABEL(MIPSX(GuestException)) # 14
+ .word _C_LABEL(MIPSX(GuestException)) # 15
+ .word _C_LABEL(MIPSX(GuestException)) # 16
+ .word _C_LABEL(MIPSX(GuestException)) # 17
+ .word _C_LABEL(MIPSX(GuestException)) # 18
+ .word _C_LABEL(MIPSX(GuestException)) # 19
+ .word _C_LABEL(MIPSX(GuestException)) # 20
+ .word _C_LABEL(MIPSX(GuestException)) # 21
+ .word _C_LABEL(MIPSX(GuestException)) # 22
+ .word _C_LABEL(MIPSX(GuestException)) # 23
+ .word _C_LABEL(MIPSX(GuestException)) # 24
+ .word _C_LABEL(MIPSX(GuestException)) # 25
+ .word _C_LABEL(MIPSX(GuestException)) # 26
+ .word _C_LABEL(MIPSX(GuestException)) # 27
+ .word _C_LABEL(MIPSX(GuestException)) # 28
+ .word _C_LABEL(MIPSX(GuestException)) # 29
+ .word _C_LABEL(MIPSX(GuestException)) # 30
+ .word _C_LABEL(MIPSX(GuestException)) # 31
+
+
+/* This routine makes changes to the instruction stream visible to the hardware.
+ * It should be called after the instruction stream is written.
+ * On return, the new instructions are effective.
+ * Inputs:
+ * a0 = Start address of new instruction stream
+ * a1 = Size, in bytes, of new instruction stream
+ */
+
+#define HW_SYNCI_Step $1
+LEAF(MIPSX(SyncICache))
+ .set push
+ .set mips32r2
+ beq a1, zero, 20f
+ nop
+ addu a1, a0, a1
+ rdhwr v0, HW_SYNCI_Step
+ beq v0, zero, 20f
+ nop
+
+10:
+ synci 0(a0)
+ addu a0, a0, v0
+ sltu v1, a0, a1
+ bne v1, zero, 10b
+ nop
+ sync
+20:
+ jr.hb ra
+ nop
+ .set pop
+END(MIPSX(SyncICache))
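MIPSX(SyncICache) walks the new handler range with synci at the cache-line step reported by rdhwr, then issues sync and a hazard-barrier return. A rough C rendering, assuming a MIPS32r2 toolchain (illustration only; the real routine stays in assembly so the exact instruction sequence is guaranteed):

    static void sync_icache_sketch(unsigned long start, unsigned long size)
    {
            unsigned long step, p;

            if (!size)
                    return;
            __asm__ __volatile__("rdhwr %0, $1" : "=r"(step));  /* HW SYNCI_Step */
            if (!step)
                    return;                                     /* no synci needed */
            for (p = start; p < start + size; p += step)
                    __asm__ __volatile__("synci 0(%0)" : : "r"(p) : "memory");
            __asm__ __volatile__("sync" : : : "memory");
            /* the assembly version then returns with jr.hb to clear hazards */
    }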
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
new file mode 100644
index 000000000000..e0dad0289797
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips.c
@@ -0,0 +1,958 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#ifndef VECTORSPACING
+#define VECTORSPACING 0x100 /* for EI/VI mode */
+#endif
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+ { "wait", VCPU_STAT(wait_exits) },
+ { "cache", VCPU_STAT(cache_exits) },
+ { "signal", VCPU_STAT(signal_exits) },
+ { "interrupt", VCPU_STAT(int_exits) },
+ { "cop_unusable", VCPU_STAT(cop_unusable_exits) },
+ { "tlbmod", VCPU_STAT(tlbmod_exits) },
+ { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
+ { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
+ { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
+ { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
+ { "syscall", VCPU_STAT(syscall_exits) },
+ { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
+ { "break_inst", VCPU_STAT(break_inst_exits) },
+ { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
+ { "halt_wakeup", VCPU_STAT(halt_wakeup) },
+ {NULL}
+};
+
+static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+ int i;
+ for_each_possible_cpu(i) {
+ vcpu->arch.guest_kernel_asid[i] = 0;
+ vcpu->arch.guest_user_asid[i] = 0;
+ }
+ return 0;
+}
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+ return gfn;
+}
+
+/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
+ * are "runnable" if interrupts are pending
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+ return !!(vcpu->arch.pending_exceptions);
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+
+int kvm_arch_hardware_enable(void *garbage)
+{
+ return 0;
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+ int *r = (int *)rtn;
+ *r = 0;
+ return;
+}
+
+static void kvm_mips_init_tlbs(struct kvm *kvm)
+{
+ unsigned long wired;
+
+ /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
+ wired = read_c0_wired();
+ write_c0_wired(wired + 1);
+ mtc0_tlbw_hazard();
+ kvm->arch.commpage_tlb = wired;
+
+ kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
+ kvm->arch.commpage_tlb);
+}
+
+static void kvm_mips_init_vm_percpu(void *arg)
+{
+ struct kvm *kvm = (struct kvm *)arg;
+
+ kvm_mips_init_tlbs(kvm);
+ kvm_mips_callbacks->vm_init(kvm);
+
+}
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+ if (atomic_inc_return(&kvm_mips_instance) == 1) {
+ kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
+ __func__);
+ on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
+ }
+
+
+ return 0;
+}
+
+void kvm_mips_free_vcpus(struct kvm *kvm)
+{
+ unsigned int i;
+ struct kvm_vcpu *vcpu;
+
+ /* Put the pages we reserved for the guest pmap */
+ for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
+ if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
+ kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+ }
+
+ if (kvm->arch.guest_pmap)
+ kfree(kvm->arch.guest_pmap);
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ kvm_arch_vcpu_free(vcpu);
+ }
+
+ mutex_lock(&kvm->lock);
+
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+ kvm->vcpus[i] = NULL;
+
+ atomic_set(&kvm->online_vcpus, 0);
+
+ mutex_unlock(&kvm->lock);
+}
+
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
+static void kvm_mips_uninit_tlbs(void *arg)
+{
+ /* Restore wired count */
+ write_c0_wired(0);
+ mtc0_tlbw_hazard();
+ /* Clear out all the TLBs */
+ kvm_local_flush_tlb_all();
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+ kvm_mips_free_vcpus(kvm);
+
+ /* If this is the last instance, restore wired count */
+ if (atomic_dec_return(&kvm_mips_instance) == 0) {
+ kvm_info("%s: last KVM instance, restoring TLB parameters\n",
+ __func__);
+ on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
+ }
+}
+
+long
+kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+ return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
+{
+ return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ enum kvm_mr_change change)
+{
+ unsigned long npages = 0;
+ int i, err = 0;
+
+ kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
+ __func__, kvm, mem->slot, mem->guest_phys_addr,
+ mem->memory_size, mem->userspace_addr);
+
+ /* Setup Guest PMAP table */
+ if (!kvm->arch.guest_pmap) {
+ if (mem->slot == 0)
+ npages = mem->memory_size >> PAGE_SHIFT;
+
+ if (npages) {
+ kvm->arch.guest_pmap_npages = npages;
+ kvm->arch.guest_pmap =
+ kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
+
+ if (!kvm->arch.guest_pmap) {
+ kvm_err("Failed to allocate guest PMAP");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ kvm_info
+ ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
+ npages, kvm->arch.guest_pmap);
+
+ /* Now setup the page table */
+ for (i = 0; i < npages; i++) {
+ kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
+ }
+ }
+ }
+out:
+ return;
+}
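+
+/*
+ * Illustration of the guest_pmap sizing above: for a 128 MB slot 0 and
+ * 4 KB pages (an assumed configuration), memory_size >> PAGE_SHIFT yields
+ * 32768 entries; each entry eventually holds the host pfn backing that
+ * guest frame and starts out as KVM_INVALID_PAGE.
+ */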
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot)
+{
+}
+
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ extern char mips32_exception[], mips32_exceptionEnd[];
+ extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+ int err, size, offset;
+ void *gebase;
+ int i;
+
+ struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+
+ if (!vcpu) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = kvm_vcpu_init(vcpu, kvm, id);
+
+ if (err)
+ goto out_free_cpu;
+
+ kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+
+ /* Allocate space for host mode exception handlers that handle
+ * guest mode exits
+ */
+ if (cpu_has_veic || cpu_has_vint) {
+ size = 0x200 + VECTORSPACING * 64;
+ } else {
+ size = 0x200;
+ }
+
+ /* Save Linux EBASE */
+ vcpu->arch.host_ebase = (void *)read_c0_ebase();
+
+ gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
+
+ if (!gebase) {
+ err = -ENOMEM;
+ goto out_free_cpu;
+ }
+ kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+ ALIGN(size, PAGE_SIZE), gebase);
+
+ /* Save new ebase */
+ vcpu->arch.guest_ebase = gebase;
+
+ /* Copy L1 Guest Exception handler to correct offset */
+
+ /* TLB Refill, EXL = 0 */
+ memcpy(gebase, mips32_exception,
+ mips32_exceptionEnd - mips32_exception);
+
+ /* General Exception Entry point */
+ memcpy(gebase + 0x180, mips32_exception,
+ mips32_exceptionEnd - mips32_exception);
+
+ /* For vectored interrupts poke the exception code @ all offsets 0-7 */
+ for (i = 0; i < 8; i++) {
+ kvm_debug("L1 Vectored handler @ %p\n",
+ gebase + 0x200 + (i * VECTORSPACING));
+ memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
+ mips32_exceptionEnd - mips32_exception);
+ }
+
+ /* General handler, relocate to unmapped space for sanity's sake */
+ offset = 0x2000;
+ kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
+ gebase + offset,
+ mips32_GuestExceptionEnd - mips32_GuestException);
+
+ memcpy(gebase + offset, mips32_GuestException,
+ mips32_GuestExceptionEnd - mips32_GuestException);
+
+ /* Invalidate the icache for these ranges */
+ mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
+
+ /* Allocate the comm page for the guest kernel; a TLB entry will be
+ * reserved to map GVA 0xFFFF8000 to this page.
+ */
+ vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
+
+ if (!vcpu->arch.kseg0_commpage) {
+ err = -ENOMEM;
+ goto out_free_gebase;
+ }
+
+ kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
+ kvm_mips_commpage_init(vcpu);
+
+ /* Init */
+ vcpu->arch.last_sched_cpu = -1;
+
+ /* Start off the timer */
+ kvm_mips_emulate_count(vcpu);
+
+ return vcpu;
+
+out_free_gebase:
+ kfree(gebase);
+
+out_free_cpu:
+ kfree(vcpu);
+
+out:
+ return ERR_PTR(err);
+}
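+
+/*
+ * Layout of the gebase area built in kvm_arch_vcpu_create() above:
+ * 0x0 TLB refill entry, 0x180 general exception entry,
+ * 0x200 + i * VECTORSPACING (i = 0..7) vectored interrupt entries, and
+ * 0x2000 the relocated guest exception handler (mips32_GuestException).
+ */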
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+ kvm_vcpu_uninit(vcpu);
+
+ kvm_mips_dump_stats(vcpu);
+
+ if (vcpu->arch.guest_ebase)
+ kfree(vcpu->arch.guest_ebase);
+
+ if (vcpu->arch.kseg0_commpage)
+ kfree(vcpu->arch.kseg0_commpage);
+
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvm_arch_vcpu_free(vcpu);
+}
+
+int
+kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ int r = 0;
+ sigset_t sigsaved;
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+ if (vcpu->mmio_needed) {
+ if (!vcpu->mmio_is_write)
+ kvm_mips_complete_mmio_load(vcpu, run);
+ vcpu->mmio_needed = 0;
+ }
+
+ /* Check if we have any exceptions/interrupts pending */
+ kvm_mips_deliver_interrupts(vcpu,
+ kvm_read_c0_guest_cause(vcpu->arch.cop0));
+
+ local_irq_disable();
+ kvm_guest_enter();
+
+ r = __kvm_mips_vcpu_run(run, vcpu);
+
+ kvm_guest_exit();
+ local_irq_enable();
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+ return r;
+}
+
+int
+kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+ int intr = (int)irq->irq;
+ struct kvm_vcpu *dvcpu = NULL;
+
+ if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+ kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
+ (int)intr);
+
+ if (irq->cpu == -1)
+ dvcpu = vcpu;
+ else
+ dvcpu = vcpu->kvm->vcpus[irq->cpu];
+
+ if (intr == 2 || intr == 3 || intr == 4) {
+ kvm_mips_callbacks->queue_io_int(dvcpu, irq);
+
+ } else if (intr == -2 || intr == -3 || intr == -4) {
+ kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
+ } else {
+ kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
+ irq->cpu, irq->irq);
+ return -EINVAL;
+ }
+
+ dvcpu->arch.wait = 0;
+
+ if (waitqueue_active(&dvcpu->wq)) {
+ wake_up_interruptible(&dvcpu->wq);
+ }
+
+ return 0;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+long
+kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ long r;
+ int intr;
+
+ switch (ioctl) {
+ case KVM_NMI:
+ /* Treat the NMI as a CPU reset */
+ r = kvm_mips_reset_vcpu(vcpu);
+ break;
+ case KVM_INTERRUPT:
+ {
+ struct kvm_mips_interrupt irq;
+ r = -EFAULT;
+ if (copy_from_user(&irq, argp, sizeof(irq)))
+ goto out;
+
+ intr = (int)irq.irq;
+
+ kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+ irq.irq);
+
+ r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+ break;
+ }
+ default:
+ r = -EINVAL;
+ }
+
+out:
+ return r;
+}
+
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ struct kvm_memory_slot *memslot;
+ unsigned long ga, ga_end;
+ int is_dirty = 0;
+ int r;
+ unsigned long n;
+
+ mutex_lock(&kvm->slots_lock);
+
+ r = kvm_get_dirty_log(kvm, log, &is_dirty);
+ if (r)
+ goto out;
+
+ /* If nothing is dirty, don't bother messing with page tables. */
+ if (is_dirty) {
+ memslot = &kvm->memslots->memslots[log->slot];
+
+ ga = memslot->base_gfn << PAGE_SHIFT;
+ ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+ printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
+ ga_end);
+
+ n = kvm_dirty_bitmap_bytes(memslot);
+ memset(memslot->dirty_bitmap, 0, n);
+ }
+
+ r = 0;
+out:
+ mutex_unlock(&kvm->slots_lock);
+ return r;
+
+}
+
+long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ long r;
+
+ switch (ioctl) {
+ default:
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+ int ret;
+
+ if (kvm_mips_callbacks) {
+ kvm_err("kvm: module already exists\n");
+ return -EEXIST;
+ }
+
+ ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
+
+ return ret;
+}
+
+void kvm_arch_exit(void)
+{
+ kvm_mips_callbacks = NULL;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ return -ENOTSUPP;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+ return VM_FAULT_SIGBUS;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+ int r;
+
+ switch (ext) {
+ case KVM_CAP_COALESCED_MMIO:
+ r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+ break;
+ default:
+ r = 0;
+ break;
+ }
+ return r;
+
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ return kvm_mips_pending_timer(vcpu);
+}
+
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct mips_coproc *cop0;
+
+ if (!vcpu)
+ return -1;
+
+ printk("VCPU Register Dump:\n");
+ printk("\tpc = 0x%08lx\n", vcpu->arch.pc);;
+ printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+ for (i = 0; i < 32; i += 4) {
+ printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+ vcpu->arch.gprs[i],
+ vcpu->arch.gprs[i + 1],
+ vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
+ }
+ printk("\thi: 0x%08lx\n", vcpu->arch.hi);
+ printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
+
+ cop0 = vcpu->arch.cop0;
+ printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+ kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
+
+ printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ for (i = 0; i < 32; i++)
+ vcpu->arch.gprs[i] = regs->gprs[i];
+
+ vcpu->arch.hi = regs->hi;
+ vcpu->arch.lo = regs->lo;
+ vcpu->arch.pc = regs->pc;
+
+ return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ for (i = 0; i < 32; i++)
+ regs->gprs[i] = vcpu->arch.gprs[i];
+
+ regs->hi = vcpu->arch.hi;
+ regs->lo = vcpu->arch.lo;
+ regs->pc = vcpu->arch.pc;
+
+ return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
+}
+
+void kvm_mips_comparecount_func(unsigned long data)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+ kvm_mips_callbacks->queue_timer_int(vcpu);
+
+ vcpu->arch.wait = 0;
+ if (waitqueue_active(&vcpu->wq)) {
+ wake_up_interruptible(&vcpu->wq);
+ }
+}
+
+/*
+ * low level hrtimer wake routine.
+ */
+enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+ struct kvm_vcpu *vcpu;
+
+ vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+ kvm_mips_comparecount_func((unsigned long) vcpu);
+ hrtimer_forward_now(&vcpu->arch.comparecount_timer,
+ ktime_set(0, MS_TO_NS(10)));
+ return HRTIMER_RESTART;
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ kvm_mips_callbacks->vcpu_init(vcpu);
+ hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+ kvm_mips_init_shadow_tlb(vcpu);
+ return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ return;
+}
+
+int
+kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
+{
+ return 0;
+}
+
+/* Initial guest state */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ return kvm_mips_callbacks->vcpu_setup(vcpu);
+}
+
+static
+void kvm_mips_set_c0_status(void)
+{
+ uint32_t status = read_c0_status();
+
+ if (cpu_has_fpu)
+ status |= (ST0_CU1);
+
+ if (cpu_has_dsp)
+ status |= (ST0_MX);
+
+ write_c0_status(status);
+ ehb();
+}
+
+/*
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ uint32_t cause = vcpu->arch.host_cp0_cause;
+ uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ /* Set a default exit reason */
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+
+ /* Set the appropriate status bits based on host CPU features,
+ * before we hit the scheduler.
+ */
+ kvm_mips_set_c0_status();
+
+ local_irq_enable();
+
+ kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
+ cause, opc, run, vcpu);
+
+ /* Do a privilege check: if in user mode, most of these exit
+ * conditions end up causing an exception to be delivered to the
+ * guest kernel.
+ */
+ er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+ if (er == EMULATE_PRIV_FAIL) {
+ goto skip_emul;
+ } else if (er == EMULATE_FAIL) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ goto skip_emul;
+ }
+
+ switch (exccode) {
+ case T_INT:
+ kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
+
+ ++vcpu->stat.int_exits;
+ trace_kvm_exit(vcpu, INT_EXITS);
+
+ if (need_resched()) {
+ cond_resched();
+ }
+
+ ret = RESUME_GUEST;
+ break;
+
+ case T_COP_UNUSABLE:
+ kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
+
+ ++vcpu->stat.cop_unusable_exits;
+ trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
+ ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
+ /* XXXKYMA: Might need to return to user space */
+ if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
+ ret = RESUME_HOST;
+ }
+ break;
+
+ case T_TLB_MOD:
+ ++vcpu->stat.tlbmod_exits;
+ trace_kvm_exit(vcpu, TLBMOD_EXITS);
+ ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
+ break;
+
+ case T_TLB_ST_MISS:
+ kvm_debug
+ ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+ badvaddr);
+
+ ++vcpu->stat.tlbmiss_st_exits;
+ trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
+ ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
+ break;
+
+ case T_TLB_LD_MISS:
+ kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+
+ ++vcpu->stat.tlbmiss_ld_exits;
+ trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
+ ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
+ break;
+
+ case T_ADDR_ERR_ST:
+ ++vcpu->stat.addrerr_st_exits;
+ trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
+ ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
+ break;
+
+ case T_ADDR_ERR_LD:
+ ++vcpu->stat.addrerr_ld_exits;
+ trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
+ ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
+ break;
+
+ case T_SYSCALL:
+ ++vcpu->stat.syscall_exits;
+ trace_kvm_exit(vcpu, SYSCALL_EXITS);
+ ret = kvm_mips_callbacks->handle_syscall(vcpu);
+ break;
+
+ case T_RES_INST:
+ ++vcpu->stat.resvd_inst_exits;
+ trace_kvm_exit(vcpu, RESVD_INST_EXITS);
+ ret = kvm_mips_callbacks->handle_res_inst(vcpu);
+ break;
+
+ case T_BREAK:
+ ++vcpu->stat.break_inst_exits;
+ trace_kvm_exit(vcpu, BREAK_INST_EXITS);
+ ret = kvm_mips_callbacks->handle_break(vcpu);
+ break;
+
+ default:
+ kvm_err
+ ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
+ exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+ kvm_read_c0_guest_status(vcpu->arch.cop0));
+ kvm_arch_vcpu_dump_regs(vcpu);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ break;
+
+ }
+
+skip_emul:
+ local_irq_disable();
+
+ if (er == EMULATE_DONE && !(ret & RESUME_HOST))
+ kvm_mips_deliver_interrupts(vcpu, cause);
+
+ if (!(ret & RESUME_HOST)) {
+ /* Only check for signals if not already exiting to userspace */
+ if (signal_pending(current)) {
+ run->exit_reason = KVM_EXIT_INTR;
+ ret = (-EINTR << 2) | RESUME_HOST;
+ ++vcpu->stat.signal_exits;
+ trace_kvm_exit(vcpu, SIGNAL_EXITS);
+ }
+ }
+
+ return ret;
+}
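+
+/*
+ * Example of the return encoding described above: the signal path returns
+ * (-EINTR << 2) | RESUME_HOST, so the low bits request a switch back to the
+ * host while the error code (here -EINTR) sits in the upper bits (ret >> 2).
+ */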
+
+int __init kvm_mips_init(void)
+{
+ int ret;
+
+ ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+ if (ret)
+ return ret;
+
+ /* On MIPS, kernel modules are executed from "mapped space", which
+ * requires TLBs. The TLB handling code is statically linked with the
+ * rest of the kernel (kvm_tlb.c) to avoid the possibility of double
+ * faulting. The issue is that the TLB code references routines that
+ * are part of the KVM module, which are only available once the
+ * module is loaded.
+ */
+ kvm_mips_gfn_to_pfn = gfn_to_pfn;
+ kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
+ kvm_mips_is_error_pfn = is_error_pfn;
+
+ pr_info("KVM/MIPS Initialized\n");
+ return 0;
+}
+
+void __exit kvm_mips_exit(void)
+{
+ kvm_exit();
+
+ kvm_mips_gfn_to_pfn = NULL;
+ kvm_mips_release_pfn_clean = NULL;
+ kvm_mips_is_error_pfn = NULL;
+
+ pr_info("KVM/MIPS unloaded\n");
+}
+
+module_init(kvm_mips_init);
+module_exit(kvm_mips_exit);
+
+EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
new file mode 100644
index 000000000000..a4a8c85cc8f7
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_comm.h
@@ -0,0 +1,23 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: commpage: mapped into guest kernel space
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+ struct mips_coproc cop0; /* COP0 state is mapped into Guest kernel via commpage */
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET 0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
new file mode 100644
index 000000000000..3873b1ecc40f
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_commpage.c
@@ -0,0 +1,37 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* commpage, currently used for Virtual COP0 registers.
+* Mapped into the guest kernel @ 0x0.
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_comm.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+ memset(page, 0, sizeof(struct kvm_mips_commpage));
+
+ /* Specific init values for fields */
+ vcpu->arch.cop0 = &page->cop0;
+ memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
+
+ return;
+}
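+
+/*
+ * Because vcpu->arch.cop0 now points into the commpage, the host-side
+ * emulation code and the dynamically patched guest code (see
+ * kvm_mips_dyntrans.c) operate on the same COP0 image; that is what allows
+ * privileged mfc0/mtc0 instructions to be rewritten as plain loads and
+ * stores.
+ */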
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
new file mode 100644
index 000000000000..96528e2d1ea6
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_dyntrans.c
@@ -0,0 +1,149 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+
+#include "kvm_mips_comm.h"
+
+#define SYNCI_TEMPLATE 0x041f0000
+#define SYNCI_BASE(x) (((x) >> 21) & 0x1f)
+#define SYNCI_OFFSET(x) ((x) & 0xffff)
+
+#define LW_TEMPLATE 0x8c000000
+#define CLEAR_TEMPLATE 0x00000020
+#define SW_TEMPLATE 0xac000000
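+
+/*
+ * The templates above are bare opcodes with their register/offset fields
+ * zeroed: 0x8c000000 is "lw" and 0xac000000 is "sw" with rt/base/offset
+ * cleared, while 0x041f0000 is the fixed part of the "synci" encoding.
+ * The translators below OR the base register (where used) in at bit 21,
+ * the rt field at bit 16, and the 16-bit offset into the low halfword.
+ */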
+
+int
+kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+ struct kvm_vcpu *vcpu)
+{
+ int result = 0;
+ unsigned long kseg0_opc;
+ uint32_t synci_inst = 0x0;
+
+ /* Replace the CACHE instruction with a NOP */
+ kseg0_opc =
+ CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+ (vcpu, (unsigned long) opc));
+ memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+ mips32_SyncICache(kseg0_opc, 32);
+
+ return result;
+}
+
+/*
+ * Address-based CACHE instructions are transformed into SYNCIs. A little
+ * heavy for just D-cache invalidates, but it avoids an expensive trap.
+ */
+int
+kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+ struct kvm_vcpu *vcpu)
+{
+ int result = 0;
+ unsigned long kseg0_opc;
+ uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
+
+ base = (inst >> 21) & 0x1f;
+ offset = inst & 0xffff;
+ synci_inst |= (base << 21);
+ synci_inst |= offset;
+
+ kseg0_opc =
+ CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+ (vcpu, (unsigned long) opc));
+ memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+ mips32_SyncICache(kseg0_opc, 32);
+
+ return result;
+}
+
+int
+kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+ int32_t rt, rd, sel;
+ uint32_t mfc0_inst;
+ unsigned long kseg0_opc, flags;
+
+ rt = (inst >> 16) & 0x1f;
+ rd = (inst >> 11) & 0x1f;
+ sel = inst & 0x7;
+
+ if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+ mfc0_inst = CLEAR_TEMPLATE;
+ mfc0_inst |= ((rt & 0x1f) << 16);
+ } else {
+ mfc0_inst = LW_TEMPLATE;
+ mfc0_inst |= ((rt & 0x1f) << 16);
+ mfc0_inst |=
+ offsetof(struct mips_coproc,
+ reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
+ cop0);
+ }
+
+ if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+ kseg0_opc =
+ CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+ (vcpu, (unsigned long) opc));
+ memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
+ mips32_SyncICache(kseg0_opc, 32);
+ } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+ local_irq_save(flags);
+ memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
+ mips32_SyncICache((unsigned long) opc, 32);
+ local_irq_restore(flags);
+ } else {
+ kvm_err("%s: Invalid address: %p\n", __func__, opc);
+ return -EFAULT;
+ }
+
+ return 0;
+}
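+
+/*
+ * The immediate built above is offsetof(struct kvm_mips_commpage, cop0) +
+ * offsetof(struct mips_coproc, reg[rd][sel]), and the base register field
+ * of the template is left at $zero, so the patched lw reads the saved COP0
+ * register directly out of the commpage mapping instead of trapping on
+ * mfc0. kvm_mips_trans_mtc0() below does the mirror-image rewrite with sw.
+ */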
+
+int
+kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+ int32_t rt, rd, sel;
+ uint32_t mtc0_inst = SW_TEMPLATE;
+ unsigned long kseg0_opc, flags;
+
+ rt = (inst >> 16) & 0x1f;
+ rd = (inst >> 11) & 0x1f;
+ sel = inst & 0x7;
+
+ mtc0_inst |= ((rt & 0x1f) << 16);
+ mtc0_inst |=
+ offsetof(struct mips_coproc,
+ reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
+
+ if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+ kseg0_opc =
+ CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+ (vcpu, (unsigned long) opc));
+ memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
+ mips32_SyncICache(kseg0_opc, 32);
+ } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+ local_irq_save(flags);
+ memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
+ mips32_SyncICache((unsigned long) opc, 32);
+ local_irq_restore(flags);
+ } else {
+ kvm_err("%s: Invalid address: %p\n", __func__, opc);
+ return -EFAULT;
+ }
+
+ return 0;
+}
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
new file mode 100644
index 000000000000..2b2bac9a40aa
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -0,0 +1,1826 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Instruction/Exception emulation
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <linux/random.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-info.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/inst.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#include "trace.h"
+
+/*
+ * Compute the return address and do emulate branch simulation, if required.
+ * This function should be called only when the branch delay slot is active.
+ */
+unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
+ unsigned long instpc)
+{
+ unsigned int dspcontrol;
+ union mips_instruction insn;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ long epc = instpc;
+ long nextpc = KVM_INVALID_INST;
+
+ if (epc & 3)
+ goto unaligned;
+
+ /*
+ * Read the instruction
+ */
+ insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+
+ if (insn.word == KVM_INVALID_INST)
+ return KVM_INVALID_INST;
+
+ switch (insn.i_format.opcode) {
+ /*
+ * jr and jalr are in r_format format.
+ */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jalr_op:
+ arch->gprs[insn.r_format.rd] = epc + 8;
+ /* Fall through */
+ case jr_op:
+ nextpc = arch->gprs[insn.r_format.rs];
+ break;
+ }
+ break;
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+ * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+ */
+ case bcond_op:
+ switch (insn.i_format.rt) {
+ case bltz_op:
+ case bltzl_op:
+ if ((long)arch->gprs[insn.i_format.rs] < 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bgez_op:
+ case bgezl_op:
+ if ((long)arch->gprs[insn.i_format.rs] >= 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bltzal_op:
+ case bltzall_op:
+ arch->gprs[31] = epc + 8;
+ if ((long)arch->gprs[insn.i_format.rs] < 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bgezal_op:
+ case bgezall_op:
+ arch->gprs[31] = epc + 8;
+ if ((long)arch->gprs[insn.i_format.rs] >= 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+ case bposge32_op:
+ if (!cpu_has_dsp)
+ goto sigill;
+
+ dspcontrol = rddsp(0x01);
+
+ if (dspcontrol >= 32) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ } else
+ epc += 8;
+ nextpc = epc;
+ break;
+ }
+ break;
+
+ /*
+ * These are unconditional and in j_format.
+ */
+ case jal_op:
+ arch->gprs[31] = instpc + 8;
+ /* Fall through */
+ case j_op:
+ epc += 4;
+ epc >>= 28;
+ epc <<= 28;
+ epc |= (insn.j_format.target << 2);
+ nextpc = epc;
+ break;
+
+ /*
+ * These are conditional and in i_format.
+ */
+ case beq_op:
+ case beql_op:
+ if (arch->gprs[insn.i_format.rs] ==
+ arch->gprs[insn.i_format.rt])
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bne_op:
+ case bnel_op:
+ if (arch->gprs[insn.i_format.rs] !=
+ arch->gprs[insn.i_format.rt])
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case blez_op: /* not really i_format */
+ case blezl_op:
+ /* rt field assumed to be zero */
+ if ((long)arch->gprs[insn.i_format.rs] <= 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bgtz_op:
+ case bgtzl_op:
+ /* rt field assumed to be zero */
+ if ((long)arch->gprs[insn.i_format.rs] > 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ /*
+ * And now the FPA/cp1 branch instructions.
+ */
+ case cop1_op:
+ printk("%s: unsupported cop1_op\n", __func__);
+ break;
+ }
+
+ return nextpc;
+
+unaligned:
+ printk("%s: unaligned epc\n", __func__);
+ return nextpc;
+
+sigill:
+ printk("%s: DSP branch but not DSP ASE\n", __func__);
+ return nextpc;
+}
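+
+/*
+ * Worked example of the branch arithmetic above, with assumed values: a
+ * taken beq at guest PC 0x80001000 with simmediate 0x10 resolves to
+ * 0x80001000 + 4 + (0x10 << 2) = 0x80001044, while the not-taken path
+ * skips the delay slot and continues at 0x80001008.
+ */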
+
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+ unsigned long branch_pc;
+ enum emulation_result er = EMULATE_DONE;
+
+ if (cause & CAUSEF_BD) {
+ branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
+ if (branch_pc == KVM_INVALID_INST) {
+ er = EMULATE_FAIL;
+ } else {
+ vcpu->arch.pc = branch_pc;
+ kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+ }
+ } else
+ vcpu->arch.pc += 4;
+
+ kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+
+ return er;
+}
+
+/*
+ * Every time the compare register is written, we need to decide when to
+ * fire the timer that represents timer ticks to the guest.
+ */
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_DONE;
+
+ /* If COUNT is enabled */
+ if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
+ hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+ hrtimer_start(&vcpu->arch.comparecount_timer,
+ ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
+ } else {
+ hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+ }
+
+ return er;
+}
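+
+/*
+ * In other words, guest timer ticks are modeled by a fixed 10 ms host
+ * hrtimer that is (re)armed here whenever Cause.DC is clear; each expiry
+ * queues a guest timer interrupt via kvm_mips_comparecount_wakeup() rather
+ * than by comparing against the real COUNT register.
+ */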
+
+enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_DONE;
+
+ if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+ kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+ kvm_read_c0_guest_epc(cop0));
+ kvm_clear_c0_guest_status(cop0, ST0_EXL);
+ vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+
+ } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+ kvm_clear_c0_guest_status(cop0, ST0_ERL);
+ vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+ } else {
+ printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+ vcpu->arch.pc);
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+
+ kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
+ vcpu->arch.pending_exceptions);
+
+ ++vcpu->stat.wait_exits;
+ trace_kvm_exit(vcpu, WAIT_EXITS);
+ if (!vcpu->arch.pending_exceptions) {
+ vcpu->arch.wait = 1;
+ kvm_vcpu_block(vcpu);
+
+ /* If we are runnable, then definitely go off to user space to
+ * check if any I/O interrupts are pending.
+ */
+ if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+ vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+ }
+ }
+
+ return er;
+}
+
+/* XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so
+ * that we can catch this, if things ever change.
+ */
+enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_FAIL;
+ uint32_t pc = vcpu->arch.pc;
+
+ printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+ return er;
+}
+
+/* Write Guest TLB Entry @ Index */
+enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ int index = kvm_read_c0_guest_index(cop0);
+ enum emulation_result er = EMULATE_DONE;
+ struct kvm_mips_tlb *tlb = NULL;
+ uint32_t pc = vcpu->arch.pc;
+
+ if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+ printk("%s: illegal index: %d\n", __func__, index);
+ printk
+ ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+ pc, index, kvm_read_c0_guest_entryhi(cop0),
+ kvm_read_c0_guest_entrylo0(cop0),
+ kvm_read_c0_guest_entrylo1(cop0),
+ kvm_read_c0_guest_pagemask(cop0));
+ index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
+ }
+
+ tlb = &vcpu->arch.guest_tlb[index];
+#if 1
+ /* Probe the shadow host TLB for the entry being overwritten;
+ * if one matches, invalidate it. */
+ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+#endif
+
+ tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+ tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+ tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+ tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+ kvm_debug
+ ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+ pc, index, kvm_read_c0_guest_entryhi(cop0),
+ kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
+ kvm_read_c0_guest_pagemask(cop0));
+
+ return er;
+}
+
+/* Write Guest TLB Entry @ Random Index */
+enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_DONE;
+ struct kvm_mips_tlb *tlb = NULL;
+ uint32_t pc = vcpu->arch.pc;
+ int index;
+
+#if 1
+ get_random_bytes(&index, sizeof(index));
+ index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
+#else
+ index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
+#endif
+
+ if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+ printk("%s: illegal index: %d\n", __func__, index);
+ return EMULATE_FAIL;
+ }
+
+ tlb = &vcpu->arch.guest_tlb[index];
+
+#if 1
+ /* Probe the shadow host TLB for the entry being overwritten;
+ * if one matches, invalidate it. */
+ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+#endif
+
+ tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+ tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+ tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+ tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+ kvm_debug
+ ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+ pc, index, kvm_read_c0_guest_entryhi(cop0),
+ kvm_read_c0_guest_entrylo0(cop0),
+ kvm_read_c0_guest_entrylo1(cop0));
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ long entryhi = kvm_read_c0_guest_entryhi(cop0);
+ enum emulation_result er = EMULATE_DONE;
+ uint32_t pc = vcpu->arch.pc;
+ int index = -1;
+
+ index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+
+ kvm_write_c0_guest_index(cop0, index);
+
+ kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+ index);
+
+ return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_DONE;
+ int32_t rt, rd, copz, sel, co_bit, op;
+ uint32_t pc = vcpu->arch.pc;
+ unsigned long curr_pc;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL) {
+ return er;
+ }
+
+ copz = (inst >> 21) & 0x1f;
+ rt = (inst >> 16) & 0x1f;
+ rd = (inst >> 11) & 0x1f;
+ sel = inst & 0x7;
+ co_bit = (inst >> 25) & 1;
+
+ /* Verify that the register is valid */
+ if (rd > MIPS_CP0_DESAVE) {
+ printk("Invalid rd: %d\n", rd);
+ er = EMULATE_FAIL;
+ goto done;
+ }
+
+ if (co_bit) {
+ op = (inst) & 0xff;
+
+ switch (op) {
+ case tlbr_op: /* Read indexed TLB entry */
+ er = kvm_mips_emul_tlbr(vcpu);
+ break;
+ case tlbwi_op: /* Write indexed */
+ er = kvm_mips_emul_tlbwi(vcpu);
+ break;
+ case tlbwr_op: /* Write random */
+ er = kvm_mips_emul_tlbwr(vcpu);
+ break;
+ case tlbp_op: /* TLB Probe */
+ er = kvm_mips_emul_tlbp(vcpu);
+ break;
+ case rfe_op:
+ printk("!!!COP0_RFE!!!\n");
+ break;
+ case eret_op:
+ er = kvm_mips_emul_eret(vcpu);
+ goto dont_update_pc;
+ break;
+ case wait_op:
+ er = kvm_mips_emul_wait(vcpu);
+ break;
+ }
+ } else {
+ switch (copz) {
+ case mfc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ cop0->stat[rd][sel]++;
+#endif
+ /* Get reg */
+ if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+ /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
+ vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
+ } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+ vcpu->arch.gprs[rt] = 0x0;
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+ } else {
+ vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+ }
+
+ kvm_debug
+ ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
+ pc, rd, sel, rt, vcpu->arch.gprs[rt]);
+
+ break;
+
+ case dmfc_op:
+ vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+ break;
+
+ case mtc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ cop0->stat[rd][sel]++;
+#endif
+ if ((rd == MIPS_CP0_TLB_INDEX)
+ && (vcpu->arch.gprs[rt] >=
+ KVM_MIPS_GUEST_TLB_SIZE)) {
+ printk("Invalid TLB Index: %ld",
+ vcpu->arch.gprs[rt]);
+ er = EMULATE_FAIL;
+ break;
+ }
+#define C0_EBASE_CORE_MASK 0xff
+ if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
+ /* Preserve CORE number */
+ kvm_change_c0_guest_ebase(cop0,
+ ~(C0_EBASE_CORE_MASK),
+ vcpu->arch.gprs[rt]);
+ printk("MTCz, cop0->reg[EBASE]: %#lx\n",
+ kvm_read_c0_guest_ebase(cop0));
+ } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
+ uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
+ if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
+ &&
+ (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
+ != nasid)) {
+
+ kvm_debug
+ ("MTCz, change ASID from %#lx to %#lx\n",
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
+ ASID_MASK(vcpu->arch.gprs[rt]));
+
+ /* Blow away the shadow host TLBs */
+ kvm_mips_flush_host_tlb(1);
+ }
+ kvm_write_c0_guest_entryhi(cop0,
+ vcpu->arch.gprs[rt]);
+ }
+ /* Are we writing to COUNT */
+ else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+ /* Linux doesn't seem to write into COUNT; writes are
+ * currently ignored (enable the EMULATE_FAIL below to
+ * catch them).
+ */
+ /*er = EMULATE_FAIL; */
+ goto done;
+ } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
+ kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
+ pc, kvm_read_c0_guest_compare(cop0),
+ vcpu->arch.gprs[rt]);
+
+ /* If we are writing to COMPARE */
+ /* Clear pending timer interrupt, if any */
+ kvm_mips_callbacks->dequeue_timer_int(vcpu);
+ kvm_write_c0_guest_compare(cop0,
+ vcpu->arch.gprs[rt]);
+ } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+ kvm_write_c0_guest_status(cop0,
+ vcpu->arch.gprs[rt]);
+ /* Make sure that CU1 and NMI bits are never set */
+ kvm_clear_c0_guest_status(cop0,
+ (ST0_CU1 | ST0_NMI));
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+ } else {
+ cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+ }
+
+ kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
+ rd, sel, cop0->reg[rd][sel]);
+ break;
+
+ case dmtc_op:
+ printk
+ ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+ vcpu->arch.pc, rt, rd, sel);
+ er = EMULATE_FAIL;
+ break;
+
+ case mfmcz_op:
+#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
+ cop0->stat[MIPS_CP0_STATUS][0]++;
+#endif
+ if (rt != 0) {
+ vcpu->arch.gprs[rt] =
+ kvm_read_c0_guest_status(cop0);
+ }
+ /* EI */
+ if (inst & 0x20) {
+ kvm_debug("[%#lx] mfmcz_op: EI\n",
+ vcpu->arch.pc);
+ kvm_set_c0_guest_status(cop0, ST0_IE);
+ } else {
+ kvm_debug("[%#lx] mfmcz_op: DI\n",
+ vcpu->arch.pc);
+ kvm_clear_c0_guest_status(cop0, ST0_IE);
+ }
+
+ break;
+
+ case wrpgpr_op:
+ {
+ uint32_t css =
+ cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+ uint32_t pss =
+ (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
+ /* We don't support any shadow register sets, so
+ * SRSCtl[PSS] == SRSCtl[CSS] == 0. */
+ if (css || pss) {
+ er = EMULATE_FAIL;
+ break;
+ }
+ kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
+ vcpu->arch.gprs[rt]);
+ vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
+ }
+ break;
+ default:
+ printk
+ ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+ vcpu->arch.pc, copz);
+ er = EMULATE_FAIL;
+ break;
+ }
+ }
+
+done:
+ /*
+ * Rollback PC only if emulation was unsuccessful
+ */
+ if (er == EMULATE_FAIL) {
+ vcpu->arch.pc = curr_pc;
+ }
+
+dont_update_pc:
+ /*
+ * This is for special instructions whose emulation
+ * updates the PC, so do not overwrite the PC under
+ * any circumstances
+ */
+
+ return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DO_MMIO;
+ int32_t op, base, rt, offset;
+ uint32_t bytes;
+ void *data = run->mmio.data;
+ unsigned long curr_pc;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ rt = (inst >> 16) & 0x1f;
+ base = (inst >> 21) & 0x1f;
+ offset = inst & 0xffff;
+ op = (inst >> 26) & 0x3f;
+
+ switch (op) {
+ case sb_op:
+ bytes = 1;
+ if (bytes > sizeof(run->mmio.data)) {
+ kvm_err("%s: bad MMIO length: %d\n", __func__,
+ run->mmio.len);
+ }
+ run->mmio.phys_addr =
+ kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+ host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+ er = EMULATE_FAIL;
+ break;
+ }
+ run->mmio.len = bytes;
+ run->mmio.is_write = 1;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 1;
+ *(u8 *) data = vcpu->arch.gprs[rt];
+ kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
+ *(uint8_t *) data);
+
+ break;
+
+ case sw_op:
+ bytes = 4;
+ if (bytes > sizeof(run->mmio.data)) {
+ kvm_err("%s: bad MMIO length: %d\n", __func__,
+ run->mmio.len);
+ }
+ run->mmio.phys_addr =
+ kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+ host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ run->mmio.len = bytes;
+ run->mmio.is_write = 1;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 1;
+ *(uint32_t *) data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(uint32_t *) data);
+ break;
+
+ case sh_op:
+ bytes = 2;
+ if (bytes > sizeof(run->mmio.data)) {
+ kvm_err("%s: bad MMIO length: %d\n", __func__,
+ run->mmio.len);
+ }
+ run->mmio.phys_addr =
+ kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+ host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ run->mmio.len = bytes;
+ run->mmio.is_write = 1;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 1;
+ *(uint16_t *) data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(uint32_t *) data);
+ break;
+
+ default:
+ printk("Store not yet supported");
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ /*
+ * Rollback PC if emulation was unsuccessful
+ */
+ if (er == EMULATE_FAIL) {
+ vcpu->arch.pc = curr_pc;
+ }
+
+ return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DO_MMIO;
+ int32_t op, base, rt, offset;
+ uint32_t bytes;
+
+ rt = (inst >> 16) & 0x1f;
+ base = (inst >> 21) & 0x1f;
+ offset = inst & 0xffff;
+ op = (inst >> 26) & 0x3f;
+
+ vcpu->arch.pending_load_cause = cause;
+ vcpu->arch.io_gpr = rt;
+
+ switch (op) {
+ case lw_op:
+ bytes = 4;
+ if (bytes > sizeof(run->mmio.data)) {
+ kvm_err("%s: bad MMIO length: %d\n", __func__,
+ run->mmio.len);
+ er = EMULATE_FAIL;
+ break;
+ }
+ run->mmio.phys_addr =
+ kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+ host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ run->mmio.len = bytes;
+ run->mmio.is_write = 0;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 0;
+ break;
+
+ case lh_op:
+ case lhu_op:
+ bytes = 2;
+ if (bytes > sizeof(run->mmio.data)) {
+ kvm_err("%s: bad MMIO length: %d\n", __func__,
+ run->mmio.len);
+ er = EMULATE_FAIL;
+ break;
+ }
+ run->mmio.phys_addr =
+ kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+ host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ run->mmio.len = bytes;
+ run->mmio.is_write = 0;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 0;
+
+ if (op == lh_op)
+ vcpu->mmio_needed = 2;
+ else
+ vcpu->mmio_needed = 1;
+
+ break;
+
+ case lbu_op:
+ case lb_op:
+ bytes = 1;
+ if (bytes > sizeof(run->mmio.data)) {
+ kvm_err("%s: bad MMIO length: %d\n", __func__,
+ run->mmio.len);
+ er = EMULATE_FAIL;
+ break;
+ }
+ run->mmio.phys_addr =
+ kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+ host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ run->mmio.len = bytes;
+ run->mmio.is_write = 0;
+ vcpu->mmio_is_write = 0;
+
+ if (op == lb_op)
+ vcpu->mmio_needed = 2;
+ else
+ vcpu->mmio_needed = 1;
+
+ break;
+
+ default:
+ printk("Load not yet supported");
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ return er;
+}
+
+int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
+{
+ unsigned long offset = (va & ~PAGE_MASK);
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long pa;
+ gfn_t gfn;
+ pfn_t pfn;
+
+ gfn = va >> PAGE_SHIFT;
+
+ if (gfn >= kvm->arch.guest_pmap_npages) {
+ printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
+ kvm_mips_dump_host_tlbs();
+ kvm_arch_vcpu_dump_regs(vcpu);
+ return -1;
+ }
+ pfn = kvm->arch.guest_pmap[gfn];
+ pa = (pfn << PAGE_SHIFT) | offset;
+
+ printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
+
+ mips32_SyncICache(CKSEG0ADDR(pa), 32);
+ return 0;
+}
+
+#define MIPS_CACHE_OP_INDEX_INV 0x0
+#define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
+#define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
+#define MIPS_CACHE_OP_IMP 0x3
+#define MIPS_CACHE_OP_HIT_INV 0x4
+#define MIPS_CACHE_OP_FILL_WB_INV 0x5
+#define MIPS_CACHE_OP_HIT_HB 0x6
+#define MIPS_CACHE_OP_FETCH_LOCK 0x7
+
+#define MIPS_CACHE_ICACHE 0x0
+#define MIPS_CACHE_DCACHE 0x1
+#define MIPS_CACHE_SEC 0x3
+
+enum emulation_result
+kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ extern void (*r4k_blast_dcache) (void);
+ extern void (*r4k_blast_icache) (void);
+ enum emulation_result er = EMULATE_DONE;
+ int32_t offset, cache, op_inst, op, base;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ unsigned long va;
+ unsigned long curr_pc;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ base = (inst >> 21) & 0x1f;
+ op_inst = (inst >> 16) & 0x1f;
+ offset = inst & 0xffff;
+ cache = (inst >> 16) & 0x3;
+ op = (inst >> 18) & 0x7;
+
+ va = arch->gprs[base] + offset;
+
+ kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+ cache, op, base, arch->gprs[base], offset);
+
+ /* Treat INDEX_INV as a no-op; it is basically issued by Linux on
+ * startup to invalidate the caches entirely by stepping through
+ * all the ways/indexes.
+ */
+ if (op == MIPS_CACHE_OP_INDEX_INV) {
+ kvm_debug
+ ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+ vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
+ arch->gprs[base], offset);
+
+ if (cache == MIPS_CACHE_DCACHE)
+ r4k_blast_dcache();
+ else if (cache == MIPS_CACHE_ICACHE)
+ r4k_blast_icache();
+ else {
+ printk("%s: unsupported CACHE INDEX operation\n",
+ __func__);
+ return EMULATE_FAIL;
+ }
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_cache_index(inst, opc, vcpu);
+#endif
+ goto done;
+ }
+
+ preempt_disable();
+ if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+
+ if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
+ kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+ }
+ } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+ KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+ int index;
+
+ /* If an entry already exists then skip */
+ if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
+ goto skip_fault;
+ }
+
+ /* If the address is not in the guest TLB, then give the guest a
+ * fault; the resulting handler will do the right thing.
+ */
+ index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+
+ if (index < 0) {
+ vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
+ vcpu->arch.host_cp0_badvaddr = va;
+ er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
+ vcpu);
+ preempt_enable();
+ goto dont_update_pc;
+ } else {
+ struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+ /* Check if the entry is valid; if not, set up a TLB
+ * invalid exception for the guest. */
+ if (!TLB_IS_VALID(*tlb, va)) {
+ er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
+ run, vcpu);
+ preempt_enable();
+ goto dont_update_pc;
+ } else {
+ /* We fault an entry from the guest tlb to the shadow host TLB */
+ kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+ NULL,
+ NULL);
+ }
+ }
+ } else {
+ printk
+ ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+ cache, op, base, arch->gprs[base], offset);
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto dont_update_pc;
+
+ }
+
+skip_fault:
+ /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
+ if (cache == MIPS_CACHE_DCACHE
+ && (op == MIPS_CACHE_OP_FILL_WB_INV
+ || op == MIPS_CACHE_OP_HIT_INV)) {
+ flush_dcache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ /* Replace the CACHE instruction with a SYNCI; not identical,
+ * but it avoids a trap. */
+ kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+ } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
+ flush_dcache_line(va);
+ flush_icache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ /* Replace the CACHE instruction, with a SYNCI */
+ kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+ } else {
+ printk
+ ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+ cache, op, base, arch->gprs[base], offset);
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto dont_update_pc;
+ }
+
+ preempt_enable();
+
+ dont_update_pc:
+ /*
+ * Rollback PC
+ */
+ vcpu->arch.pc = curr_pc;
+ done:
+ return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ uint32_t inst;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD) {
+ opc += 1;
+ }
+
+ inst = kvm_get_inst(opc, vcpu);
+
+ switch (((union mips_instruction)inst).r_format.opcode) {
+ case cop0_op:
+ er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+ break;
+ case sb_op:
+ case sh_op:
+ case sw_op:
+ er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+ break;
+ case lb_op:
+ case lbu_op:
+ case lhu_op:
+ case lh_op:
+ case lw_op:
+ er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+ break;
+
+ case cache_op:
+ ++vcpu->stat.cache_exits;
+ trace_kvm_exit(vcpu, CACHE_EXITS);
+ er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+ break;
+
+ default:
+ printk("Instruction emulation not supported (%p/%#x)\n", opc,
+ inst);
+ kvm_arch_vcpu_dump_regs(vcpu);
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_SYSCALL << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ printk("Trying to deliver SYSCALL when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+ unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+
+ /* set pc to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x0;
+
+ } else {
+ kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ }
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+ /* Blow away the shadow host TLBs */
+ kvm_mips_flush_host_tlb(1);
+
+ return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+ unsigned long entryhi =
+ (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
+ arch->pc);
+
+ /* set pc to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ }
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+ /* Blow away the shadow host TLBs */
+ kvm_mips_flush_host_tlb(1);
+
+ return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+ unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x0;
+ } else {
+ kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ }
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+ /* Blow away the shadow host TLBs */
+ kvm_mips_flush_host_tlb(1);
+
+ return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+ unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ } else {
+ kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ }
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+ /* Blow away the shadow host TLBs */
+ kvm_mips_flush_host_tlb(1);
+
+ return er;
+}
+
+/* TLBMOD: store into address matching TLB with Dirty bit off */
+enum emulation_result
+kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+
+#ifdef DEBUG
+ unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi(vcpu->arch.cop0));
+ int index;
+
+ /*
+ * If the address is not in the guest TLB, then we are in trouble.
+ */
+ index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+ if (index < 0) {
+ /* XXXKYMA Invalidate and retry */
+ kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
+ kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
+ __func__, entryhi);
+ kvm_mips_dump_guest_tlbs(vcpu);
+ kvm_mips_dump_host_tlbs();
+ return EMULATE_FAIL;
+ }
+#endif
+
+ er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+ return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
+ arch->pc);
+
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ } else {
+ kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
+ arch->pc);
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ }
+
+ kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+ /* Blow away the shadow host TLBs */
+ kvm_mips_flush_host_tlb(1);
+
+ return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
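+	/*
+	 * Deliver a Coprocessor Unusable exception (CE = 1, i.e. the FPU)
+	 * to the guest at its general exception vector.
+	 */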
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ }
+
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_COP_UNUSABLE << CAUSEB_EXCCODE));
+ kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
+
+ return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_RES_INST << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver RI when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_BREAK << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ printk("Trying to deliver BP when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+/*
+ * ll/sc, rdhwr, sync emulation
+ */
+
+#define OPCODE 0xfc000000
+#define BASE 0x03e00000
+#define RT 0x001f0000
+#define OFFSET 0x0000ffff
+#define LL 0xc0000000
+#define SC 0xe0000000
+#define SPEC0 0x00000000
+#define SPEC3 0x7c000000
+#define RD 0x0000f800
+#define FUNC 0x0000003f
+#define SYNC 0x0000000f
+#define RDHWR 0x0000003b
+
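+/*
+ * kvm_mips_handle_ri() emulates the few instructions that cause a Reserved
+ * Instruction exit when executed by the guest, primarily RDHWR, using the
+ * field masks defined above to decode the instruction.
+ */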
+enum emulation_result
+kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+ unsigned long curr_pc;
+ uint32_t inst;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+
+ inst = kvm_get_inst(opc, vcpu);
+
+ if (inst == KVM_INVALID_INST) {
+ printk("%s: Cannot get inst @ %p\n", __func__, opc);
+ return EMULATE_FAIL;
+ }
+
+ if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+ int rd = (inst & RD) >> 11;
+ int rt = (inst & RT) >> 16;
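+		/*
+		 * RDHWR register numbers: 0 = CPUNum, 1 = SYNCI step,
+		 * 2 = CC (Count), 3 = CCRes, 29 = UserLocal (ULR).
+		 */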
+ switch (rd) {
+ case 0: /* CPU number */
+ arch->gprs[rt] = 0;
+ break;
+ case 1: /* SYNCI length */
+ arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
+ current_cpu_data.icache.linesz);
+ break;
+ case 2: /* Read count register */
+			printk("RDHWR: Count register\n");
+ arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
+ break;
+ case 3: /* Count register resolution */
+ switch (current_cpu_data.cputype) {
+ case CPU_20KC:
+ case CPU_25KF:
+ arch->gprs[rt] = 1;
+ break;
+ default:
+ arch->gprs[rt] = 2;
+ }
+ break;
+ case 29:
+#if 1
+ arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
+#else
+ /* UserLocal not implemented */
+ er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+#endif
+ break;
+
+ default:
+ printk("RDHWR not supported\n");
+ er = EMULATE_FAIL;
+ break;
+ }
+ } else {
+ printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
+ er = EMULATE_FAIL;
+ }
+
+ /*
+ * Rollback PC only if emulation was unsuccessful
+ */
+ if (er == EMULATE_FAIL) {
+ vcpu->arch.pc = curr_pc;
+ }
+ return er;
+}
+
+enum emulation_result
+kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+ enum emulation_result er = EMULATE_DONE;
+ unsigned long curr_pc;
+
+ if (run->mmio.len > sizeof(*gpr)) {
+ printk("Bad MMIO length: %d", run->mmio.len);
+ er = EMULATE_FAIL;
+ goto done;
+ }
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, vcpu->arch.pending_load_cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
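+	/*
+	 * Copy the MMIO data into the destination GPR; for sub-word loads
+	 * mmio_needed == 2 indicates a signed load, so sign-extend, otherwise
+	 * zero-extend.
+	 */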
+ switch (run->mmio.len) {
+ case 4:
+ *gpr = *(int32_t *) run->mmio.data;
+ break;
+
+ case 2:
+		if (vcpu->mmio_needed == 2)
+			*gpr = *(int16_t *) run->mmio.data;
+		else
+			*gpr = *(uint16_t *) run->mmio.data;
+
+ break;
+ case 1:
+ if (vcpu->mmio_needed == 2)
+ *gpr = *(int8_t *) run->mmio.data;
+ else
+ *gpr = *(u8 *) run->mmio.data;
+ break;
+ }
+
+ if (vcpu->arch.pending_load_cause & CAUSEF_BD)
+ kvm_debug
+ ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+ vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+ vcpu->mmio_needed);
+
+done:
+ return er;
+}
+
+static enum emulation_result
+kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (exccode << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+
+ kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
+ exccode, kvm_read_c0_guest_epc(cop0),
+ kvm_read_c0_guest_badvaddr(cop0));
+ } else {
+ printk("Trying to deliver EXC when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result
+kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+
+ int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+
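+	/*
+	 * While the guest is in user mode only a subset of exceptions may be
+	 * delivered to it directly; anything else, or a user-mode access to
+	 * guest kernel addresses, is converted into a privilege fault below.
+	 */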
+ if (usermode) {
+ switch (exccode) {
+ case T_INT:
+ case T_SYSCALL:
+ case T_BREAK:
+ case T_RES_INST:
+ break;
+
+ case T_COP_UNUSABLE:
+ if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
+ er = EMULATE_PRIV_FAIL;
+ break;
+
+ case T_TLB_MOD:
+ break;
+
+ case T_TLB_LD_MISS:
+			/* If we are accessing guest kernel space, send an address error exception to the guest */
+ if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+ printk("%s: LD MISS @ %#lx\n", __func__,
+ badvaddr);
+ cause &= ~0xff;
+ cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
+ er = EMULATE_PRIV_FAIL;
+ }
+ break;
+
+ case T_TLB_ST_MISS:
+			/* If we are accessing guest kernel space, send an address error exception to the guest */
+ if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+ printk("%s: ST MISS @ %#lx\n", __func__,
+ badvaddr);
+ cause &= ~0xff;
+ cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
+ er = EMULATE_PRIV_FAIL;
+ }
+ break;
+
+ case T_ADDR_ERR_ST:
+ printk("%s: address error ST @ %#lx\n", __func__,
+ badvaddr);
+ if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+ cause &= ~0xff;
+ cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
+ }
+ er = EMULATE_PRIV_FAIL;
+ break;
+ case T_ADDR_ERR_LD:
+ printk("%s: address error LD @ %#lx\n", __func__,
+ badvaddr);
+ if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+ cause &= ~0xff;
+ cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
+ }
+ er = EMULATE_PRIV_FAIL;
+ break;
+ default:
+ er = EMULATE_PRIV_FAIL;
+ break;
+ }
+ }
+
+ if (er == EMULATE_PRIV_FAIL) {
+ kvm_mips_emulate_exc(cause, opc, run, vcpu);
+ }
+ return er;
+}
+
+/* User Address (UA) fault, this could happen if
+ * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
+ * case we pass on the fault to the guest kernel and let it handle it.
+ * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
+ * case we inject the TLB from the Guest TLB into the shadow host TLB
+ */
+enum emulation_result
+kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ unsigned long va = vcpu->arch.host_cp0_badvaddr;
+ int index;
+
+ kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
+ vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+
+	/* KVM would not have got the exception if this entry was valid in the
+	 * shadow host TLB.  Check the guest TLB: if the entry is not there,
+	 * send the guest an exception.  The guest exception handler should
+	 * then inject an entry into the guest TLB.
+	 */
+ index = kvm_mips_guest_tlb_lookup(vcpu,
+ (va & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi
+ (vcpu->arch.cop0)));
+ if (index < 0) {
+ if (exccode == T_TLB_LD_MISS) {
+ er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+ } else if (exccode == T_TLB_ST_MISS) {
+ er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+ } else {
+ printk("%s: invalid exc code: %d\n", __func__, exccode);
+ er = EMULATE_FAIL;
+ }
+ } else {
+ struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+
+ /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
+ if (!TLB_IS_VALID(*tlb, va)) {
+ if (exccode == T_TLB_LD_MISS) {
+ er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
+ vcpu);
+ } else if (exccode == T_TLB_ST_MISS) {
+ er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
+ vcpu);
+ } else {
+ printk("%s: invalid exc code: %d\n", __func__,
+ exccode);
+ er = EMULATE_FAIL;
+ }
+ } else {
+#ifdef DEBUG
+ kvm_debug
+ ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
+ tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+ /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
+ kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+ NULL);
+ }
+ }
+
+ return er;
+}
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
new file mode 100644
index 000000000000..1e5de16afe29
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.c
@@ -0,0 +1,243 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Interrupt delivery
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+ set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+	/* Set the Cause bits to reflect the pending timer interrupt;
+	 * the EXC code will be set when we actually deliver the
+	 * interrupt.
+	 */
+ kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+
+ /* Queue up an INT exception for the core */
+ kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+
+}
+
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+ kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+ kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+void
+kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+ int intr = (int)irq->irq;
+
+	/* Set the Cause bits to reflect the pending IO interrupt;
+	 * the EXC code will be set when we actually deliver the
+	 * interrupt.
+	 */
+ switch (intr) {
+ case 2:
+ kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+ /* Queue up an INT exception for the core */
+ kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
+ break;
+
+ case 3:
+ kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+ kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+ break;
+
+ case 4:
+ kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+ kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+ break;
+
+ default:
+ break;
+ }
+
+}
+
+void
+kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq)
+{
+ int intr = (int)irq->irq;
+ switch (intr) {
+ case -2:
+ kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+ kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
+ break;
+
+ case -3:
+ kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+ kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+ break;
+
+ case -4:
+ kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+ kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+ break;
+
+ default:
+ break;
+ }
+
+}
+
+/* Deliver the interrupt of the corresponding priority, if possible. */
+int
+kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ uint32_t cause)
+{
+ int allowed = 0;
+ uint32_t exccode;
+
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
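+	/*
+	 * An interrupt may only be delivered when the guest has interrupts
+	 * enabled (IE set), is not already handling an exception or error
+	 * (EXL and ERL clear), and has unmasked the corresponding IM bit in
+	 * the Status register.
+	 */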
+ switch (priority) {
+ case MIPS_EXC_INT_TIMER:
+ if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+ && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+ && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
+ allowed = 1;
+ exccode = T_INT;
+ }
+ break;
+
+ case MIPS_EXC_INT_IO:
+ if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+ && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+ && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
+ allowed = 1;
+ exccode = T_INT;
+ }
+ break;
+
+ case MIPS_EXC_INT_IPI_1:
+ if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+ && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+ && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
+ allowed = 1;
+ exccode = T_INT;
+ }
+ break;
+
+ case MIPS_EXC_INT_IPI_2:
+ if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+ && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+ && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
+ allowed = 1;
+ exccode = T_INT;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+	/* Are we allowed to deliver the interrupt? */
+ if (allowed) {
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
+
+ } else
+ kvm_err("Trying to deliver interrupt when EXL is already set\n");
+
+ kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
+ (exccode << CAUSEB_EXCCODE));
+
+ /* XXXSL Set PC to the interrupt exception entry point */
+ if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
+ arch->pc = KVM_GUEST_KSEG0 + 0x200;
+ else
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+ }
+
+ return allowed;
+}
+
+int
+kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ uint32_t cause)
+{
+ return 1;
+}
+
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+ unsigned long *pending = &vcpu->arch.pending_exceptions;
+ unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
+ unsigned int priority;
+
+ if (!(*pending) && !(*pending_clr))
+ return;
+
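+	/*
+	 * Walk each bitmap starting from the lowest-numbered priority:
+	 * first acknowledge/clear the interrupts queued for clearing, then
+	 * deliver the ones still pending.
+	 */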
+ priority = __ffs(*pending_clr);
+ while (priority <= MIPS_EXC_MAX) {
+ if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
+ if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
+ break;
+ }
+
+ priority = find_next_bit(pending_clr,
+ BITS_PER_BYTE * sizeof(*pending_clr),
+ priority + 1);
+ }
+
+ priority = __ffs(*pending);
+ while (priority <= MIPS_EXC_MAX) {
+ if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
+ if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
+ break;
+ }
+
+ priority = find_next_bit(pending,
+ BITS_PER_BYTE * sizeof(*pending),
+ priority + 1);
+ }
+
+}
+
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
+{
+ return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
+}
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
new file mode 100644
index 000000000000..20da7d29eede
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.h
@@ -0,0 +1,49 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Interrupts
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+/* MIPS exception priorities: exceptions (including interrupts) are queued up
+ * for the guest in the order specified by their priorities below.
+ */
+
+#define MIPS_EXC_RESET 0
+#define MIPS_EXC_SRESET 1
+#define MIPS_EXC_DEBUG_ST 2
+#define MIPS_EXC_DEBUG 3
+#define MIPS_EXC_DDB 4
+#define MIPS_EXC_NMI 5
+#define MIPS_EXC_MCHK 6
+#define MIPS_EXC_INT_TIMER 7
+#define MIPS_EXC_INT_IO 8
+#define MIPS_EXC_EXECUTE 9
+#define MIPS_EXC_INT_IPI_1 10
+#define MIPS_EXC_INT_IPI_2 11
+#define MIPS_EXC_MAX 12
+/* XXXSL More to follow */
+
+#define C_TI (_ULCAST_(1) << 30)
+
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ uint32_t cause);
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ uint32_t cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
new file mode 100644
index 000000000000..86d3b4cc348b
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_opcode.h
@@ -0,0 +1,24 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+/*
+ * Define opcode values not defined in <asm/inst.h>
+ */
+
+#ifndef __KVM_MIPS_OPCODE_H__
+#define __KVM_MIPS_OPCODE_H__
+
+/* COP0 Ops */
+#define mfmcz_op 0x0b /* 01011 */
+#define wrpgpr_op 0x0e /* 01110 */
+
+/* COP0 opcodes (only if COP0 and CO=1): */
+#define wait_op 0x20 /* 100000 */
+
+#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
new file mode 100644
index 000000000000..075904bcac1b
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_stats.c
@@ -0,0 +1,82 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: COP0 access histogram
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/kvm_host.h>
+
+char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
+ "WAIT",
+ "CACHE",
+ "Signal",
+ "Interrupt",
+ "COP0/1 Unusable",
+ "TLB Mod",
+ "TLB Miss (LD)",
+ "TLB Miss (ST)",
+ "Address Err (ST)",
+ "Address Error (LD)",
+ "System Call",
+ "Reserved Inst",
+ "Break Inst",
+ "D-Cache Flushes",
+};
+
+char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
+ "Index",
+ "Random",
+ "EntryLo0",
+ "EntryLo1",
+ "Context",
+ "PG Mask",
+ "Wired",
+ "HWREna",
+ "BadVAddr",
+ "Count",
+ "EntryHI",
+ "Compare",
+ "Status",
+ "Cause",
+ "EXC PC",
+ "PRID",
+ "Config",
+ "LLAddr",
+ "Watch Lo",
+ "Watch Hi",
+ "X Context",
+ "Reserved",
+ "Impl Dep",
+ "Debug",
+ "DEPC",
+ "PerfCnt",
+ "ErrCtl",
+ "CacheErr",
+ "TagLo",
+ "TagHi",
+ "ErrorEPC",
+ "DESAVE"
+};
+
+int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ int i, j;
+
+ printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+ for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
+ for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
+ if (vcpu->arch.cop0->stat[i][j])
+ printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+ vcpu->arch.cop0->stat[i][j]);
+ }
+ }
+#endif
+
+ return 0;
+}
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
new file mode 100644
index 000000000000..89511a9258d3
--- /dev/null
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -0,0 +1,928 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
+* TLB handlers run from KSEG0
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#define KVM_GUEST_PC_TLB 0
+#define KVM_GUEST_SP_TLB 1
+
+#define PRIx64 "llx"
+
+/* Invalidate a TLB entry by loading EntryHi with a unique, unmapped CKSEG0 address */
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
+atomic_t kvm_mips_instance;
+EXPORT_SYMBOL(kvm_mips_instance);
+
+/* These function pointers are initialized once the KVM module is loaded */
+pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
+EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
+
+void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
+
+bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_is_error_pfn);
+
+uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+ return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
+}
+
+
+uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+ return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
+}
+
+inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->arch.commpage_tlb;
+}
+
+
+/*
+ * Structure defining a TLB entry data set.
+ */
+
+void kvm_mips_dump_host_tlbs(void)
+{
+ unsigned long old_entryhi;
+ unsigned long old_pagemask;
+ struct kvm_mips_tlb tlb;
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+
+ old_entryhi = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+
+ printk("HOST TLBs:\n");
+ printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
+
+ for (i = 0; i < current_cpu_data.tlbsize; i++) {
+ write_c0_index(i);
+ mtc0_tlbw_hazard();
+
+ tlb_read();
+ tlbw_use_hazard();
+
+ tlb.tlb_hi = read_c0_entryhi();
+ tlb.tlb_lo0 = read_c0_entrylo0();
+ tlb.tlb_lo1 = read_c0_entrylo1();
+ tlb.tlb_mask = read_c0_pagemask();
+
+ printk("TLB%c%3d Hi 0x%08lx ",
+ (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+ i, tlb.tlb_hi);
+ printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+ (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+ (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+ (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+ (tlb.tlb_lo0 >> 3) & 7);
+ printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+ (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+ (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+ (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+ (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+ }
+ write_c0_entryhi(old_entryhi);
+ write_c0_pagemask(old_pagemask);
+ mtc0_tlbw_hazard();
+ local_irq_restore(flags);
+}
+
+void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_mips_tlb tlb;
+ int i;
+
+ printk("Guest TLBs:\n");
+ printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
+
+ for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+ tlb = vcpu->arch.guest_tlb[i];
+ printk("TLB%c%3d Hi 0x%08lx ",
+ (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+ i, tlb.tlb_hi);
+ printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+ (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+ (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+ (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+ (tlb.tlb_lo0 >> 3) & 7);
+ printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+ (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+ (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+ (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+ (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+ }
+}
+
+void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
+{
+ int i;
+ volatile struct kvm_mips_tlb tlb;
+
+ printk("Shadow TLBs:\n");
+ for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+ tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
+ printk("TLB%c%3d Hi 0x%08lx ",
+ (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+ i, tlb.tlb_hi);
+ printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+ (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+ (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+ (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+ (tlb.tlb_lo0 >> 3) & 7);
+ printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+ (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+ (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+ (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+ (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+ }
+}
+
+static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+{
+ pfn_t pfn;
+
+ if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+ return;
+
+ pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
+
+ if (kvm_mips_is_error_pfn(pfn)) {
+ panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+ }
+
+ kvm->arch.guest_pmap[gfn] = pfn;
+ return;
+}
+
+/* Translate guest KSEG0 addresses to Host PA */
+unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+ unsigned long gva)
+{
+ gfn_t gfn;
+ uint32_t offset = gva & ~PAGE_MASK;
+ struct kvm *kvm = vcpu->kvm;
+
+ if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
+ kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
+ __builtin_return_address(0), gva);
+ return KVM_INVALID_PAGE;
+ }
+
+ gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+
+ if (gfn >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
+ gva);
+ return KVM_INVALID_PAGE;
+ }
+ kvm_mips_map_page(vcpu->kvm, gfn);
+ return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+}
+
+/* XXXKYMA: Must be called with interrupts disabled */
+/* set flush_dcache_mask == 0 if no dcache flush required */
+int
+kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+ unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
+{
+ unsigned long flags;
+ unsigned long old_entryhi;
+ volatile int idx;
+
+ local_irq_save(flags);
+
+
+ old_entryhi = read_c0_entryhi();
+ write_c0_entryhi(entryhi);
+ mtc0_tlbw_hazard();
+
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+
+ if (idx > current_cpu_data.tlbsize) {
+ kvm_err("%s: Invalid Index: %d\n", __func__, idx);
+ kvm_mips_dump_host_tlbs();
+ return -1;
+ }
+
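+	/* A negative index means the probe missed; overwrite a random entry. */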
+ if (idx < 0) {
+ idx = read_c0_random() % current_cpu_data.tlbsize;
+ write_c0_index(idx);
+ mtc0_tlbw_hazard();
+ }
+ write_c0_entrylo0(entrylo0);
+ write_c0_entrylo1(entrylo1);
+ mtc0_tlbw_hazard();
+
+ tlb_write_indexed();
+ tlbw_use_hazard();
+
+#ifdef DEBUG
+	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
+		  "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+		  vcpu->arch.pc, idx, read_c0_entryhi(),
+		  read_c0_entrylo0(), read_c0_entrylo1());
+#endif
+
+ /* Flush D-cache */
+ if (flush_dcache_mask) {
+ if (entrylo0 & MIPS3_PG_V) {
+ ++vcpu->stat.flush_dcache_exits;
+ flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
+ }
+ if (entrylo1 & MIPS3_PG_V) {
+ ++vcpu->stat.flush_dcache_exits;
+ flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
+ (0x1 << PAGE_SHIFT));
+ }
+ }
+
+ /* Restore old ASID */
+ write_c0_entryhi(old_entryhi);
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+ local_irq_restore(flags);
+ return 0;
+}
+
+
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu)
+{
+ gfn_t gfn;
+ pfn_t pfn0, pfn1;
+ unsigned long vaddr = 0;
+ unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ int even;
+ struct kvm *kvm = vcpu->kvm;
+ const int flush_dcache_mask = 0;
+
+
+ if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+ kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ return -1;
+ }
+
+ gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+ if (gfn >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+ gfn, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ return -1;
+ }
+ even = !(gfn & 0x1);
+ vaddr = badvaddr & (PAGE_MASK << 1);
+
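+	/*
+	 * A host TLB entry maps an even/odd pair of pages, so make sure both
+	 * the faulting gfn and its neighbour are backed by host pages.
+	 */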
+ kvm_mips_map_page(vcpu->kvm, gfn);
+ kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
+
+ if (even) {
+ pfn0 = kvm->arch.guest_pmap[gfn];
+ pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
+ } else {
+ pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
+ pfn1 = kvm->arch.guest_pmap[gfn];
+ }
+
+ entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+ (0x1 << 1);
+ entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+ (0x1 << 1);
+
+ return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+ flush_dcache_mask);
+}
+
+int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu)
+{
+ pfn_t pfn0, pfn1;
+ unsigned long flags, old_entryhi = 0, vaddr = 0;
+ unsigned long entrylo0 = 0, entrylo1 = 0;
+
+
+ pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
+ pfn1 = 0;
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+ (0x1 << 1);
+ entrylo1 = 0;
+
+ local_irq_save(flags);
+
+ old_entryhi = read_c0_entryhi();
+ vaddr = badvaddr & (PAGE_MASK << 1);
+ write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
+ mtc0_tlbw_hazard();
+ write_c0_entrylo0(entrylo0);
+ mtc0_tlbw_hazard();
+ write_c0_entrylo1(entrylo1);
+ mtc0_tlbw_hazard();
+ write_c0_index(kvm_mips_get_commpage_asid(vcpu));
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+
+#ifdef DEBUG
+ kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+ vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
+ read_c0_entrylo0(), read_c0_entrylo1());
+#endif
+
+ /* Restore old ASID */
+ write_c0_entryhi(old_entryhi);
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+int
+kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
+{
+ unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ struct kvm *kvm = vcpu->kvm;
+ pfn_t pfn0, pfn1;
+
+
+ if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+ pfn0 = 0;
+ pfn1 = 0;
+ } else {
+ kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
+ kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
+
+ pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
+ pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
+ }
+
+ if (hpa0)
+ *hpa0 = pfn0 << PAGE_SHIFT;
+
+ if (hpa1)
+ *hpa1 = pfn1 << PAGE_SHIFT;
+
+ /* Get attributes from the Guest TLB */
+ entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+ kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+ (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+ entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+ (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+
+#ifdef DEBUG
+ kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+ tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+
+ return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+ tlb->tlb_mask);
+}
+
+int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
+{
+ int i;
+ int index = -1;
+ struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
+
+
+ for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+ if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
+ (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
+ index = i;
+ break;
+ }
+ }
+
+#ifdef DEBUG
+	if (index >= 0)
+		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
+			  __func__, entryhi, index,
+			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);
+#endif
+
+ return index;
+}
+
+int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
+{
+ unsigned long old_entryhi, flags;
+ volatile int idx;
+
+
+ local_irq_save(flags);
+
+ old_entryhi = read_c0_entryhi();
+
+ if (KVM_GUEST_KERNEL_MODE(vcpu))
+ write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
+ else {
+ write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+ }
+
+ mtc0_tlbw_hazard();
+
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+
+ /* Restore old ASID */
+ write_c0_entryhi(old_entryhi);
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+
+ local_irq_restore(flags);
+
+#ifdef DEBUG
+ kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
+#endif
+
+ return idx;
+}
+
+int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
+{
+ int idx;
+ unsigned long flags, old_entryhi;
+
+ local_irq_save(flags);
+
+
+ old_entryhi = read_c0_entryhi();
+
+ write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+ mtc0_tlbw_hazard();
+
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+
+ if (idx >= current_cpu_data.tlbsize)
+ BUG();
+
+ if (idx > 0) {
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+ mtc0_tlbw_hazard();
+
+ write_c0_entrylo0(0);
+ mtc0_tlbw_hazard();
+
+ write_c0_entrylo1(0);
+ mtc0_tlbw_hazard();
+
+ tlb_write_indexed();
+ mtc0_tlbw_hazard();
+ }
+
+ write_c0_entryhi(old_entryhi);
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+
+ local_irq_restore(flags);
+
+#ifdef DEBUG
+ if (idx > 0) {
+ kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
+ (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
+ }
+#endif
+
+ return 0;
+}
+
+/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID*/
+int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
+{
+ unsigned long flags, old_entryhi;
+
+ if (index >= current_cpu_data.tlbsize)
+ BUG();
+
+ local_irq_save(flags);
+
+
+ old_entryhi = read_c0_entryhi();
+
+ write_c0_entryhi(UNIQUE_ENTRYHI(index));
+ mtc0_tlbw_hazard();
+
+ write_c0_index(index);
+ mtc0_tlbw_hazard();
+
+ write_c0_entrylo0(0);
+ mtc0_tlbw_hazard();
+
+ write_c0_entrylo1(0);
+ mtc0_tlbw_hazard();
+
+ tlb_write_indexed();
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+
+ write_c0_entryhi(old_entryhi);
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+void kvm_mips_flush_host_tlb(int skip_kseg0)
+{
+ unsigned long flags;
+ unsigned long old_entryhi, entryhi;
+ unsigned long old_pagemask;
+ int entry = 0;
+ int maxentry = current_cpu_data.tlbsize;
+
+
+ local_irq_save(flags);
+
+ old_entryhi = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+
+ /* Blast 'em all away. */
+ for (entry = 0; entry < maxentry; entry++) {
+
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+
+ if (skip_kseg0) {
+ tlb_read();
+ tlbw_use_hazard();
+
+ entryhi = read_c0_entryhi();
+
+ /* Don't blow away guest kernel entries */
+ if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
+ continue;
+ }
+ }
+
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+ mtc0_tlbw_hazard();
+ write_c0_entrylo0(0);
+ mtc0_tlbw_hazard();
+ write_c0_entrylo1(0);
+ mtc0_tlbw_hazard();
+
+ tlb_write_indexed();
+ mtc0_tlbw_hazard();
+ }
+
+ tlbw_use_hazard();
+
+ write_c0_entryhi(old_entryhi);
+ write_c0_pagemask(old_pagemask);
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+
+ local_irq_restore(flags);
+}
+
+void
+kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+ struct kvm_vcpu *vcpu)
+{
+ unsigned long asid = asid_cache(cpu);
+
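+	/*
+	 * If incrementing the ASID wrapped its version field, flush the
+	 * I-cache (on virtually tagged caches) and the TLB and start a new
+	 * ASID cycle.
+	 */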
+ if (!(ASID_MASK(ASID_INC(asid)))) {
+ if (cpu_has_vtag_icache) {
+ flush_icache_all();
+ }
+
+ kvm_local_flush_tlb_all(); /* start new asid cycle */
+
+ if (!asid) /* fix version if needed */
+ asid = ASID_FIRST_VERSION;
+ }
+
+ cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
+void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ unsigned long old_entryhi;
+ unsigned long old_pagemask;
+ int entry = 0;
+ int cpu = smp_processor_id();
+
+ local_irq_save(flags);
+
+ old_entryhi = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+
+ for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+ tlb_read();
+ tlbw_use_hazard();
+
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
+ }
+
+ write_c0_entryhi(old_entryhi);
+ write_c0_pagemask(old_pagemask);
+ mtc0_tlbw_hazard();
+
+ local_irq_restore(flags);
+
+}
+
+void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ unsigned long old_ctx;
+ int entry;
+ int cpu = smp_processor_id();
+
+ local_irq_save(flags);
+
+ old_ctx = read_c0_entryhi();
+
+ for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+ write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
+ mtc0_tlbw_hazard();
+ write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
+ write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ }
+
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ mtc0_tlbw_hazard();
+ local_irq_restore(flags);
+}
+
+
+void kvm_local_flush_tlb_all(void)
+{
+ unsigned long flags;
+ unsigned long old_ctx;
+ int entry = 0;
+
+ local_irq_save(flags);
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi();
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+ /* Blast 'em all away. */
+ while (entry < current_cpu_data.tlbsize) {
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ entry++;
+ }
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ mtc0_tlbw_hazard();
+
+ local_irq_restore(flags);
+}
+
+void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
+{
+ int cpu, entry;
+
+ for_each_possible_cpu(cpu) {
+ for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
+ UNIQUE_ENTRYHI(entry);
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
+ read_c0_pagemask();
+#ifdef DEBUG
+ kvm_debug
+ ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
+ cpu, entry,
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+#endif
+ }
+ }
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ unsigned long flags;
+ int newasid = 0;
+
+#ifdef DEBUG
+ kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+#endif
+
+	/* Allocate new kernel and user ASIDs if needed */
+
+ local_irq_save(flags);
+
+	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
+	    ASID_VERSION_MASK) {
+ kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+ vcpu->arch.guest_kernel_asid[cpu] =
+ vcpu->arch.guest_kernel_mm.context.asid[cpu];
+ kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+ vcpu->arch.guest_user_asid[cpu] =
+ vcpu->arch.guest_user_mm.context.asid[cpu];
+ newasid++;
+
+ kvm_info("[%d]: cpu_context: %#lx\n", cpu,
+ cpu_context(cpu, current->mm));
+ kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+ cpu, vcpu->arch.guest_kernel_asid[cpu]);
+ kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+ vcpu->arch.guest_user_asid[cpu]);
+ }
+
+ if (vcpu->arch.last_sched_cpu != cpu) {
+ kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
+ vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+ }
+
+ /* Only reload shadow host TLB if new ASIDs haven't been allocated */
+#if 0
+ if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
+ kvm_mips_flush_host_tlb(0);
+ kvm_shadow_tlb_load(vcpu);
+ }
+#endif
+
+ if (!newasid) {
+ /* If we preempted while the guest was executing, then reload the pre-empted ASID */
+ if (current->flags & PF_VCPU) {
+ write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
+ ehb();
+ }
+ } else {
+ /* New ASIDs were allocated for the VM */
+
+		/* Were we in guest context? If so, the preempted ASID is no
+		 * longer valid; set it according to the current guest mode
+		 * (kernel/user).
+		 */
+ if (current->flags & PF_VCPU) {
+ if (KVM_GUEST_KERNEL_MODE(vcpu))
+ write_c0_entryhi(ASID_MASK(vcpu->arch.
+ guest_kernel_asid[cpu]));
+ else
+ write_c0_entryhi(ASID_MASK(vcpu->arch.
+ guest_user_asid[cpu]));
+ ehb();
+ }
+ }
+
+ local_irq_restore(flags);
+
+}
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ uint32_t cpu;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+
+
+ vcpu->arch.preempt_entryhi = read_c0_entryhi();
+ vcpu->arch.last_sched_cpu = cpu;
+
+#if 0
+ if ((atomic_read(&kvm_mips_instance) > 1)) {
+ kvm_shadow_tlb_put(vcpu);
+ }
+#endif
+
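+	/*
+	 * If the host mm's ASID generation moved on while the VCPU ran, the
+	 * cached context is stale; drop it before restoring the host EntryHi.
+	 */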
+ if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+ ASID_VERSION_MASK)) {
+ kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
+ cpu_context(cpu, current->mm));
+ drop_mmu_context(current->mm, cpu);
+ }
+ write_c0_entryhi(cpu_asid(cpu, current->mm));
+ ehb();
+
+ local_irq_restore(flags);
+}
+
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned long paddr, flags;
+ uint32_t inst;
+ int index;
+
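+	/*
+	 * The faulting instruction is either in guest mapped space (useg or
+	 * kseg2/3), in which case we make sure a host TLB entry exists
+	 * (injecting one from the guest TLB if necessary) and read through
+	 * it, or in guest KSEG0, which we translate directly to a host
+	 * physical address.
+	 */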
+ if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
+ KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+ local_irq_save(flags);
+ index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
+ if (index >= 0) {
+ inst = *(opc);
+ } else {
+ index =
+ kvm_mips_guest_tlb_lookup(vcpu,
+ ((unsigned long) opc & VPN2_MASK)
+ |
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+ if (index < 0) {
+ kvm_err
+ ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
+ __func__, opc, vcpu, read_c0_entryhi());
+ kvm_mips_dump_host_tlbs();
+ local_irq_restore(flags);
+ return KVM_INVALID_INST;
+ }
+ kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+ &vcpu->arch.
+ guest_tlb[index],
+ NULL, NULL);
+ inst = *(opc);
+ }
+ local_irq_restore(flags);
+ } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+ paddr =
+ kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
+ (unsigned long) opc);
+ inst = *(uint32_t *) CKSEG0ADDR(paddr);
+ } else {
+ kvm_err("%s: illegal address: %p\n", __func__, opc);
+ return KVM_INVALID_INST;
+ }
+
+ return inst;
+}
+
+EXPORT_SYMBOL(kvm_local_flush_tlb_all);
+EXPORT_SYMBOL(kvm_shadow_tlb_put);
+EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
+EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
+EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
+EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
+EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
+EXPORT_SYMBOL(kvm_shadow_tlb_load);
+EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
+EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
+EXPORT_SYMBOL(kvm_get_inst);
+EXPORT_SYMBOL(kvm_arch_vcpu_load);
+EXPORT_SYMBOL(kvm_arch_vcpu_put);
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
new file mode 100644
index 000000000000..466aeef044bd
--- /dev/null
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -0,0 +1,482 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+
+static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
+{
+ gpa_t gpa;
+ uint32_t kseg = KSEGX(gva);
+
+ if ((kseg == CKSEG0) || (kseg == CKSEG1))
+ gpa = CPHYSADDR(gva);
+ else {
+ printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+ kvm_mips_dump_host_tlbs();
+ gpa = KVM_INVALID_ADDR;
+ }
+
+#ifdef DEBUG
+ kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
+#endif
+
+ return gpa;
+}
+
+
+static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
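+	/*
+	 * CE == 1 means the guest touched the FPU, so deliver a Coprocessor
+	 * Unusable exception to it; for any other coprocessor the trapping
+	 * instruction (typically a CP0 op) is emulated instead.
+	 */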
+ if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+ er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+ } else
+ er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+
+ switch (er) {
+ case EMULATE_DONE:
+ ret = RESUME_GUEST;
+ break;
+
+ case EMULATE_FAIL:
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ break;
+
+ case EMULATE_WAIT:
+ run->exit_reason = KVM_EXIT_INTR;
+ ret = RESUME_HOST;
+ break;
+
+ default:
+ BUG();
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+ || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+ kvm_debug
+ ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+#endif
+ er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
+
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+ /* XXXKYMA: The guest kernel does not expect to get this fault when we are not
+ * using HIGHMEM. Need to address this in a HIGHMEM kernel
+ */
+ printk
+ ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ kvm_arch_vcpu_dump_regs(vcpu);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ } else {
+ printk
+		    ("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ kvm_arch_vcpu_dump_regs(vcpu);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+ && KVM_GUEST_KERNEL_MODE(vcpu)) {
+ if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+ || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+ kvm_debug
+		    ("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+#endif
+ er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+ /* All KSEG0 faults are handled by KVM, as the guest kernel does not
+ * expect to ever get them
+ */
+ if (kvm_mips_handle_kseg0_tlb_fault
+ (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else {
+ kvm_err
+		    ("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ kvm_arch_vcpu_dump_regs(vcpu);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+ && KVM_GUEST_KERNEL_MODE(vcpu)) {
+ if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+ || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+		kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n",
+ vcpu->arch.pc, badvaddr);
+#endif
+
+ /* User Address (UA) fault, this could happen if
+ * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
+ * case we pass on the fault to the guest kernel and let it handle it.
+ * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
+ * case we inject the TLB from the Guest TLB into the shadow host TLB
+ */
+
+ er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+ if (kvm_mips_handle_kseg0_tlb_fault
+ (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else {
+ printk
+		    ("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ kvm_arch_vcpu_dump_regs(vcpu);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (KVM_GUEST_KERNEL_MODE(vcpu)
+ && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+#ifdef DEBUG
+ kvm_debug("Emulate Store to MMIO space\n");
+#endif
+ er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+ if (er == EMULATE_FAIL) {
+ printk("Emulate Store to MMIO space failed\n");
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ } else {
+ run->exit_reason = KVM_EXIT_MMIO;
+ ret = RESUME_HOST;
+ }
+ } else {
+ printk
+ ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
+#ifdef DEBUG
+ kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
+#endif
+ er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+ if (er == EMULATE_FAIL) {
+ printk("Emulate Load from MMIO space failed\n");
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ } else {
+ run->exit_reason = KVM_EXIT_MMIO;
+ ret = RESUME_HOST;
+ }
+ } else {
+ printk
+ ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ er = EMULATE_FAIL;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int
+kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
+ kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
+ kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
+ kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
+ kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
+
+ kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
+ kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
+ kvm_write_c0_guest_pagemask(cop0,
+ regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
+ kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
+ kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
+
+ return 0;
+}
+
+static int
+kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
+ regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
+ regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
+ regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
+ regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
+
+ regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
+ regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
+ regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
+ kvm_read_c0_guest_pagemask(cop0);
+ regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
+ regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
+
+ regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
+ regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
+ regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
+ regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
+ regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
+
+ return 0;
+}
+
+static int kvm_trap_emul_vm_init(struct kvm *kvm)
+{
+ return 0;
+}
+
+static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ uint32_t config1;
+ int vcpu_id = vcpu->vcpu_id;
+
+	/* Arch specific stuff: set up the Config registers so that
+	 * the guest comes up as expected; for now we emulate a
+	 * MIPS 24Kc.
+	 */
+ kvm_write_c0_guest_prid(cop0, 0x00019300);
+ kvm_write_c0_guest_config(cop0,
+ MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT));
+
+ /* Read the cache characteristics from the host Config1 Register */
+ config1 = (read_c0_config1() & ~0x7f);
+
+ /* Set up MMU size */
+ config1 &= ~(0x3f << 25);
+ config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
+
+ /* We unset some bits that we aren't emulating */
+ config1 &=
+ ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
+ (1 << CP0C1_WR) | (1 << CP0C1_CA));
+ kvm_write_c0_guest_config1(cop0, config1);
+
+ kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
+ /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
+ kvm_write_c0_guest_config3(cop0,
+ MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
+ CP0C3_ULRI));
+
+ /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
+ kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
+
+	/* Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) */
+ kvm_write_c0_guest_intctl(cop0, 0xFC000000);
+
+	/* Put vcpu_id into the CPUNum field of EBase to handle SMP guests */
+ kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+
+ return 0;
+}
+
+static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+ /* exit handlers */
+ .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
+ .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
+ .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
+ .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
+ .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
+ .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
+ .handle_syscall = kvm_trap_emul_handle_syscall,
+ .handle_res_inst = kvm_trap_emul_handle_res_inst,
+ .handle_break = kvm_trap_emul_handle_break,
+
+ .vm_init = kvm_trap_emul_vm_init,
+ .vcpu_init = kvm_trap_emul_vcpu_init,
+ .vcpu_setup = kvm_trap_emul_vcpu_setup,
+ .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
+ .queue_timer_int = kvm_mips_queue_timer_int_cb,
+ .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
+ .queue_io_int = kvm_mips_queue_io_int_cb,
+ .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
+ .irq_deliver = kvm_mips_irq_deliver_cb,
+ .irq_clear = kvm_mips_irq_clear_cb,
+ .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
+ .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
+};
+
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+{
+ *install_callbacks = &kvm_trap_emul_callbacks;
+ return 0;
+}
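
For context, the trap-and-emulate backend above registers itself through a table of callbacks; a minimal sketch of how the generic exit path could dispatch through that table (the dispatcher below is illustrative, only the struct fields and the installed pointer come from the patch):

/* Hedged sketch, not part of the patch: dispatching through the
 * installed kvm_mips_callbacks table.  The helper name and the
 * surrounding plumbing are illustrative only. */
static struct kvm_mips_callbacks *kvm_mips_callbacks;

static int example_dispatch_syscall(struct kvm_vcpu *vcpu)
{
	/* RESUME_GUEST re-enters the guest; RESUME_HOST returns to
	 * userspace with vcpu->run->exit_reason filled in. */
	return kvm_mips_callbacks->handle_syscall(vcpu);
}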
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
new file mode 100644
index 000000000000..bc9e0f406c08
--- /dev/null
+++ b/arch/mips/kvm/trace.h
@@ -0,0 +1,46 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
+/*
+ * Tracepoints for VM exits
+ */
+extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
+
+TRACE_EVENT(kvm_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason),
+ TP_STRUCT__entry(
+ __field(struct kvm_vcpu *, vcpu)
+ __field(unsigned int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu = vcpu;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("[%s]PC: 0x%08lx",
+ kvm_mips_exit_types_str[__entry->reason],
+ __entry->vcpu->arch.pc)
+);
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
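
TRACE_EVENT(kvm_exit, ...) above generates a trace_kvm_exit() helper; a minimal usage sketch (the call site shown is illustrative, not taken from the patch):

/* Hedged sketch: emitting the generated tracepoint from an exit
 * handler.  Exactly one translation unit must define
 * CREATE_TRACE_POINTS before including "trace.h"; 'exit_reason'
 * indexes kvm_mips_exit_types_str[], as the TP_printk above shows. */
#include "trace.h"

static void example_note_exit(struct kvm_vcpu *vcpu, unsigned int exit_reason)
{
	trace_kvm_exit(vcpu, exit_reason);
}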
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index a64daee740ee..3b2a1e78a543 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -19,7 +19,7 @@
*/
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
{
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(__mips_set_bit);
*/
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(__mips_clear_bit);
*/
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
{
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(__mips_change_bit);
int __mips_test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
@@ -112,7 +112,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit);
int __mips_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
*/
int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
@@ -162,7 +162,7 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
*/
int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 32b9f21bfd85..8a12d00908e0 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -11,6 +11,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
+#include <asm/mmu_context.h>
static inline const char *msk2str(unsigned int mask)
{
@@ -55,7 +56,7 @@ static void dump_tlb(int first, int last)
s_pagemask = read_c0_pagemask();
s_entryhi = read_c0_entryhi();
s_index = read_c0_index();
- asid = s_entryhi & 0xff;
+ asid = ASID_MASK(s_entryhi);
for (i = first; i <= last; i++) {
write_c0_index(i);
@@ -85,7 +86,7 @@ static void dump_tlb(int first, int last)
printk("va=%0*lx asid=%02lx\n",
width, (entryhi & ~0x1fffUL),
- entryhi & 0xff);
+ ASID_MASK(entryhi));
printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
width,
(entrylo0 << 6) & PAGE_MASK, c0,
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 053d3b0b0317..0580194e7402 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -5,7 +5,8 @@
*
* Copyright (C) 1998, 1999, 2000 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2007 Maciej W. Rozycki
+ * Copyright (C) 2007 by Maciej W. Rozycki
+ * Copyright (C) 2011, 2012 MIPS Technologies, Inc.
*/
#include <asm/asm.h>
#include <asm/asm-offsets.h>
@@ -19,6 +20,20 @@
#define LONG_S_R sdr
#endif
+#ifdef CONFIG_CPU_MICROMIPS
+#define STORSIZE (LONGSIZE * 2)
+#define STORMASK (STORSIZE - 1)
+#define FILL64RG t8
+#define FILLPTRG t7
+#undef LONG_S
+#define LONG_S LONG_SP
+#else
+#define STORSIZE LONGSIZE
+#define STORMASK LONGMASK
+#define FILL64RG a1
+#define FILLPTRG t0
+#endif
+
#define EX(insn,reg,addr,handler) \
9: insn reg, addr; \
.section __ex_table,"a"; \
@@ -26,23 +41,25 @@
.previous
.macro f_fill64 dst, offset, val, fixup
- EX(LONG_S, \val, (\offset + 0 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 1 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 2 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 3 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 4 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup)
-#if LONGSIZE == 4
- EX(LONG_S, \val, (\offset + 8 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 9 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 11 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 12 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 3 * STORSIZE)(\dst), \fixup)
+#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS))
+ EX(LONG_S, \val, (\offset + 4 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 5 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 6 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 7 * STORSIZE)(\dst), \fixup)
+#endif
+#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4))
+ EX(LONG_S, \val, (\offset + 8 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 9 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup)
#endif
.endm
@@ -71,16 +88,20 @@ LEAF(memset)
1:
FEXPORT(__bzero)
- sltiu t0, a2, LONGSIZE /* very small region? */
+ sltiu t0, a2, STORSIZE /* very small region? */
bnez t0, .Lsmall_memset
- andi t0, a0, LONGMASK /* aligned? */
+ andi t0, a0, STORMASK /* aligned? */
+#ifdef CONFIG_CPU_MICROMIPS
+ move t8, a1 /* used by 'swp' instruction */
+ move t9, a1
+#endif
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
beqz t0, 1f
- PTR_SUBU t0, LONGSIZE /* alignment in bytes */
+ PTR_SUBU t0, STORSIZE /* alignment in bytes */
#else
.set noat
- li AT, LONGSIZE
+ li AT, STORSIZE
beqz t0, 1f
PTR_SUBU t0, AT /* alignment in bytes */
.set at
@@ -99,24 +120,27 @@ FEXPORT(__bzero)
1: ori t1, a2, 0x3f /* # of full blocks */
xori t1, 0x3f
beqz t1, .Lmemset_partial /* no block to fill */
- andi t0, a2, 0x40-LONGSIZE
+ andi t0, a2, 0x40-STORSIZE
PTR_ADDU t1, a0 /* end address */
.set reorder
1: PTR_ADDIU a0, 64
R10KCBARRIER(0(ra))
- f_fill64 a0, -64, a1, .Lfwd_fixup
+ f_fill64 a0, -64, FILL64RG, .Lfwd_fixup
bne t1, a0, 1b
.set noreorder
.Lmemset_partial:
R10KCBARRIER(0(ra))
PTR_LA t1, 2f /* where to start */
+#ifdef CONFIG_CPU_MICROMIPS
+ LONG_SRL t7, t0, 1
+#endif
#if LONGSIZE == 4
- PTR_SUBU t1, t0
+ PTR_SUBU t1, FILLPTRG
#else
.set noat
- LONG_SRL AT, t0, 1
+ LONG_SRL AT, FILLPTRG, 1
PTR_SUBU t1, AT
.set at
#endif
@@ -126,9 +150,9 @@ FEXPORT(__bzero)
.set push
.set noreorder
.set nomacro
- f_fill64 a0, -64, a1, .Lpartial_fixup /* ... but first do longs ... */
+ f_fill64 a0, -64, FILL64RG, .Lpartial_fixup /* ... but first do longs ... */
2: .set pop
- andi a2, LONGMASK /* At most one long to go */
+ andi a2, STORMASK /* At most one long to go */
beqz a2, 1f
PTR_ADDU a0, a2 /* What's left */
@@ -169,7 +193,7 @@ FEXPORT(__bzero)
.Lpartial_fixup:
PTR_L t0, TI_TASK($28)
- andi a2, LONGMASK
+ andi a2, STORMASK
LONG_L t0, THREAD_BUADDR(t0)
LONG_ADDU a2, t1
jr ra
@@ -177,4 +201,4 @@ FEXPORT(__bzero)
.Llast_fixup:
jr ra
- andi v1, a2, LONGMASK
+ andi v1, a2, STORMASK
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index cd160be3ce4d..6807f7172eaf 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -13,6 +13,7 @@
#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/export.h>
+#include <linux/stringify.h>
#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
@@ -34,8 +35,11 @@
*
* Workaround: mask EXL bit of the result or place a nop before mfc0.
*/
-__asm__(
- " .macro arch_local_irq_disable\n"
+notrace void arch_local_irq_disable(void)
+{
+ preempt_disable();
+
+ __asm__ __volatile__(
" .set push \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
@@ -52,108 +56,98 @@ __asm__(
" .set noreorder \n"
" mtc0 $1,$12 \n"
#endif
- " irq_disable_hazard \n"
+ " " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
- " .endm \n");
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
-notrace void arch_local_irq_disable(void)
-{
- preempt_disable();
- __asm__ __volatile__(
- "arch_local_irq_disable"
- : /* no outputs */
- : /* no inputs */
- : "memory");
preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_disable);
-__asm__(
- " .macro arch_local_irq_save result \n"
+notrace unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ preempt_disable();
+
+ __asm__ __volatile__(
" .set push \n"
" .set reorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 \\result, $2, 1 \n"
- " ori $1, \\result, 0x400 \n"
+ " mfc0 %[flags], $2, 1 \n"
+ " ori $1, %[flags], 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
- " andi \\result, \\result, 0x400 \n"
+ " andi %[flags], %[flags], 0x400 \n"
#elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */
#else
- " mfc0 \\result, $12 \n"
- " ori $1, \\result, 0x1f \n"
+ " mfc0 %[flags], $12 \n"
+ " ori $1, %[flags], 0x1f \n"
" xori $1, 0x1f \n"
" .set noreorder \n"
" mtc0 $1, $12 \n"
#endif
- " irq_disable_hazard \n"
+ " " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
- " .endm \n");
+ : [flags] "=r" (flags)
+ : /* no inputs */
+ : "memory");
-notrace unsigned long arch_local_irq_save(void)
-{
- unsigned long flags;
- preempt_disable();
- asm volatile("arch_local_irq_save\t%0"
- : "=r" (flags)
- : /* no inputs */
- : "memory");
preempt_enable();
+
return flags;
}
EXPORT_SYMBOL(arch_local_irq_save);
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of branch and call overhead on each
+ * local_irq_restore()
+ */
+ if (unlikely(!(flags & 0x0400)))
+ smtc_ipi_replay();
+#endif
+ preempt_disable();
-__asm__(
- " .macro arch_local_irq_restore flags \n"
+ __asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
- "mfc0 $1, $2, 1 \n"
- "andi \\flags, 0x400 \n"
- "ori $1, 0x400 \n"
- "xori $1, 0x400 \n"
- "or \\flags, $1 \n"
- "mtc0 \\flags, $2, 1 \n"
+ " mfc0 $1, $2, 1 \n"
+ " andi %[flags], 0x400 \n"
+ " ori $1, 0x400 \n"
+ " xori $1, 0x400 \n"
+ " or %[flags], $1 \n"
+ " mtc0 %[flags], $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
/* see irqflags.h for inline function */
#elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */
#else
" mfc0 $1, $12 \n"
- " andi \\flags, 1 \n"
+ " andi %[flags], 1 \n"
" ori $1, 0x1f \n"
" xori $1, 0x1f \n"
- " or \\flags, $1 \n"
- " mtc0 \\flags, $12 \n"
+ " or %[flags], $1 \n"
+ " mtc0 %[flags], $12 \n"
#endif
- " irq_disable_hazard \n"
+ " " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
- " .endm \n");
+ : [flags] "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
-notrace void arch_local_irq_restore(unsigned long flags)
-{
- unsigned long __tmp1;
-
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC kernel needs to do a software replay of queued
- * IPIs, at the cost of branch and call overhead on each
- * local_irq_restore()
- */
- if (unlikely(!(flags & 0x0400)))
- smtc_ipi_replay();
-#endif
- preempt_disable();
- __asm__ __volatile__(
- "arch_local_irq_restore\t%0"
- : "=r" (__tmp1)
- : "0" (flags)
- : "memory");
preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
@@ -164,11 +158,36 @@ notrace void __arch_local_irq_restore(unsigned long flags)
unsigned long __tmp1;
preempt_disable();
+
__asm__ __volatile__(
- "arch_local_irq_restore\t%0"
- : "=r" (__tmp1)
- : "0" (flags)
- : "memory");
+ " .set push \n"
+ " .set noreorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 $1, $2, 1 \n"
+ " andi %[flags], 0x400 \n"
+ " ori $1, 0x400 \n"
+ " xori $1, 0x400 \n"
+ " or %[flags], $1 \n"
+ " mtc0 %[flags], $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+ /* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 $1, $12 \n"
+ " andi %[flags], 1 \n"
+ " ori $1, 0x1f \n"
+ " xori $1, 0x1f \n"
+ " or %[flags], $1 \n"
+ " mtc0 %[flags], $12 \n"
+#endif
+ " " __stringify(__irq_disable_hazard) " \n"
+ " .set pop \n"
+ : [flags] "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+
preempt_enable();
}
EXPORT_SYMBOL(__arch_local_irq_restore);
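
The rewrite above replaces out-of-line assembler macros with ordinary inline asm, splicing the hazard-barrier macro name in via __stringify() so it is still expanded by the assembler. A minimal sketch of the pattern (the barrier token below is a stand-in; the real __irq_disable_hazard comes from <asm/hazards.h>):

/* Hedged sketch of the __stringify() pattern used above. */
#include <linux/stringify.h>

#define __example_hazard_barrier	nop	/* stand-in for __irq_disable_hazard */

static inline void example_barrier(void)
{
	__asm__ __volatile__(
	"	" __stringify(__example_hazard_barrier) "	\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");
}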
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 91615c2ef0cf..8327698b9937 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
@@ -21,7 +22,7 @@ static void dump_tlb(int first, int last)
unsigned int asid;
unsigned long entryhi, entrylo0;
- asid = read_c0_entryhi() & 0xfc0;
+ asid = ASID_MASK(read_c0_entryhi());
for (i = first; i <= last; i++) {
write_c0_index(i<<8);
@@ -35,7 +36,7 @@ static void dump_tlb(int first, int last)
/* Unused entries have a virtual address of KSEG0. */
if ((entryhi & 0xffffe000) != 0x80000000
- && (entryhi & 0xfc0) == asid) {
+ && (ASID_MASK(entryhi) == asid)) {
/*
* Only print entries in use
*/
@@ -44,7 +45,7 @@ static void dump_tlb(int first, int last)
printk("va=%08lx asid=%08lx"
" [pa=%06lx n=%d d=%d v=%d g=%d]",
(entryhi & 0xffffe000),
- entryhi & 0xfc0,
+ ASID_MASK(entryhi),
entrylo0 & PAGE_MASK,
(entrylo0 & (1 << 11)) ? 1 : 0,
(entrylo0 & (1 << 10)) ? 1 : 0,
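
Both TLB dumpers now use ASID_MASK() from <asm/mmu_context.h> instead of the hard-coded 0xff / 0xfc0 masks; a hedged sketch of what such a macro can look like (the real definition is per-CPU-family and may differ):

/* Hedged sketch only -- the real ASID_MASK() lives in
 * <asm/mmu_context.h> and is selected by CPU family. */
#ifdef CONFIG_CPU_R3000
# define ASID_MASK(x)	((x) & 0xfc0)	/* R3k: ASID in EntryHi bits 11:6 */
#else
# define ASID_MASK(x)	((x) & 0xff)	/* R4k-style: ASID in EntryHi bits 7:0 */
#endif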
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
index fdbb970f670d..e362dcdc69d1 100644
--- a/arch/mips/lib/strlen_user.S
+++ b/arch/mips/lib/strlen_user.S
@@ -3,8 +3,9 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle
- * Copyright (c) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2011 MIPS Technologies, Inc.
*/
#include <asm/asm.h>
#include <asm/asm-offsets.h>
@@ -28,9 +29,9 @@ LEAF(__strlen_user_asm)
FEXPORT(__strlen_user_nocheck_asm)
move v0, a0
-1: EX(lb, t0, (v0), .Lfault)
+1: EX(lbu, v1, (v0), .Lfault)
PTR_ADDIU v0, 1
- bnez t0, 1b
+ bnez v1, 1b
PTR_SUBU v0, a0
jr ra
END(__strlen_user_asm)
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index bad539487503..92870b6b53ea 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -3,7 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 1996, 1999 by Ralf Baechle
+ * Copyright (C) 1996, 1999 by Ralf Baechle
+ * Copyright (C) 2011 MIPS Technologies, Inc.
*/
#include <linux/errno.h>
#include <asm/asm.h>
@@ -33,26 +34,27 @@ LEAF(__strncpy_from_user_asm)
bnez v0, .Lfault
FEXPORT(__strncpy_from_user_nocheck_asm)
- move v0, zero
- move v1, a1
.set noreorder
-1: EX(lbu, t0, (v1), .Lfault)
+ move t0, zero
+ move v1, a1
+1: EX(lbu, v0, (v1), .Lfault)
PTR_ADDIU v1, 1
R10KCBARRIER(0(ra))
- beqz t0, 2f
- sb t0, (a0)
- PTR_ADDIU v0, 1
- .set reorder
- PTR_ADDIU a0, 1
- bne v0, a2, 1b
-2: PTR_ADDU t0, a1, v0
- xor t0, a1
- bltz t0, .Lfault
+ beqz v0, 2f
+ sb v0, (a0)
+ PTR_ADDIU t0, 1
+ bne t0, a2, 1b
+ PTR_ADDIU a0, 1
+2: PTR_ADDU v0, a1, t0
+ xor v0, a1
+ bltz v0, .Lfault
+ nop
jr ra # return n
+ move v0, t0
END(__strncpy_from_user_asm)
-.Lfault: li v0, -EFAULT
- jr ra
+.Lfault: jr ra
+ li v0, -EFAULT
.section __ex_table,"a"
PTR 1b, .Lfault
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index beea03c8c0ce..fcacea5e61f1 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -35,7 +35,7 @@ FEXPORT(__strnlen_user_nocheck_asm)
PTR_ADDU a1, a0 # stop pointer
1: beq v0, a1, 1f # limit reached?
EX(lb, t0, (v0), .Lfault)
- PTR_ADDU v0, 1
+ PTR_ADDIU v0, 1
bnez t0, 1b
1: PTR_SUBU v0, a0
jr ra
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index afb5a0bcf7a5..f03771900813 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -45,6 +45,7 @@
#include <asm/signal.h>
#include <asm/mipsregs.h>
#include <asm/fpu_emulator.h>
+#include <asm/fpu.h>
#include <asm/uaccess.h>
#include <asm/branch.h>
@@ -81,6 +82,11 @@ DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
/* Determine rounding mode from the RM bits of the FCSR */
#define modeindex(v) ((v) & FPU_CSR_RM)
+/* microMIPS bitfields */
+#define MM_POOL32A_MINOR_MASK 0x3f
+#define MM_POOL32A_MINOR_SHIFT 0x6
+#define MM_MIPS32_COND_FC 0x30
+
/* Convert Mips rounding mode (0..3) to IEEE library modes. */
static const unsigned char ieee_rm[4] = {
[FPU_CSR_RN] = IEEE754_RN,
@@ -110,6 +116,556 @@ static const unsigned int fpucondbit[8] = {
};
#endif
+/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
+static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
+
+/* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */
+static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0};
+static const int sdps_format[] = {16, 17, 22, 0, 0, 0, 0, 0};
+static const int dwl_format[] = {17, 20, 21, 0, 0, 0, 0, 0};
+static const int swl_format[] = {16, 20, 21, 0, 0, 0, 0, 0};
+
+/*
+ * This function translates a 32-bit microMIPS instruction
+ * into a 32-bit MIPS32 instruction. Returns 0 on success
+ * and SIGILL otherwise.
+ */
+static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
+{
+ union mips_instruction insn = *insn_ptr;
+ union mips_instruction mips32_insn = insn;
+ int func, fmt, op;
+
+ switch (insn.mm_i_format.opcode) {
+ case mm_ldc132_op:
+ mips32_insn.mm_i_format.opcode = ldc1_op;
+ mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+ mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+ break;
+ case mm_lwc132_op:
+ mips32_insn.mm_i_format.opcode = lwc1_op;
+ mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+ mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+ break;
+ case mm_sdc132_op:
+ mips32_insn.mm_i_format.opcode = sdc1_op;
+ mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+ mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+ break;
+ case mm_swc132_op:
+ mips32_insn.mm_i_format.opcode = swc1_op;
+ mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+ mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+ break;
+ case mm_pool32i_op:
+		/* NOTE: the offset is shifted left by 1 in microMIPS mode. */
+ if ((insn.mm_i_format.rt == mm_bc1f_op) ||
+ (insn.mm_i_format.rt == mm_bc1t_op)) {
+ mips32_insn.fb_format.opcode = cop1_op;
+ mips32_insn.fb_format.bc = bc_op;
+ mips32_insn.fb_format.flag =
+ (insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
+ } else
+ return SIGILL;
+ break;
+ case mm_pool32f_op:
+ switch (insn.mm_fp0_format.func) {
+ case mm_32f_01_op:
+ case mm_32f_11_op:
+ case mm_32f_02_op:
+ case mm_32f_12_op:
+ case mm_32f_41_op:
+ case mm_32f_51_op:
+ case mm_32f_42_op:
+ case mm_32f_52_op:
+ op = insn.mm_fp0_format.func;
+ if (op == mm_32f_01_op)
+ func = madd_s_op;
+ else if (op == mm_32f_11_op)
+ func = madd_d_op;
+ else if (op == mm_32f_02_op)
+ func = nmadd_s_op;
+ else if (op == mm_32f_12_op)
+ func = nmadd_d_op;
+ else if (op == mm_32f_41_op)
+ func = msub_s_op;
+ else if (op == mm_32f_51_op)
+ func = msub_d_op;
+ else if (op == mm_32f_42_op)
+ func = nmsub_s_op;
+ else
+ func = nmsub_d_op;
+ mips32_insn.fp6_format.opcode = cop1x_op;
+ mips32_insn.fp6_format.fr = insn.mm_fp6_format.fr;
+ mips32_insn.fp6_format.ft = insn.mm_fp6_format.ft;
+ mips32_insn.fp6_format.fs = insn.mm_fp6_format.fs;
+ mips32_insn.fp6_format.fd = insn.mm_fp6_format.fd;
+ mips32_insn.fp6_format.func = func;
+ break;
+ case mm_32f_10_op:
+ func = -1; /* Invalid */
+ op = insn.mm_fp5_format.op & 0x7;
+ if (op == mm_ldxc1_op)
+ func = ldxc1_op;
+ else if (op == mm_sdxc1_op)
+ func = sdxc1_op;
+ else if (op == mm_lwxc1_op)
+ func = lwxc1_op;
+ else if (op == mm_swxc1_op)
+ func = swxc1_op;
+
+ if (func != -1) {
+ mips32_insn.r_format.opcode = cop1x_op;
+ mips32_insn.r_format.rs =
+ insn.mm_fp5_format.base;
+ mips32_insn.r_format.rt =
+ insn.mm_fp5_format.index;
+ mips32_insn.r_format.rd = 0;
+ mips32_insn.r_format.re = insn.mm_fp5_format.fd;
+ mips32_insn.r_format.func = func;
+ } else
+ return SIGILL;
+ break;
+ case mm_32f_40_op:
+ op = -1; /* Invalid */
+ if (insn.mm_fp2_format.op == mm_fmovt_op)
+ op = 1;
+ else if (insn.mm_fp2_format.op == mm_fmovf_op)
+ op = 0;
+ if (op != -1) {
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sdps_format[insn.mm_fp2_format.fmt];
+ mips32_insn.fp0_format.ft =
+ (insn.mm_fp2_format.cc<<2) + op;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp2_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp2_format.fd;
+ mips32_insn.fp0_format.func = fmovc_op;
+ } else
+ return SIGILL;
+ break;
+ case mm_32f_60_op:
+ func = -1; /* Invalid */
+ if (insn.mm_fp0_format.op == mm_fadd_op)
+ func = fadd_op;
+ else if (insn.mm_fp0_format.op == mm_fsub_op)
+ func = fsub_op;
+ else if (insn.mm_fp0_format.op == mm_fmul_op)
+ func = fmul_op;
+ else if (insn.mm_fp0_format.op == mm_fdiv_op)
+ func = fdiv_op;
+ if (func != -1) {
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sdps_format[insn.mm_fp0_format.fmt];
+ mips32_insn.fp0_format.ft =
+ insn.mm_fp0_format.ft;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp0_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp0_format.fd;
+ mips32_insn.fp0_format.func = func;
+ } else
+ return SIGILL;
+ break;
+ case mm_32f_70_op:
+ func = -1; /* Invalid */
+ if (insn.mm_fp0_format.op == mm_fmovn_op)
+ func = fmovn_op;
+ else if (insn.mm_fp0_format.op == mm_fmovz_op)
+ func = fmovz_op;
+ if (func != -1) {
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sdps_format[insn.mm_fp0_format.fmt];
+ mips32_insn.fp0_format.ft =
+ insn.mm_fp0_format.ft;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp0_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp0_format.fd;
+ mips32_insn.fp0_format.func = func;
+ } else
+ return SIGILL;
+ break;
+ case mm_32f_73_op: /* POOL32FXF */
+ switch (insn.mm_fp1_format.op) {
+ case mm_movf0_op:
+ case mm_movf1_op:
+ case mm_movt0_op:
+ case mm_movt1_op:
+ if ((insn.mm_fp1_format.op & 0x7f) ==
+ mm_movf0_op)
+ op = 0;
+ else
+ op = 1;
+ mips32_insn.r_format.opcode = spec_op;
+ mips32_insn.r_format.rs = insn.mm_fp4_format.fs;
+ mips32_insn.r_format.rt =
+ (insn.mm_fp4_format.cc << 2) + op;
+ mips32_insn.r_format.rd = insn.mm_fp4_format.rt;
+ mips32_insn.r_format.re = 0;
+ mips32_insn.r_format.func = movc_op;
+ break;
+ case mm_fcvtd0_op:
+ case mm_fcvtd1_op:
+ case mm_fcvts0_op:
+ case mm_fcvts1_op:
+ if ((insn.mm_fp1_format.op & 0x7f) ==
+ mm_fcvtd0_op) {
+ func = fcvtd_op;
+ fmt = swl_format[insn.mm_fp3_format.fmt];
+ } else {
+ func = fcvts_op;
+ fmt = dwl_format[insn.mm_fp3_format.fmt];
+ }
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt = fmt;
+ mips32_insn.fp0_format.ft = 0;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp3_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp3_format.rt;
+ mips32_insn.fp0_format.func = func;
+ break;
+ case mm_fmov0_op:
+ case mm_fmov1_op:
+ case mm_fabs0_op:
+ case mm_fabs1_op:
+ case mm_fneg0_op:
+ case mm_fneg1_op:
+ if ((insn.mm_fp1_format.op & 0x7f) ==
+ mm_fmov0_op)
+ func = fmov_op;
+ else if ((insn.mm_fp1_format.op & 0x7f) ==
+ mm_fabs0_op)
+ func = fabs_op;
+ else
+ func = fneg_op;
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sdps_format[insn.mm_fp3_format.fmt];
+ mips32_insn.fp0_format.ft = 0;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp3_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp3_format.rt;
+ mips32_insn.fp0_format.func = func;
+ break;
+ case mm_ffloorl_op:
+ case mm_ffloorw_op:
+ case mm_fceill_op:
+ case mm_fceilw_op:
+ case mm_ftruncl_op:
+ case mm_ftruncw_op:
+ case mm_froundl_op:
+ case mm_froundw_op:
+ case mm_fcvtl_op:
+ case mm_fcvtw_op:
+ if (insn.mm_fp1_format.op == mm_ffloorl_op)
+ func = ffloorl_op;
+ else if (insn.mm_fp1_format.op == mm_ffloorw_op)
+ func = ffloor_op;
+ else if (insn.mm_fp1_format.op == mm_fceill_op)
+ func = fceill_op;
+ else if (insn.mm_fp1_format.op == mm_fceilw_op)
+ func = fceil_op;
+ else if (insn.mm_fp1_format.op == mm_ftruncl_op)
+ func = ftruncl_op;
+ else if (insn.mm_fp1_format.op == mm_ftruncw_op)
+ func = ftrunc_op;
+ else if (insn.mm_fp1_format.op == mm_froundl_op)
+ func = froundl_op;
+ else if (insn.mm_fp1_format.op == mm_froundw_op)
+ func = fround_op;
+ else if (insn.mm_fp1_format.op == mm_fcvtl_op)
+ func = fcvtl_op;
+ else
+ func = fcvtw_op;
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sd_format[insn.mm_fp1_format.fmt];
+ mips32_insn.fp0_format.ft = 0;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp1_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp1_format.rt;
+ mips32_insn.fp0_format.func = func;
+ break;
+ case mm_frsqrt_op:
+ case mm_fsqrt_op:
+ case mm_frecip_op:
+ if (insn.mm_fp1_format.op == mm_frsqrt_op)
+ func = frsqrt_op;
+ else if (insn.mm_fp1_format.op == mm_fsqrt_op)
+ func = fsqrt_op;
+ else
+ func = frecip_op;
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sdps_format[insn.mm_fp1_format.fmt];
+ mips32_insn.fp0_format.ft = 0;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp1_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp1_format.rt;
+ mips32_insn.fp0_format.func = func;
+ break;
+ case mm_mfc1_op:
+ case mm_mtc1_op:
+ case mm_cfc1_op:
+ case mm_ctc1_op:
+ if (insn.mm_fp1_format.op == mm_mfc1_op)
+ op = mfc_op;
+ else if (insn.mm_fp1_format.op == mm_mtc1_op)
+ op = mtc_op;
+ else if (insn.mm_fp1_format.op == mm_cfc1_op)
+ op = cfc_op;
+ else
+ op = ctc_op;
+ mips32_insn.fp1_format.opcode = cop1_op;
+ mips32_insn.fp1_format.op = op;
+ mips32_insn.fp1_format.rt =
+ insn.mm_fp1_format.rt;
+ mips32_insn.fp1_format.fs =
+ insn.mm_fp1_format.fs;
+ mips32_insn.fp1_format.fd = 0;
+ mips32_insn.fp1_format.func = 0;
+ break;
+ default:
+ return SIGILL;
+ break;
+ }
+ break;
+ case mm_32f_74_op: /* c.cond.fmt */
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sdps_format[insn.mm_fp4_format.fmt];
+ mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt;
+ mips32_insn.fp0_format.fs = insn.mm_fp4_format.fs;
+ mips32_insn.fp0_format.fd = insn.mm_fp4_format.cc << 2;
+ mips32_insn.fp0_format.func =
+ insn.mm_fp4_format.cond | MM_MIPS32_COND_FC;
+ break;
+ default:
+ return SIGILL;
+ break;
+ }
+ break;
+ default:
+ return SIGILL;
+ break;
+ }
+
+ *insn_ptr = mips32_insn;
+ return 0;
+}
+
+int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
+{
+ union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+ int bc_false = 0;
+ unsigned int fcr31;
+ unsigned int bit;
+
+ switch (insn.mm_i_format.opcode) {
+ case mm_pool32a_op:
+ if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
+ mm_pool32axf_op) {
+ switch (insn.mm_i_format.simmediate >>
+ MM_POOL32A_MINOR_SHIFT) {
+ case mm_jalr_op:
+ case mm_jalrhb_op:
+ case mm_jalrs_op:
+ case mm_jalrshb_op:
+ if (insn.mm_i_format.rt != 0) /* Not mm_jr */
+ regs->regs[insn.mm_i_format.rt] =
+ regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ *contpc = regs->regs[insn.mm_i_format.rs];
+ return 1;
+ break;
+ }
+ }
+ break;
+ case mm_pool32i_op:
+ switch (insn.mm_i_format.rt) {
+ case mm_bltzals_op:
+ case mm_bltzal_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ /* Fall through */
+ case mm_bltz_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] < 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ case mm_bgezals_op:
+ case mm_bgezal_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ /* Fall through */
+ case mm_bgez_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ case mm_blez_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ case mm_bgtz_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ case mm_bc2f_op:
+ case mm_bc1f_op:
+ bc_false = 1;
+ /* Fall through */
+ case mm_bc2t_op:
+ case mm_bc1t_op:
+ preempt_disable();
+ if (is_fpu_owner())
+ asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+ else
+ fcr31 = current->thread.fpu.fcr31;
+ preempt_enable();
+
+ if (bc_false)
+ fcr31 = ~fcr31;
+
+ bit = (insn.mm_i_format.rs >> 2);
+ bit += (bit != 0);
+ bit += 23;
+ if (fcr31 & (1 << bit))
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ break;
+ }
+ break;
+ case mm_pool16c_op:
+ switch (insn.mm_i_format.rt) {
+ case mm_jalr16_op:
+ case mm_jalrs16_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ /* Fall through */
+ case mm_jr16_op:
+ *contpc = regs->regs[insn.mm_i_format.rs];
+ return 1;
+ break;
+ }
+ break;
+ case mm_beqz16_op:
+ if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_b1_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ break;
+ case mm_bnez16_op:
+ if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_b1_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ break;
+ case mm_b16_op:
+ *contpc = regs->cp0_epc + dec_insn.pc_inc +
+ (insn.mm_b0_format.simmediate << 1);
+ return 1;
+ break;
+ case mm_beq32_op:
+ if (regs->regs[insn.mm_i_format.rs] ==
+ regs->regs[insn.mm_i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ case mm_bne32_op:
+ if (regs->regs[insn.mm_i_format.rs] !=
+ regs->regs[insn.mm_i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ break;
+ case mm_jalx32_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ *contpc = regs->cp0_epc + dec_insn.pc_inc;
+ *contpc >>= 28;
+ *contpc <<= 28;
+ *contpc |= (insn.j_format.target << 2);
+ return 1;
+ break;
+ case mm_jals32_op:
+ case mm_jal32_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ /* Fall through */
+ case mm_j32_op:
+ *contpc = regs->cp0_epc + dec_insn.pc_inc;
+ *contpc >>= 27;
+ *contpc <<= 27;
+ *contpc |= (insn.j_format.target << 1);
+ set_isa16_mode(*contpc);
+ return 1;
+ break;
+ }
+ return 0;
+}
/*
* Redundant with logic already in kernel/branch.c,
@@ -117,53 +673,177 @@ static const unsigned int fpucondbit[8] = {
* a single subroutine should be used across both
* modules.
*/
-static int isBranchInstr(mips_instruction * i)
+static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
{
- switch (MIPSInst_OPCODE(*i)) {
+ union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+ unsigned int fcr31;
+ unsigned int bit = 0;
+
+ switch (insn.i_format.opcode) {
case spec_op:
- switch (MIPSInst_FUNC(*i)) {
+ switch (insn.r_format.func) {
case jalr_op:
+ regs->regs[insn.r_format.rd] =
+ regs->cp0_epc + dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ /* Fall through */
case jr_op:
+ *contpc = regs->regs[insn.r_format.rs];
return 1;
+ break;
}
break;
-
case bcond_op:
- switch (MIPSInst_RT(*i)) {
+ switch (insn.i_format.rt) {
+ case bltzal_op:
+ case bltzall_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ /* Fall through */
case bltz_op:
- case bgez_op:
case bltzl_op:
- case bgezl_op:
- case bltzal_op:
+ if ((long)regs->regs[insn.i_format.rs] < 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
case bgezal_op:
- case bltzall_op:
case bgezall_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ /* Fall through */
+ case bgez_op:
+ case bgezl_op:
+ if ((long)regs->regs[insn.i_format.rs] >= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
return 1;
+ break;
}
break;
-
- case j_op:
- case jal_op:
case jalx_op:
+ set_isa16_mode(bit);
+ case jal_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ /* Fall through */
+ case j_op:
+ *contpc = regs->cp0_epc + dec_insn.pc_inc;
+ *contpc >>= 28;
+ *contpc <<= 28;
+ *contpc |= (insn.j_format.target << 2);
+ /* Set microMIPS mode bit: XOR for jalx. */
+ *contpc ^= bit;
+ return 1;
+ break;
case beq_op:
- case bne_op:
- case blez_op:
- case bgtz_op:
case beql_op:
+ if (regs->regs[insn.i_format.rs] ==
+ regs->regs[insn.i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ case bne_op:
case bnel_op:
+ if (regs->regs[insn.i_format.rs] !=
+ regs->regs[insn.i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ case blez_op:
case blezl_op:
+ if ((long)regs->regs[insn.i_format.rs] <= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ case bgtz_op:
case bgtzl_op:
+ if ((long)regs->regs[insn.i_format.rs] > 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
return 1;
-
+ break;
case cop0_op:
case cop1_op:
case cop2_op:
case cop1x_op:
- if (MIPSInst_RS(*i) == bc_op)
- return 1;
+ if (insn.i_format.rs == bc_op) {
+ preempt_disable();
+ if (is_fpu_owner())
+ asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+ else
+ fcr31 = current->thread.fpu.fcr31;
+ preempt_enable();
+
+ bit = (insn.i_format.rt >> 2);
+ bit += (bit != 0);
+ bit += 23;
+ switch (insn.i_format.rt & 3) {
+ case 0: /* bc1f */
+ case 2: /* bc1fl */
+ if (~fcr31 & (1 << bit))
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ case 1: /* bc1t */
+ case 3: /* bc1tl */
+ if (fcr31 & (1 << bit))
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ }
+ }
break;
}
-
return 0;
}
@@ -210,26 +890,23 @@ static inline int cop1_64bit(struct pt_regs *xcp)
*/
static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
- void *__user *fault_addr)
+ struct mm_decoded_insn dec_insn, void *__user *fault_addr)
{
mips_instruction ir;
- unsigned long emulpc, contpc;
+ unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
unsigned int cond;
-
- if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
- MIPS_FPU_EMU_INC_STATS(errors);
- *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
- return SIGBUS;
- }
- if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
- MIPS_FPU_EMU_INC_STATS(errors);
- *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
- return SIGSEGV;
- }
+ int pc_inc;
/* XXX NEC Vr54xx bug workaround */
- if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir))
- xcp->cp0_cause &= ~CAUSEF_BD;
+ if (xcp->cp0_cause & CAUSEF_BD) {
+ if (dec_insn.micro_mips_mode) {
+ if (!mm_isBranchInstr(xcp, dec_insn, &contpc))
+ xcp->cp0_cause &= ~CAUSEF_BD;
+ } else {
+ if (!isBranchInstr(xcp, dec_insn, &contpc))
+ xcp->cp0_cause &= ~CAUSEF_BD;
+ }
+ }
if (xcp->cp0_cause & CAUSEF_BD) {
/*
@@ -244,32 +921,33 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
* Linux MIPS branch emulator operates on context, updating the
* cp0_epc.
*/
- emulpc = xcp->cp0_epc + 4; /* Snapshot emulation target */
+ ir = dec_insn.next_insn; /* process delay slot instr */
+ pc_inc = dec_insn.next_pc_inc;
+ } else {
+ ir = dec_insn.insn; /* process current instr */
+ pc_inc = dec_insn.pc_inc;
+ }
- if (__compute_return_epc(xcp) < 0) {
-#ifdef CP1DBG
- printk("failed to emulate branch at %p\n",
- (void *) (xcp->cp0_epc));
-#endif
+ /*
+	 * Since microMIPS FPU instructions are a subset of MIPS32 FPU
+	 * instructions, we want to convert microMIPS FPU instructions
+	 * into MIPS32 instructions so that we can reuse all of the
+	 * FPU emulation code.
+ *
+ * NOTE: We cannot do this for branch instructions since they
+ * are not a subset. Example: Cannot emulate a 16-bit
+ * aligned target address with a MIPS32 instruction.
+ */
+ if (dec_insn.micro_mips_mode) {
+ /*
+		 * If the next instruction is a 16-bit instruction, then it
+		 * cannot be an FPU instruction. This can happen since we
+		 * can be called for non-FPU instructions.
+ */
+ if ((pc_inc == 2) ||
+ (microMIPS32_to_MIPS32((union mips_instruction *)&ir)
+ == SIGILL))
return SIGILL;
- }
- if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) {
- MIPS_FPU_EMU_INC_STATS(errors);
- *fault_addr = (mips_instruction __user *)emulpc;
- return SIGBUS;
- }
- if (__get_user(ir, (mips_instruction __user *) emulpc)) {
- MIPS_FPU_EMU_INC_STATS(errors);
- *fault_addr = (mips_instruction __user *)emulpc;
- return SIGSEGV;
- }
- /* __compute_return_epc() will have updated cp0_epc */
- contpc = xcp->cp0_epc;
- /* In order not to confuse ptrace() et al, tweak context */
- xcp->cp0_epc = emulpc - 4;
- } else {
- emulpc = xcp->cp0_epc;
- contpc = xcp->cp0_epc + 4;
}
emul:
@@ -474,22 +1152,35 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
/* branch taken: emulate dslot
* instruction
*/
- xcp->cp0_epc += 4;
- contpc = (xcp->cp0_epc +
- (MIPSInst_SIMM(ir) << 2));
-
- if (!access_ok(VERIFY_READ, xcp->cp0_epc,
- sizeof(mips_instruction))) {
- MIPS_FPU_EMU_INC_STATS(errors);
- *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
- return SIGBUS;
- }
- if (__get_user(ir,
- (mips_instruction __user *) xcp->cp0_epc)) {
- MIPS_FPU_EMU_INC_STATS(errors);
- *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
- return SIGSEGV;
- }
+ xcp->cp0_epc += dec_insn.pc_inc;
+
+ contpc = MIPSInst_SIMM(ir);
+ ir = dec_insn.next_insn;
+ if (dec_insn.micro_mips_mode) {
+ contpc = (xcp->cp0_epc + (contpc << 1));
+
+ /* If 16-bit instruction, not FPU. */
+ if ((dec_insn.next_pc_inc == 2) ||
+ (microMIPS32_to_MIPS32((union mips_instruction *)&ir) == SIGILL)) {
+
+ /*
+ * Since this instruction will
+ * be put on the stack with
+ * 32-bit words, get around
+ * this problem by putting a
+ * NOP16 as the second one.
+ */
+ if (dec_insn.next_pc_inc == 2)
+ ir = (ir & (~0xffff)) | MM_NOP16;
+
+ /*
+ * Single step the non-CP1
+ * instruction in the dslot.
+ */
+ return mips_dsemul(xcp, ir, contpc);
+ }
+ } else
+ contpc = (xcp->cp0_epc + (contpc << 2));
switch (MIPSInst_OPCODE(ir)) {
case lwc1_op:
@@ -525,8 +1216,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
* branch likely nullifies
* dslot if not taken
*/
- xcp->cp0_epc += 4;
- contpc += 4;
+ xcp->cp0_epc += dec_insn.pc_inc;
+ contpc += dec_insn.pc_inc;
/*
* else continue & execute
* dslot as normal insn
@@ -1313,25 +2004,75 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
int has_fpu, void *__user *fault_addr)
{
unsigned long oldepc, prevepc;
- mips_instruction insn;
+ struct mm_decoded_insn dec_insn;
+ u16 instr[4];
+ u16 *instr_ptr;
int sig = 0;
oldepc = xcp->cp0_epc;
do {
prevepc = xcp->cp0_epc;
- if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
- MIPS_FPU_EMU_INC_STATS(errors);
- *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
- return SIGBUS;
- }
- if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
- MIPS_FPU_EMU_INC_STATS(errors);
- *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
- return SIGSEGV;
+ if (get_isa16_mode(prevepc) && cpu_has_mmips) {
+ /*
+ * Get next 2 microMIPS instructions and convert them
+ * into 32-bit instructions.
+ */
+ if ((get_user(instr[0], (u16 __user *)msk_isa16_mode(xcp->cp0_epc))) ||
+ (get_user(instr[1], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 2))) ||
+ (get_user(instr[2], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 4))) ||
+ (get_user(instr[3], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 6)))) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ return SIGBUS;
+ }
+ instr_ptr = instr;
+
+ /* Get first instruction. */
+ if (mm_insn_16bit(*instr_ptr)) {
+ /* Duplicate the half-word. */
+ dec_insn.insn = (*instr_ptr << 16) |
+ (*instr_ptr);
+ /* 16-bit instruction. */
+ dec_insn.pc_inc = 2;
+ instr_ptr += 1;
+ } else {
+ dec_insn.insn = (*instr_ptr << 16) |
+ *(instr_ptr+1);
+ /* 32-bit instruction. */
+ dec_insn.pc_inc = 4;
+ instr_ptr += 2;
+ }
+ /* Get second instruction. */
+ if (mm_insn_16bit(*instr_ptr)) {
+ /* Duplicate the half-word. */
+ dec_insn.next_insn = (*instr_ptr << 16) |
+ (*instr_ptr);
+ /* 16-bit instruction. */
+ dec_insn.next_pc_inc = 2;
+ } else {
+ dec_insn.next_insn = (*instr_ptr << 16) |
+ *(instr_ptr+1);
+ /* 32-bit instruction. */
+ dec_insn.next_pc_inc = 4;
+ }
+ dec_insn.micro_mips_mode = 1;
+ } else {
+ if ((get_user(dec_insn.insn,
+ (mips_instruction __user *) xcp->cp0_epc)) ||
+ (get_user(dec_insn.next_insn,
+ (mips_instruction __user *)(xcp->cp0_epc+4)))) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ return SIGBUS;
+ }
+ dec_insn.pc_inc = 4;
+ dec_insn.next_pc_inc = 4;
+ dec_insn.micro_mips_mode = 0;
}
- if (insn == 0)
- xcp->cp0_epc += 4; /* skip nops */
+
+ if ((dec_insn.insn == 0) ||
+ ((dec_insn.pc_inc == 2) &&
+ ((dec_insn.insn & 0xffff) == MM_NOP16)))
+ xcp->cp0_epc += dec_insn.pc_inc; /* Skip NOPs */
else {
/*
* The 'ieee754_csr' is an alias of
@@ -1341,7 +2082,7 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
*/
/* convert to ieee library modes */
ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
- sig = cop1Emulate(xcp, ctx, fault_addr);
+ sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr);
/* revert to mips rounding mode */
ieee754_csr.rm = mips_rm[ieee754_csr.rm];
}
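
The fetch loop above packs each microMIPS instruction into a 32-bit word, duplicating the half-word for 16-bit encodings so the decoder always sees a full word. A standalone sketch of that packing rule (the helper name is illustrative; mm_insn_16bit() is the same predicate used above):

/* Hedged sketch of the packing rule used in the fetch loop above. */
#include <linux/types.h>

static u32 example_pack_mm_insn(const u16 *halves)
{
	if (mm_insn_16bit(halves[0]))
		/* 16-bit encoding: duplicate the half-word. */
		return ((u32)halves[0] << 16) | halves[0];
	/* 32-bit encoding: major half first, minor half second. */
	return ((u32)halves[0] << 16) | halves[1];
}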
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 384a3b0091ea..7ea622ab8dad 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -55,7 +55,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
struct emuframe __user *fr;
int err;
- if (ir == 0) { /* a nop is easy */
+ if ((get_isa16_mode(regs->cp0_epc) && ((ir >> 16) == MM_NOP16)) ||
+ (ir == 0)) {
+ /* NOP is easy */
regs->cp0_epc = cpc;
regs->cp0_cause &= ~CAUSEF_BD;
return 0;
@@ -91,8 +93,16 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
return SIGBUS;
- err = __put_user(ir, &fr->emul);
- err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
+ if (get_isa16_mode(regs->cp0_epc)) {
+ err = __put_user(ir >> 16, (u16 __user *)(&fr->emul));
+ err |= __put_user(ir & 0xffff, (u16 __user *)((long)(&fr->emul) + 2));
+ err |= __put_user(BREAK_MATH >> 16, (u16 __user *)(&fr->badinst));
+ err |= __put_user(BREAK_MATH & 0xffff, (u16 __user *)((long)(&fr->badinst) + 2));
+ } else {
+ err = __put_user(ir, &fr->emul);
+ err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
+ }
+
err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie);
err |= __put_user(cpc, &fr->epc);
@@ -101,7 +111,8 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
return SIGBUS;
}
- regs->cp0_epc = (unsigned long) &fr->emul;
+ regs->cp0_epc = ((unsigned long) &fr->emul) |
+ get_isa16_mode(regs->cp0_epc);
flush_cache_sigtramp((unsigned long)&fr->badinst);
@@ -114,9 +125,10 @@ int do_dsemulret(struct pt_regs *xcp)
unsigned long epc;
u32 insn, cookie;
int err = 0;
+ u16 instr[2];
fr = (struct emuframe __user *)
- (xcp->cp0_epc - sizeof(mips_instruction));
+ (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction));
/*
* If we can't even access the area, something is very wrong, but we'll
@@ -131,7 +143,13 @@ int do_dsemulret(struct pt_regs *xcp)
* - Is the instruction pointed to by the EPC an BREAK_MATH?
* - Is the following memory word the BD_COOKIE?
*/
- err = __get_user(insn, &fr->badinst);
+ if (get_isa16_mode(xcp->cp0_epc)) {
+ err = __get_user(instr[0], (u16 __user *)(&fr->badinst));
+ err |= __get_user(instr[1], (u16 __user *)((long)(&fr->badinst) + 2));
+ insn = (instr[0] << 16) | instr[1];
+ } else {
+ err = __get_user(insn, &fr->badinst);
+ }
err |= __get_user(cookie, &fr->cookie);
if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) {
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 1dcec30ad1c4..e87aae1f2e80 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -4,7 +4,7 @@
obj-y += cache.o dma-default.o extable.o fault.o \
gup.o init.o mmap.o page.o page-funcs.o \
- tlbex.o tlbex-fault.o uasm.o
+ tlbex.o tlbex-fault.o uasm-mips.o
obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
obj-$(CONFIG_64BIT) += pgtable-64.o
@@ -22,3 +22,5 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
+
+obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 2078915eacb9..21813beec7a5 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -33,6 +33,7 @@
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
+#include <asm/dma-coherence.h>
/*
* Special Variant of smp_call_function for use by cache functions:
@@ -136,7 +137,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
}
-static void (* r4k_blast_dcache)(void);
+void (* r4k_blast_dcache)(void);
+EXPORT_SYMBOL(r4k_blast_dcache);
static void __cpuinit r4k_blast_dcache_setup(void)
{
@@ -264,7 +266,8 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}
-static void (* r4k_blast_icache)(void);
+void (* r4k_blast_icache)(void);
+EXPORT_SYMBOL(r4k_blast_icache);
static void __cpuinit r4k_blast_icache_setup(void)
{
@@ -1377,20 +1380,6 @@ static void __cpuinit coherency_setup(void)
}
}
-#if defined(CONFIG_DMA_NONCOHERENT)
-
-static int __cpuinitdata coherentio;
-
-static int __init setcoherentio(char *str)
-{
- coherentio = 1;
-
- return 0;
-}
-
-early_param("coherentio", setcoherentio);
-#endif
-
static void __cpuinit r4k_cache_error_setup(void)
{
extern char __weak except_vec2_generic;
@@ -1472,9 +1461,14 @@ void __cpuinit r4k_cache_init(void)
build_clear_page();
build_copy_page();
-#if !defined(CONFIG_MIPS_CMP)
+
+ /*
+	 * We want to run CMP kernels on cores with and without coherent
+ * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
+ * or not to flush caches.
+ */
local_r4k___flush_cache_all(NULL);
-#endif
+
coherency_setup();
board_cache_error_setup = r4k_cache_error_setup;
}
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 07cec4407b0c..5aeb3eb0b72f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -48,6 +48,7 @@ void (*flush_icache_all)(void);
EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
+EXPORT_SYMBOL(flush_icache_all);
#ifdef CONFIG_DMA_NONCOHERENT
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index f9ef83829a52..caf92ecb37d6 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -22,6 +22,26 @@
#include <dma-coherence.h>
+int coherentio = 0; /* User defined DMA coherency from command line. */
+EXPORT_SYMBOL_GPL(coherentio);
+int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
+
+static int __init setcoherentio(char *str)
+{
+ coherentio = 1;
+ pr_info("Hardware DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("coherentio", setcoherentio);
+
+static int __init setnocoherentio(char *str)
+{
+ coherentio = 0;
+ pr_info("Software DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("nocoherentio", setnocoherentio);
+
static inline struct page *dma_addr_to_page(struct device *dev,
dma_addr_t dma_addr)
{
@@ -115,7 +135,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
if (!plat_device_is_coherent(dev)) {
dma_cache_wback_inv((unsigned long) ret, size);
- ret = UNCAC_ADDR(ret);
+ if (!hw_coherentio)
+ ret = UNCAC_ADDR(ret);
}
}
@@ -142,7 +163,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
- if (!plat_device_is_coherent(dev))
+ if (!plat_device_is_coherent(dev) && !hw_coherentio)
addr = CAC_ADDR(addr);
free_pages(addr, get_order(size));
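Taken together, the new flags mean: "coherentio"/"nocoherentio" record the user's choice from the command line, hw_coherentio records what the platform actually provides, and a coherent allocation is only moved to the uncached window when neither the device nor the hardware keeps DMA coherent. A condensed sketch of that decision, reusing the MIPS-internal helpers exactly as dma-default.c does; the wrapper function itself is illustrative:

#include <linux/device.h>
#include <asm/io.h>
#include <asm/page.h>
#include <dma-coherence.h>

extern int coherentio;          /* "coherentio"/"nocoherentio" on the command line */
extern int hw_coherentio;       /* set elsewhere when the hardware keeps DMA coherent */

static void *maybe_remap_uncached(struct device *dev, void *cpu_addr, size_t size)
{
        if (plat_device_is_coherent(dev))
                return cpu_addr;        /* device snoops the caches, nothing to do */

        /* write back and invalidate the cached alias first */
        dma_cache_wback_inv((unsigned long)cpu_addr, size);

        /* only switch to the uncached alias when the hardware cannot help */
        if (!hw_coherentio)
                cpu_addr = UNCAC_ADDR(cpu_addr);

        return cpu_addr;
}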
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index a29fba55b53e..4eb8dcfaf1ce 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -247,6 +247,11 @@ void __cpuinit build_clear_page(void)
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
int i;
+ static atomic_t run_once = ATOMIC_INIT(0);
+
+ if (atomic_xchg(&run_once, 1)) {
+ return;
+ }
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -389,6 +394,11 @@ void __cpuinit build_copy_page(void)
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
int i;
+ static atomic_t run_once = ATOMIC_INIT(0);
+
+ if (atomic_xchg(&run_once, 1)) {
+ return;
+ }
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
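build_clear_page() and build_copy_page() now bail out on every call after the first: atomic_xchg() returns the previous value, so only the caller that flips 0 to 1 actually generates the handlers. The same guard pattern in isolation, as a self-contained sketch:

#include <linux/atomic.h>

static atomic_t run_once = ATOMIC_INIT(0);

static void generate_handlers_once(void)
{
        if (atomic_xchg(&run_once, 1))
                return;         /* another caller already did (or is doing) the work */

        /* ... emit the uasm code exactly once ... */
}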
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index a63d1ed0827f..4a13c150f31b 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
#endif
local_irq_save(flags);
- old_ctx = read_c0_entryhi() & ASID_MASK;
+ old_ctx = ASID_MASK(read_c0_entryhi());
write_c0_entrylo0(0);
entry = r3k_have_wired_reg ? read_c0_wired() : 8;
for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
#ifdef DEBUG_TLB
printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
- cpu_context(cpu, mm) & ASID_MASK, start, end);
+ ASID_MASK(cpu_context(cpu, mm)), start, end);
#endif
local_irq_save(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (size <= current_cpu_data.tlbsize) {
- int oldpid = read_c0_entryhi() & ASID_MASK;
- int newpid = cpu_context(cpu, mm) & ASID_MASK;
+ int oldpid = ASID_MASK(read_c0_entryhi());
+ int newpid = ASID_MASK(cpu_context(cpu, mm));
start &= PAGE_MASK;
end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
#ifdef DEBUG_TLB
printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
- newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
+ newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
page &= PAGE_MASK;
local_irq_save(flags);
- oldpid = read_c0_entryhi() & ASID_MASK;
+ oldpid = ASID_MASK(read_c0_entryhi());
write_c0_entryhi(page | newpid);
BARRIER;
tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
if (current->active_mm != vma->vm_mm)
return;
- pid = read_c0_entryhi() & ASID_MASK;
+ pid = ASID_MASK(read_c0_entryhi());
#ifdef DEBUG_TLB
- if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+ if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
(cpu_context(cpu, vma->vm_mm)), pid);
}
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
local_irq_save(flags);
/* Save old context and create impossible VPN2 value */
- old_ctx = read_c0_entryhi() & ASID_MASK;
+ old_ctx = ASID_MASK(read_c0_entryhi());
old_pagemask = read_c0_pagemask();
w = read_c0_wired();
write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#endif
local_irq_save(flags);
- old_ctx = read_c0_entryhi() & ASID_MASK;
+ old_ctx = ASID_MASK(read_c0_entryhi());
write_c0_entrylo0(entrylo0);
write_c0_entryhi(entryhi);
write_c0_index(wired);
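Throughout the TLB code, the open-coded "value & ASID_MASK" becomes a function-like ASID_MASK(value), so the mask no longer has to be a compile-time constant. Purely as an illustration of the new calling convention — the real macro in asm/mmu_context.h is presumably built around the boot-time instruction patching that setup_asid()/insn_fixup() perform in tlbex.c further down — a variable-mask version could look like this:

static unsigned long asid_mask = 0xff;          /* 0xfc0 on R3000-class CPUs */

#define ASID_MASK(val)  ((val) & asid_mask)

static inline unsigned long current_asid(unsigned long entryhi)
{
        return ASID_MASK(entryhi);      /* replaces "entryhi & ASID_MASK" */
}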
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 493131c81a29..09653b290d53 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -13,6 +13,7 @@
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
+#include <linux/module.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
@@ -94,6 +95,7 @@ void local_flush_tlb_all(void)
FLUSH_ITLB;
EXIT_CRITICAL(flags);
}
+EXPORT_SYMBOL(local_flush_tlb_all);
/* All entries common to a mm share an asid. To effectively flush
these entries, we just bump the asid. */
@@ -285,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
ENTER_CRITICAL(flags);
- pid = read_c0_entryhi() & ASID_MASK;
+ pid = ASID_MASK(read_c0_entryhi());
address &= (PAGE_MASK << 1);
write_c0_entryhi(address | pid);
pgdp = pgd_offset(vma->vm_mm, address);
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 91c2499f806a..122f9207f49e 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
if (current->active_mm != vma->vm_mm)
return;
- pid = read_c0_entryhi() & ASID_MASK;
+ pid = ASID_MASK(read_c0_entryhi());
local_irq_save(flags);
address &= PAGE_MASK;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 820e6612d744..4d46d3787576 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/cache.h>
+#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/war.h>
@@ -305,6 +306,78 @@ static struct uasm_reloc relocs[128] __cpuinitdata;
static int check_for_high_segbits __cpuinitdata;
#endif
+static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
+ unsigned int i_const)
+{
+ unsigned int **p;
+
+ for (p = start; p < stop; p++) {
+#ifndef CONFIG_CPU_MICROMIPS
+ unsigned int *ip;
+
+ ip = *p;
+ *ip = (*ip & 0xffff0000) | i_const;
+#else
+ unsigned short *ip;
+
+ ip = ((unsigned short *)((unsigned int)*p - 1));
+ if ((*ip & 0xf000) == 0x4000) {
+ *ip &= 0xfff1;
+ *ip |= (i_const << 1);
+ } else if ((*ip & 0xf000) == 0x6000) {
+ *ip &= 0xfff1;
+ *ip |= ((i_const >> 2) << 1);
+ } else {
+ ip++;
+ *ip = i_const;
+ }
+#endif
+ local_flush_icache_range((unsigned long)ip,
+ (unsigned long)ip + sizeof(*ip));
+ }
+}
+
+#define asid_insn_fixup(section, const) \
+do { \
+ extern unsigned int *__start_ ## section; \
+ extern unsigned int *__stop_ ## section; \
+ insn_fixup(&__start_ ## section, &__stop_ ## section, const); \
+} while(0)
+
+/*
+ * Caller is assumed to flush the caches before the first context switch.
+ */
+static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
+ unsigned int version_mask,
+ unsigned int first_version)
+{
+ extern asmlinkage void handle_ri_rdhwr_vivt(void);
+ unsigned long *vivt_exc;
+
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * Worst case optimised microMIPS addiu instructions support
+ * only a 3-bit immediate value.
+ */
+ if(inc > 7)
+ panic("Invalid ASID increment value!");
+#endif
+ asid_insn_fixup(__asid_inc, inc);
+ asid_insn_fixup(__asid_mask, mask);
+ asid_insn_fixup(__asid_version_mask, version_mask);
+ asid_insn_fixup(__asid_first_version, first_version);
+
+ /* Patch up the 'handle_ri_rdhwr_vivt' handler. */
+ vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
+#ifdef CONFIG_CPU_MICROMIPS
+ vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
+#endif
+ vivt_exc++;
+ *vivt_exc = (*vivt_exc & ~mask) | mask;
+
+ current_cpu_data.asid_cache = first_version;
+}
+
static int check_for_high_segbits __cpuinitdata;
static unsigned int kscratch_used_mask __cpuinitdata;
@@ -1458,17 +1531,17 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
+u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned;
static void __cpuinit build_r4000_setup_pgd(void)
{
const int a0 = 4;
const int a1 = 5;
- u32 *p = tlbmiss_handler_setup_pgd;
+ u32 *p = tlbmiss_handler_setup_pgd_array;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
- memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
+ memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -1496,15 +1569,15 @@ static void __cpuinit build_r4000_setup_pgd(void)
uasm_i_jr(&p, 31);
UASM_i_MTC0(&p, a0, 31, pgd_reg);
}
- if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
- panic("tlbmiss_handler_setup_pgd space exceeded");
+ if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
+ panic("tlbmiss_handler_setup_pgd_array space exceeded");
uasm_resolve_relocs(relocs, labels);
- pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
- (unsigned int)(p - tlbmiss_handler_setup_pgd));
+ pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n",
+ (unsigned int)(p - tlbmiss_handler_setup_pgd_array));
dump_handler("tlbmiss_handler",
- tlbmiss_handler_setup_pgd,
- ARRAY_SIZE(tlbmiss_handler_setup_pgd));
+ tlbmiss_handler_setup_pgd_array,
+ ARRAY_SIZE(tlbmiss_handler_setup_pgd_array));
}
#endif
@@ -2030,6 +2103,13 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
uasm_l_nopage_tlbl(&l, p);
build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+ if ((unsigned long)tlb_do_page_fault_0 & 1) {
+ uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
+ uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
+ uasm_i_jr(&p, K0);
+ } else
+#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
uasm_i_nop(&p);
@@ -2077,6 +2157,13 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
uasm_l_nopage_tlbs(&l, p);
build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+ if ((unsigned long)tlb_do_page_fault_1 & 1) {
+ uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+ uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+ uasm_i_jr(&p, K0);
+ } else
+#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
@@ -2125,6 +2212,13 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
uasm_l_nopage_tlbm(&l, p);
build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+ if ((unsigned long)tlb_do_page_fault_1 & 1) {
+ uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+ uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+ uasm_i_jr(&p, K0);
+ } else
+#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
@@ -2162,8 +2256,12 @@ void __cpuinit build_tlb_refill_handler(void)
case CPU_TX3922:
case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
- build_r3000_tlb_refill_handler();
+ setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
+ if (cpu_has_local_ebase)
+ build_r3000_tlb_refill_handler();
if (!run_once) {
+ if (!cpu_has_local_ebase)
+ build_r3000_tlb_refill_handler();
build_r3000_tlb_load_handler();
build_r3000_tlb_store_handler();
build_r3000_tlb_modify_handler();
@@ -2184,6 +2282,11 @@ void __cpuinit build_tlb_refill_handler(void)
break;
default:
+#ifndef CONFIG_MIPS_MT_SMTC
+ setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
+#else
+ setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
+#endif
if (!run_once) {
scratch_reg = allocate_kscratch();
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -2192,9 +2295,12 @@ void __cpuinit build_tlb_refill_handler(void)
build_r4000_tlb_load_handler();
build_r4000_tlb_store_handler();
build_r4000_tlb_modify_handler();
+ if (!cpu_has_local_ebase)
+ build_r4000_tlb_refill_handler();
run_once++;
}
- build_r4000_tlb_refill_handler();
+ if (cpu_has_local_ebase)
+ build_r4000_tlb_refill_handler();
}
}
@@ -2207,7 +2313,7 @@ void __cpuinit flush_tlb_handlers(void)
local_flush_icache_range((unsigned long)handle_tlbm,
(unsigned long)handle_tlbm + sizeof(handle_tlbm));
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
- local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
- (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm));
+ local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array,
+ (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm));
#endif
}
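setup_asid() patches the ASID increment, mask and version constants directly into instructions whose addresses have been collected between the __start_/__stop_ markers of the __asid_* sections. How a call site might land its instruction address in such a section is sketched below; the helper name, section directive and inline asm are assumptions for illustration, not the kernel's actual mmu_context.h macros:

#include <linux/types.h>

#define ASID_INSN_IN_SECTION(sec)                               \
        ".pushsection " sec ", \"a\"\n\t"                       \
        ".word 1b\n\t" /* pointer-sized on 32-bit kernels only */ \
        ".popsection\n\t"

static inline unsigned long get_asid_inc(void)
{
        unsigned long asid;

        /* the 0 immediate below is what insn_fixup() rewrites at boot */
        asm("1:\taddiu\t%0, $0, 0\n\t"
            ASID_INSN_IN_SECTION("__asid_inc")
            : "=r" (asid));

        return asid;
}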
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
new file mode 100644
index 000000000000..162ee6d62788
--- /dev/null
+++ b/arch/mips/mm/uasm-micromips.c
@@ -0,0 +1,221 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#define UASM_ISA _UASM_ISA_MICROMIPS
+#include <asm/uasm.h>
+
+#define RS_MASK 0x1f
+#define RS_SH 16
+#define RT_MASK 0x1f
+#define RT_SH 21
+#define SCIMM_MASK 0x3ff
+#define SCIMM_SH 16
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f) \
+ ((a) << OP_SH \
+ | (b) << RT_SH \
+ | (c) << RS_SH \
+ | (d) << RD_SH \
+ | (e) << RE_SH \
+ | (f) << FUNC_SH)
+
+/* Define these when we are not the ISA the kernel is being compiled with. */
+#ifndef CONFIG_CPU_MICROMIPS
+#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
+#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
+#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
+#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
+#endif
+
+#include "uasm.c"
+
+static struct insn insn_table_MM[] __uasminitdata = {
+ { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
+ { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+ { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
+ { insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+ { insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_beql, 0, 0 },
+ { insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM },
+ { insn_bgezl, 0, 0 },
+ { insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM },
+ { insn_bltzl, 0, 0 },
+ { insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
+ { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
+ { insn_daddu, 0, 0 },
+ { insn_daddiu, 0, 0 },
+ { insn_dmfc0, 0, 0 },
+ { insn_dmtc0, 0, 0 },
+ { insn_dsll, 0, 0 },
+ { insn_dsll32, 0, 0 },
+ { insn_dsra, 0, 0 },
+ { insn_dsrl, 0, 0 },
+ { insn_dsrl32, 0, 0 },
+ { insn_drotr, 0, 0 },
+ { insn_drotr32, 0, 0 },
+ { insn_dsubu, 0, 0 },
+ { insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 },
+ { insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE },
+ { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },
+ { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
+ { insn_ld, 0, 0 },
+ { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
+ { insn_lld, 0, 0 },
+ { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
+ { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+ { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD },
+ { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
+ { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
+ { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+ { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM },
+ { insn_rfe, 0, 0 },
+ { insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM },
+ { insn_scd, 0, 0 },
+ { insn_sd, 0, 0 },
+ { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
+ { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
+ { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD },
+ { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
+ { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
+ { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+ { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
+ { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
+ { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
+ { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
+ { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
+ { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+ { insn_dins, 0, 0 },
+ { insn_dinsm, 0, 0 },
+ { insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
+ { insn_bbit0, 0, 0 },
+ { insn_bbit1, 0, 0 },
+ { insn_lwx, 0, 0 },
+ { insn_ldx, 0, 0 },
+ { insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __uasminit u32 build_bimm(s32 arg)
+{
+ WARN(arg > 0xffff || arg < -0x10000,
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+ return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
+}
+
+static inline __uasminit u32 build_jimm(u32 arg)
+{
+
+ WARN(arg & ~((JIMM_MASK << 2) | 1),
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg >> 1) & JIMM_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+{
+ struct insn *ip = NULL;
+ unsigned int i;
+ va_list ap;
+ u32 op;
+
+ for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++)
+ if (insn_table_MM[i].opcode == opc) {
+ ip = &insn_table_MM[i];
+ break;
+ }
+
+ if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+ panic("Unsupported Micro-assembler instruction %d", opc);
+
+ op = ip->match;
+ va_start(ap, opc);
+ if (ip->fields & RS) {
+ if (opc == insn_mfc0 || opc == insn_mtc0)
+ op |= build_rt(va_arg(ap, u32));
+ else
+ op |= build_rs(va_arg(ap, u32));
+ }
+ if (ip->fields & RT) {
+ if (opc == insn_mfc0 || opc == insn_mtc0)
+ op |= build_rs(va_arg(ap, u32));
+ else
+ op |= build_rt(va_arg(ap, u32));
+ }
+ if (ip->fields & RD)
+ op |= build_rd(va_arg(ap, u32));
+ if (ip->fields & RE)
+ op |= build_re(va_arg(ap, u32));
+ if (ip->fields & SIMM)
+ op |= build_simm(va_arg(ap, s32));
+ if (ip->fields & UIMM)
+ op |= build_uimm(va_arg(ap, u32));
+ if (ip->fields & BIMM)
+ op |= build_bimm(va_arg(ap, s32));
+ if (ip->fields & JIMM)
+ op |= build_jimm(va_arg(ap, u32));
+ if (ip->fields & FUNC)
+ op |= build_func(va_arg(ap, u32));
+ if (ip->fields & SET)
+ op |= build_set(va_arg(ap, u32));
+ if (ip->fields & SCIMM)
+ op |= build_scimm(va_arg(ap, u32));
+ va_end(ap);
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ **buf = ((op & 0xffff) << 16) | (op >> 16);
+#else
+ **buf = op;
+#endif
+ (*buf)++;
+}
+
+static inline void __uasminit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+ long laddr = (long)lab->addr;
+ long raddr = (long)rel->addr;
+
+ switch (rel->type) {
+ case R_MIPS_PC16:
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ *rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
+#else
+ *rel->addr |= build_bimm(laddr - (raddr + 4));
+#endif
+ break;
+
+ default:
+ panic("Unsupported Micro-assembler relocation %d",
+ rel->type);
+ }
+}
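One detail worth calling out in the new microMIPS back end: a 32-bit microMIPS instruction is architecturally a pair of 16-bit halfwords, so on a little-endian kernel build_insn() swaps the two halves before storing the word. A standalone illustration of that store, with an arbitrary encoding:

#include <stdint.h>
#include <stdio.h>

static uint32_t mm_store_word(uint32_t op, int little_endian)
{
        /* mirror of the CONFIG_CPU_LITTLE_ENDIAN branch above */
        return little_endian ? ((op & 0xffff) << 16) | (op >> 16) : op;
}

int main(void)
{
        uint32_t op = 0x12345678;       /* arbitrary 32-bit encoding */

        printf("big-endian image: %08x  little-endian image: %08x\n",
               mm_store_word(op, 0), mm_store_word(op, 1));
        return 0;
}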
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
new file mode 100644
index 000000000000..5fcdd8fe3e83
--- /dev/null
+++ b/arch/mips/mm/uasm-mips.c
@@ -0,0 +1,205 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#define UASM_ISA _UASM_ISA_CLASSIC
+#include <asm/uasm.h>
+
+#define RS_MASK 0x1f
+#define RS_SH 21
+#define RT_MASK 0x1f
+#define RT_SH 16
+#define SCIMM_MASK 0xfffff
+#define SCIMM_SH 6
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f) \
+ ((a) << OP_SH \
+ | (b) << RS_SH \
+ | (c) << RT_SH \
+ | (d) << RD_SH \
+ | (e) << RE_SH \
+ | (f) << FUNC_SH)
+
+/* Define these when we are not the ISA the kernel is being compiled with. */
+#ifdef CONFIG_CPU_MICROMIPS
+#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
+#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
+#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
+#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
+#endif
+
+#include "uasm.c"
+
+static struct insn insn_table[] __uasminitdata = {
+ { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
+ { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
+ { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
+ { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
+ { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
+ { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
+ { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
+ { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
+ { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
+ { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
+ { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
+ { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
+ { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
+ { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
+ { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
+ { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
+ { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
+ { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
+ { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
+ { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
+ { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
+ { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
+ { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
+ { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
+ { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
+ { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
+ { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
+ { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
+ { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
+ { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
+ { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
+ { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
+ { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
+ { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
+ { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
+ { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
+ { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
+ { insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __uasminit u32 build_bimm(s32 arg)
+{
+ WARN(arg > 0x1ffff || arg < -0x20000,
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+ return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
+}
+
+static inline __uasminit u32 build_jimm(u32 arg)
+{
+ WARN(arg & ~(JIMM_MASK << 2),
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg >> 2) & JIMM_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+{
+ struct insn *ip = NULL;
+ unsigned int i;
+ va_list ap;
+ u32 op;
+
+ for (i = 0; insn_table[i].opcode != insn_invalid; i++)
+ if (insn_table[i].opcode == opc) {
+ ip = &insn_table[i];
+ break;
+ }
+
+ if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+ panic("Unsupported Micro-assembler instruction %d", opc);
+
+ op = ip->match;
+ va_start(ap, opc);
+ if (ip->fields & RS)
+ op |= build_rs(va_arg(ap, u32));
+ if (ip->fields & RT)
+ op |= build_rt(va_arg(ap, u32));
+ if (ip->fields & RD)
+ op |= build_rd(va_arg(ap, u32));
+ if (ip->fields & RE)
+ op |= build_re(va_arg(ap, u32));
+ if (ip->fields & SIMM)
+ op |= build_simm(va_arg(ap, s32));
+ if (ip->fields & UIMM)
+ op |= build_uimm(va_arg(ap, u32));
+ if (ip->fields & BIMM)
+ op |= build_bimm(va_arg(ap, s32));
+ if (ip->fields & JIMM)
+ op |= build_jimm(va_arg(ap, u32));
+ if (ip->fields & FUNC)
+ op |= build_func(va_arg(ap, u32));
+ if (ip->fields & SET)
+ op |= build_set(va_arg(ap, u32));
+ if (ip->fields & SCIMM)
+ op |= build_scimm(va_arg(ap, u32));
+ va_end(ap);
+
+ **buf = op;
+ (*buf)++;
+}
+
+static inline void __uasminit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+ long laddr = (long)lab->addr;
+ long raddr = (long)rel->addr;
+
+ switch (rel->type) {
+ case R_MIPS_PC16:
+ *rel->addr |= build_bimm(laddr - (raddr + 4));
+ break;
+
+ default:
+ panic("Unsupported Micro-assembler relocation %d",
+ rel->type);
+ }
+}
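The classic-ISA table keeps the usual MIPS32/64 field layout (opcode in bits 31..26, rs in 25..21, rt in 20..16), which is also why it cannot share an encoding table with the microMIPS variant above, whose rs/rt shifts are swapped. Packing one instruction by hand makes the M() scheme concrete; plain C, and the register and immediate are arbitrary:

#include <stdint.h>
#include <stdio.h>

#define OP_SH   26
#define RS_SH   21
#define RT_SH   16

int main(void)
{
        uint32_t addiu_op = 9;          /* major opcode of addiu */
        uint32_t k0 = 26;               /* register $26 ($k0) */
        uint32_t insn;

        insn = (addiu_op << OP_SH) | (k0 << RS_SH) | (k0 << RT_SH) |
               (4 & 0xffff);

        printf("addiu $k0, $k0, 4 -> %08x\n", insn);    /* prints 275a0004 */
        return 0;
}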
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 942ff6c2eba2..7eb5e4355d25 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -10,17 +10,9 @@
* Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005, 2007 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
*/
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <asm/inst.h>
-#include <asm/elf.h>
-#include <asm/bugs.h>
-#include <asm/uasm.h>
-
enum fields {
RS = 0x001,
RT = 0x002,
@@ -37,10 +29,6 @@ enum fields {
#define OP_MASK 0x3f
#define OP_SH 26
-#define RS_MASK 0x1f
-#define RS_SH 21
-#define RT_MASK 0x1f
-#define RT_SH 16
#define RD_MASK 0x1f
#define RD_SH 11
#define RE_MASK 0x1f
@@ -53,8 +41,6 @@ enum fields {
#define FUNC_SH 0
#define SET_MASK 0x7
#define SET_SH 0
-#define SCIMM_MASK 0xfffff
-#define SCIMM_SH 6
enum opcode {
insn_invalid,
@@ -77,85 +63,6 @@ struct insn {
enum fields fields;
};
-/* This macro sets the non-variable bits of an instruction. */
-#define M(a, b, c, d, e, f) \
- ((a) << OP_SH \
- | (b) << RS_SH \
- | (c) << RT_SH \
- | (d) << RD_SH \
- | (e) << RE_SH \
- | (f) << FUNC_SH)
-
-static struct insn insn_table[] __uasminitdata = {
- { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
- { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
- { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
- { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
- { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
- { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
- { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
- { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
- { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
- { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
- { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
- { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
- { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
- { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
- { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
- { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
- { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
- { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
- { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
- { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
- { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
- { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
- { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
- { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
- { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
- { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
- { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
- { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
- { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
- { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
- { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
- { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
- { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
- { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
- { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
- { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
- { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
- { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
- { insn_invalid, 0, 0 }
-};
-
-#undef M
-
static inline __uasminit u32 build_rs(u32 arg)
{
WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -199,24 +106,6 @@ static inline __uasminit u32 build_uimm(u32 arg)
return arg & IMM_MASK;
}
-static inline __uasminit u32 build_bimm(s32 arg)
-{
- WARN(arg > 0x1ffff || arg < -0x20000,
- KERN_WARNING "Micro-assembler field overflow\n");
-
- WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
-
- return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
-}
-
-static inline __uasminit u32 build_jimm(u32 arg)
-{
- WARN(arg & ~(JIMM_MASK << 2),
- KERN_WARNING "Micro-assembler field overflow\n");
-
- return (arg >> 2) & JIMM_MASK;
-}
-
static inline __uasminit u32 build_scimm(u32 arg)
{
WARN(arg & ~SCIMM_MASK,
@@ -239,55 +128,7 @@ static inline __uasminit u32 build_set(u32 arg)
return arg & SET_MASK;
}
-/*
- * The order of opcode arguments is implicitly left to right,
- * starting with RS and ending with FUNC or IMM.
- */
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
-{
- struct insn *ip = NULL;
- unsigned int i;
- va_list ap;
- u32 op;
-
- for (i = 0; insn_table[i].opcode != insn_invalid; i++)
- if (insn_table[i].opcode == opc) {
- ip = &insn_table[i];
- break;
- }
-
- if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
- panic("Unsupported Micro-assembler instruction %d", opc);
-
- op = ip->match;
- va_start(ap, opc);
- if (ip->fields & RS)
- op |= build_rs(va_arg(ap, u32));
- if (ip->fields & RT)
- op |= build_rt(va_arg(ap, u32));
- if (ip->fields & RD)
- op |= build_rd(va_arg(ap, u32));
- if (ip->fields & RE)
- op |= build_re(va_arg(ap, u32));
- if (ip->fields & SIMM)
- op |= build_simm(va_arg(ap, s32));
- if (ip->fields & UIMM)
- op |= build_uimm(va_arg(ap, u32));
- if (ip->fields & BIMM)
- op |= build_bimm(va_arg(ap, s32));
- if (ip->fields & JIMM)
- op |= build_jimm(va_arg(ap, u32));
- if (ip->fields & FUNC)
- op |= build_func(va_arg(ap, u32));
- if (ip->fields & SET)
- op |= build_set(va_arg(ap, u32));
- if (ip->fields & SCIMM)
- op |= build_scimm(va_arg(ap, u32));
- va_end(ap);
-
- **buf = op;
- (*buf)++;
-}
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...);
#define I_u1u2u3(op) \
Ip_u1u2u3(op) \
@@ -445,7 +286,7 @@ I_u3u1u2(_ldx)
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#include <asm/octeon/octeon.h>
-void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
+void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
unsigned int c)
{
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -457,21 +298,21 @@ void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
else
build_insn(buf, insn_pref, c, a, b);
}
-UASM_EXPORT_SYMBOL(uasm_i_pref);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref));
#else
I_u2s3u1(_pref)
#endif
/* Handle labels. */
-void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
{
(*lab)->addr = addr;
(*lab)->lab = lid;
(*lab)++;
}
-UASM_EXPORT_SYMBOL(uasm_build_label);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
-int __uasminit uasm_in_compat_space_p(long addr)
+int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
{
/* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT
@@ -480,7 +321,7 @@ int __uasminit uasm_in_compat_space_p(long addr)
return 1;
#endif
}
-UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
static int __uasminit uasm_rel_highest(long val)
{
@@ -500,77 +341,66 @@ static int __uasminit uasm_rel_higher(long val)
#endif
}
-int __uasminit uasm_rel_hi(long val)
+int __uasminit ISAFUNC(uasm_rel_hi)(long val)
{
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
-UASM_EXPORT_SYMBOL(uasm_rel_hi);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
-int __uasminit uasm_rel_lo(long val)
+int __uasminit ISAFUNC(uasm_rel_lo)(long val)
{
return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
-UASM_EXPORT_SYMBOL(uasm_rel_lo);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
-void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
{
- if (!uasm_in_compat_space_p(addr)) {
- uasm_i_lui(buf, rs, uasm_rel_highest(addr));
+ if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
+ ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
if (uasm_rel_higher(addr))
- uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
- if (uasm_rel_hi(addr)) {
- uasm_i_dsll(buf, rs, rs, 16);
- uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr));
- uasm_i_dsll(buf, rs, rs, 16);
+ ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr));
+ if (ISAFUNC(uasm_rel_hi(addr))) {
+ ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
+ ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
+ ISAFUNC(uasm_rel_hi)(addr));
+ ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
} else
- uasm_i_dsll32(buf, rs, rs, 0);
+ ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0);
} else
- uasm_i_lui(buf, rs, uasm_rel_hi(addr));
+ ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr)));
}
-UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
+UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
-void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
{
- UASM_i_LA_mostly(buf, rs, addr);
- if (uasm_rel_lo(addr)) {
- if (!uasm_in_compat_space_p(addr))
- uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr));
+ ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
+ if (ISAFUNC(uasm_rel_lo(addr))) {
+ if (!ISAFUNC(uasm_in_compat_space_p)(addr))
+ ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
+ ISAFUNC(uasm_rel_lo(addr)));
else
- uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
+ ISAFUNC(uasm_i_addiu)(buf, rs, rs,
+ ISAFUNC(uasm_rel_lo(addr)));
}
}
-UASM_EXPORT_SYMBOL(UASM_i_LA);
+UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
/* Handle relocations. */
void __uasminit
-uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
+ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
{
(*rel)->addr = addr;
(*rel)->type = R_MIPS_PC16;
(*rel)->lab = lid;
(*rel)++;
}
-UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
static inline void __uasminit
-__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
-{
- long laddr = (long)lab->addr;
- long raddr = (long)rel->addr;
-
- switch (rel->type) {
- case R_MIPS_PC16:
- *rel->addr |= build_bimm(laddr - (raddr + 4));
- break;
-
- default:
- panic("Unsupported Micro-assembler relocation %d",
- rel->type);
- }
-}
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
void __uasminit
-uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
{
struct uasm_label *l;
@@ -579,40 +409,40 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
if (rel->lab == l->lab)
__resolve_relocs(rel, l);
}
-UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
void __uasminit
-uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
+ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++)
if (rel->addr >= first && rel->addr < end)
rel->addr += off;
}
-UASM_EXPORT_SYMBOL(uasm_move_relocs);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
void __uasminit
-uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
+ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off)
{
for (; lab->lab != UASM_LABEL_INVALID; lab++)
if (lab->addr >= first && lab->addr < end)
lab->addr += off;
}
-UASM_EXPORT_SYMBOL(uasm_move_labels);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
void __uasminit
-uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
+ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
u32 *end, u32 *target)
{
long off = (long)(target - first);
memcpy(target, first, (end - first) * sizeof(u32));
- uasm_move_relocs(rel, first, end, off);
- uasm_move_labels(lab, first, end, off);
+ ISAFUNC(uasm_move_relocs(rel, first, end, off));
+ ISAFUNC(uasm_move_labels(lab, first, end, off));
}
-UASM_EXPORT_SYMBOL(uasm_copy_handler);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
-int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr
@@ -623,88 +453,88 @@ int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
return 0;
}
-UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
/* Convenience functions for labeled branches. */
void __uasminit
-uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bltz(p, reg, 0);
+ ISAFUNC(uasm_i_bltz)(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bltz);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
void __uasminit
-uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
+ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_b(p, 0);
+ ISAFUNC(uasm_i_b)(p, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_b);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
void __uasminit
-uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_beqz(p, reg, 0);
+ ISAFUNC(uasm_i_beqz)(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_beqz);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
void __uasminit
-uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_beqzl(p, reg, 0);
+ ISAFUNC(uasm_i_beqzl)(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_beqzl);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
void __uasminit
-uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
unsigned int reg2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bne(p, reg1, reg2, 0);
+ ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bne);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
void __uasminit
-uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bnez(p, reg, 0);
+ ISAFUNC(uasm_i_bnez)(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bnez);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
void __uasminit
-uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bgezl(p, reg, 0);
+ ISAFUNC(uasm_i_bgezl)(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bgezl);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
void __uasminit
-uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bgez(p, reg, 0);
+ ISAFUNC(uasm_i_bgez)(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bgez);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
void __uasminit
-uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bbit0(p, reg, bit, 0);
+ ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bbit0);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
void __uasminit
-uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- uasm_i_bbit1(p, reg, bit, 0);
+ ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
}
-UASM_EXPORT_SYMBOL(uasm_il_bbit1);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));
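After this refactor uasm.c is no longer compiled on its own: it is a template pulled in by uasm-mips.c and uasm-micromips.c, and ISAFUNC() renames every public entry point so the two builds can coexist in one kernel, with the non-native ISA getting a prefixed symbol set. One plausible shape of that name mangling, shown only to make the scheme concrete — the real macros live in asm/uasm.h and may differ:

#define _UASM_ISA_CLASSIC       0
#define _UASM_ISA_MICROMIPS     1

/* the including .c file sets this before pulling in the template */
#define UASM_ISA        _UASM_ISA_CLASSIC

#if (UASM_ISA == _UASM_ISA_CLASSIC) && defined(CONFIG_CPU_MICROMIPS)
#define ISAFUNC(x)      CL_ ## x        /* classic back end under a microMIPS kernel */
#elif (UASM_ISA == _UASM_ISA_MICROMIPS) && !defined(CONFIG_CPU_MICROMIPS)
#define ISAFUNC(x)      MM_ ## x        /* microMIPS back end under a classic kernel */
#else
#define ISAFUNC(x)      x               /* native ISA keeps the plain uasm_* names */
#endif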
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 6079ef33b5f0..0388fc8b5613 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -5,9 +5,8 @@
# Copyright (C) 2008 Wind River Systems, Inc.
# written by Ralf Baechle <ralf@linux-mips.org>
#
-obj-y := malta-amon.o malta-cmdline.o \
- malta-display.o malta-init.o malta-int.o \
- malta-memory.o malta-platform.o \
+obj-y := malta-amon.o malta-display.o malta-init.o \
+ malta-int.o malta-memory.o malta-platform.o \
malta-reset.o malta-setup.o malta-time.o
obj-$(CONFIG_EARLY_PRINTK) += malta-console.o
diff --git a/arch/mips/mti-malta/Platform b/arch/mips/mti-malta/Platform
index 5b548b5a4fcf..2cc72c9b38e3 100644
--- a/arch/mips/mti-malta/Platform
+++ b/arch/mips/mti-malta/Platform
@@ -3,5 +3,9 @@
#
platform-$(CONFIG_MIPS_MALTA) += mti-malta/
cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta
-load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000
+ifdef CONFIG_KVM_GUEST
+ load-$(CONFIG_MIPS_MALTA) += 0x0000000040100000
+else
+ load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000
+endif
all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin
diff --git a/arch/mips/mti-malta/malta-cmdline.c b/arch/mips/mti-malta/malta-cmdline.c
deleted file mode 100644
index 5576a306a145..000000000000
--- a/arch/mips/mti-malta/malta-cmdline.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Kernel command line creation using the prom monitor (YAMON) argc/argv.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-
-extern int prom_argc;
-extern int *_prom_argv;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension.
- */
-#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
-
-char * __init prom_getcmdline(void)
-{
- return &(arcs_cmdline[0]);
-}
-
-
-void __init prom_init_cmdline(void)
-{
- char *cp;
- int actr;
-
- actr = 1; /* Always ignore argv[0] */
-
- cp = &(arcs_cmdline[0]);
- while(actr < prom_argc) {
- strcpy(cp, prom_argv(actr));
- cp += strlen(prom_argv(actr));
- *cp++ = ' ';
- actr++;
- }
- if (cp != &(arcs_cmdline[0])) {
- /* get rid of trailing space */
- --cp;
- *cp = '\0';
- }
-}
diff --git a/arch/mips/mti-malta/malta-display.c b/arch/mips/mti-malta/malta-display.c
index 9bc58a24e80a..d4f807191ecd 100644
--- a/arch/mips/mti-malta/malta-display.c
+++ b/arch/mips/mti-malta/malta-display.c
@@ -1,28 +1,20 @@
/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*
* Display routines for display messages in MIPS boards ascii display.
+ *
+ * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Steven J. Hill <sjhill@mips.com>
*/
-
#include <linux/compiler.h>
#include <linux/timer.h>
-#include <asm/io.h>
+#include <linux/io.h>
+
#include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
extern const char display_string[];
static unsigned int display_count;
@@ -36,11 +28,11 @@ void mips_display_message(const char *str)
if (unlikely(display == NULL))
display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int));
- for (i = 0; i <= 14; i=i+2) {
- if (*str)
- __raw_writel(*str++, display + i);
- else
- __raw_writel(' ', display + i);
+ for (i = 0; i <= 14; i += 2) {
+ if (*str)
+ __raw_writel(*str++, display + i);
+ else
+ __raw_writel(' ', display + i);
}
}
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index c2cbce9e435e..ff8caffd3266 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -1,54 +1,28 @@
/*
- * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
- * All rights reserved.
- * Authors: Carsten Langgaard <carstenl@mips.com>
- * Maciej W. Rozycki <macro@mips.com>
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*
* PROM library initialisation code.
+ *
+ * Copyright (C) 1999,2000,2004,2005,2012 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Maciej W. Rozycki <macro@mips.com>
+ * Steven J. Hill <sjhill@mips.com>
*/
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kernel.h>
-#include <asm/bootinfo.h>
-#include <asm/gt64120.h>
-#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/smp-ops.h>
#include <asm/traps.h>
-
+#include <asm/fw/fw.h>
#include <asm/gcmpregs.h>
-#include <asm/mips-boards/prom.h>
#include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/bonito64.h>
-#include <asm/mips-boards/msc01_pci.h>
-
#include <asm/mips-boards/malta.h>
-int prom_argc;
-int *_prom_argv, *_prom_envp;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension, if running in 64-bit mode.
- */
-#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
-
-int init_debug;
-
static int mips_revision_corid;
int mips_revision_sconid;
@@ -62,74 +36,6 @@ unsigned long _pcictrl_gt64120;
/* MIPS System controller register base */
unsigned long _pcictrl_msc;
-char *prom_getenv(char *envname)
-{
- /*
- * Return a pointer to the given environment variable.
- * In 64-bit mode: we're using 64-bit pointers, but all pointers
- * in the PROM structures are only 32-bit, so we need some
- * workarounds, if we are running in 64-bit mode.
- */
- int i, index=0;
-
- i = strlen(envname);
-
- while (prom_envp(index)) {
- if(strncmp(envname, prom_envp(index), i) == 0) {
- return(prom_envp(index+1));
- }
- index += 2;
- }
-
- return NULL;
-}
-
-static inline unsigned char str2hexnum(unsigned char c)
-{
- if (c >= '0' && c <= '9')
- return c - '0';
- if (c >= 'a' && c <= 'f')
- return c - 'a' + 10;
- return 0; /* foo */
-}
-
-static inline void str2eaddr(unsigned char *ea, unsigned char *str)
-{
- int i;
-
- for (i = 0; i < 6; i++) {
- unsigned char num;
-
- if((*str == '.') || (*str == ':'))
- str++;
- num = str2hexnum(*str++) << 4;
- num |= (str2hexnum(*str++));
- ea[i] = num;
- }
-}
-
-int get_ethernet_addr(char *ethernet_addr)
-{
- char *ethaddr_str;
-
- ethaddr_str = prom_getenv("ethaddr");
- if (!ethaddr_str) {
- printk("ethaddr not set in boot prom\n");
- return -1;
- }
- str2eaddr(ethernet_addr, ethaddr_str);
-
- if (init_debug > 1) {
- int i;
- printk("get_ethernet_addr: ");
- for (i=0; i<5; i++)
- printk("%02x:", (unsigned char)*(ethernet_addr+i));
- printk("%02x\n", *(ethernet_addr+i));
- }
-
- return 0;
-}
-
#ifdef CONFIG_SERIAL_8250_CONSOLE
static void __init console_config(void)
{
@@ -138,17 +44,23 @@ static void __init console_config(void)
char parity = '\0', bits = '\0', flow = '\0';
char *s;
- if ((strstr(prom_getcmdline(), "console=")) == NULL) {
- s = prom_getenv("modetty0");
+ if ((strstr(fw_getcmdline(), "console=")) == NULL) {
+ s = fw_getenv("modetty0");
if (s) {
while (*s >= '0' && *s <= '9')
baud = baud*10 + *s++ - '0';
- if (*s == ',') s++;
- if (*s) parity = *s++;
- if (*s == ',') s++;
- if (*s) bits = *s++;
- if (*s == ',') s++;
- if (*s == 'h') flow = 'r';
+ if (*s == ',')
+ s++;
+ if (*s)
+ parity = *s++;
+ if (*s == ',')
+ s++;
+ if (*s)
+ bits = *s++;
+ if (*s == ',')
+ s++;
+ if (*s == 'h')
+ flow = 'r';
}
if (baud == 0)
baud = 38400;
@@ -158,8 +70,9 @@ static void __init console_config(void)
bits = '8';
if (flow == '\0')
flow = 'r';
- sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow);
- strcat(prom_getcmdline(), console_string);
+ sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
+ parity, bits, flow);
+ strcat(fw_getcmdline(), console_string);
pr_info("Config serial console:%s\n", console_string);
}
}
@@ -193,10 +106,6 @@ extern struct plat_smp_ops msmtc_smp_ops;
void __init prom_init(void)
{
- prom_argc = fw_arg0;
- _prom_argv = (int *) fw_arg1;
- _prom_envp = (int *) fw_arg2;
-
mips_display_message("LINUX");
/*
@@ -306,7 +215,7 @@ void __init prom_init(void)
case MIPS_REVISION_SCON_SOCIT:
case MIPS_REVISION_SCON_ROCIT:
_pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000);
- mips_pci_controller:
+mips_pci_controller:
mb();
MSC_READ(MSC01_PCI_CFG, data);
MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT);
@@ -348,13 +257,13 @@ void __init prom_init(void)
default:
/* Unknown system controller */
mips_display_message("SC Error");
- while (1); /* We die here... */
+ while (1); /* We die here... */
}
board_nmi_handler_setup = mips_nmi_setup;
board_ejtag_handler_setup = mips_ejtag_setup;
- prom_init_cmdline();
- prom_meminit();
+ fw_init_cmdline();
+ fw_meminit();
#ifdef CONFIG_SERIAL_8250_CONSOLE
console_config();
#endif
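The removed YAMON glue revolved around one detail its old comments spell out: the PROM hands the kernel 32-bit pointers, and a 64-bit kernel has to sign-extend them (a KSEG0 address such as 0x80002000 must become 0xffffffff80002000) before dereferencing, which is what the (char *)(long) casts in the deleted prom_argv()/prom_envp() macros did. A standalone illustration of that widening:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int32_t yamon_ptr = (int32_t)0x80002000;        /* as stored by the PROM */

        /* widen through a signed type, as the (long)(int) cast chain does */
        uint64_t kernel_ptr = (uint64_t)(int64_t)yamon_ptr;

        printf("0x%08x -> 0x%016llx\n", (uint32_t)yamon_ptr,
               (unsigned long long)kernel_ptr);
        return 0;
}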
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index e364af70e6cf..0a1339ac3ec8 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -47,7 +47,6 @@
#include <asm/setup.h>
int gcmp_present = -1;
-int gic_present;
static unsigned long _msc01_biu_base;
static unsigned long _gcmp_base;
static unsigned int ipi_map[NR_CPUS];
@@ -134,6 +133,9 @@ static void malta_ipi_irqdispatch(void)
{
int irq;
+ if (gic_compare_int())
+ do_IRQ(MIPS_GIC_IRQ_BASE);
+
irq = gic_get_int();
if (irq < 0)
return; /* interrupt has already been cleared */
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index f3d43aa023a9..1f73d63e92a7 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -1,73 +1,45 @@
/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*
* PROM library functions for acquiring/using memory descriptors given to
* us from the YAMON.
+ *
+ * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Steven J. Hill <sjhill@mips.com>
*/
#include <linux/init.h>
-#include <linux/mm.h>
#include <linux/bootmem.h>
-#include <linux/pfn.h>
#include <linux/string.h>
#include <asm/bootinfo.h>
-#include <asm/page.h>
#include <asm/sections.h>
+#include <asm/fw/fw.h>
-#include <asm/mips-boards/prom.h>
-
-/*#define DEBUG*/
-
-enum yamon_memtypes {
- yamon_dontuse,
- yamon_prom,
- yamon_free,
-};
-static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
-
-#ifdef DEBUG
-static char *mtypes[3] = {
- "Dont use memory",
- "YAMON PROM memory",
- "Free memory",
-};
-#endif
+static fw_memblock_t mdesc[FW_MAX_MEMBLOCKS];
/* determined physical memory size, not overridden by command line args */
unsigned long physical_memsize = 0L;
-static struct prom_pmemblock * __init prom_getmdesc(void)
+fw_memblock_t * __init fw_getmdesc(void)
{
- char *memsize_str;
+ char *memsize_str, *ptr;
unsigned int memsize;
- char *ptr;
static char cmdline[COMMAND_LINE_SIZE] __initdata;
+ long val;
+ int tmp;
/* otherwise look in the environment */
- memsize_str = prom_getenv("memsize");
+ memsize_str = fw_getenv("memsize");
if (!memsize_str) {
- printk(KERN_WARNING
- "memsize not set in boot prom, set to default (32Mb)\n");
+ pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
physical_memsize = 0x02000000;
} else {
-#ifdef DEBUG
- pr_debug("prom_memsize = %s\n", memsize_str);
-#endif
- physical_memsize = simple_strtol(memsize_str, NULL, 0);
+ tmp = kstrtol(memsize_str, 0, &val);
+ physical_memsize = (unsigned long)val;
}
#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -90,11 +62,11 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
memset(mdesc, 0, sizeof(mdesc));
- mdesc[0].type = yamon_dontuse;
+ mdesc[0].type = fw_dontuse;
mdesc[0].base = 0x00000000;
mdesc[0].size = 0x00001000;
- mdesc[1].type = yamon_prom;
+ mdesc[1].type = fw_code;
mdesc[1].base = 0x00001000;
mdesc[1].size = 0x000ef000;
@@ -105,55 +77,45 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
 * This means that this area can't be used as DMA memory for PCI
* devices.
*/
- mdesc[2].type = yamon_dontuse;
+ mdesc[2].type = fw_dontuse;
mdesc[2].base = 0x000f0000;
mdesc[2].size = 0x00010000;
- mdesc[3].type = yamon_dontuse;
+ mdesc[3].type = fw_dontuse;
mdesc[3].base = 0x00100000;
- mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - mdesc[3].base;
+ mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
+ mdesc[3].base;
- mdesc[4].type = yamon_free;
+ mdesc[4].type = fw_free;
mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
mdesc[4].size = memsize - mdesc[4].base;
return &mdesc[0];
}
-static int __init prom_memtype_classify(unsigned int type)
+static int __init fw_memtype_classify(unsigned int type)
{
switch (type) {
- case yamon_free:
+ case fw_free:
return BOOT_MEM_RAM;
- case yamon_prom:
+ case fw_code:
return BOOT_MEM_ROM_DATA;
default:
return BOOT_MEM_RESERVED;
}
}
-void __init prom_meminit(void)
+void __init fw_meminit(void)
{
- struct prom_pmemblock *p;
+ fw_memblock_t *p;
-#ifdef DEBUG
- pr_debug("YAMON MEMORY DESCRIPTOR dump:\n");
- p = prom_getmdesc();
- while (p->size) {
- int i = 0;
- pr_debug("[%d,%p]: base<%08lx> size<%08lx> type<%s>\n",
- i, p, p->base, p->size, mtypes[p->type]);
- p++;
- i++;
- }
-#endif
- p = prom_getmdesc();
+ p = fw_getmdesc();
while (p->size) {
long type;
unsigned long base, size;
- type = prom_memtype_classify(p->type);
+ type = fw_memtype_classify(p->type);
base = p->base;
size = p->size;
@@ -172,7 +134,7 @@ void __init prom_free_prom_memory(void)
continue;
addr = boot_mem_map.map[i].addr;
- free_init_pages("prom memory",
+ free_init_pages("YAMON memory",
addr, addr + boot_mem_map.map[i].size);
}
}
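
fw_getmdesc() above replaces the YAMON descriptor walk with a fixed five-entry table that fw_memtype_classify() later maps onto boot_mem_map types. A rough sketch of that layout, with KERNEL_END and MEMSIZE standing in for CPHYSADDR(PFN_ALIGN(&_end)) and the "memsize" environment value (both assumed values here, not read from hardware):

#include <stdio.h>

/* Illustrative layout of the table built by fw_getmdesc(); the fixed
 * addresses are the YAMON values from the hunk above.
 */
enum fw_memtypes { fw_dontuse, fw_code, fw_free };

static const char *classify(int type)
{
	switch (type) {
	case fw_free:
		return "BOOT_MEM_RAM";
	case fw_code:
		return "BOOT_MEM_ROM_DATA";
	default:
		return "BOOT_MEM_RESERVED";
	}
}

int main(void)
{
	const unsigned long KERNEL_END = 0x00800000;	/* assumed kernel end */
	const unsigned long MEMSIZE    = 0x02000000;	/* 32 MB default */
	struct { int type; unsigned long base, size; } mdesc[] = {
		{ fw_dontuse, 0x00000000, 0x00001000 },	/* exception vectors */
		{ fw_code,    0x00001000, 0x000ef000 },	/* YAMON code/data */
		{ fw_dontuse, 0x000f0000, 0x00010000 },	/* not usable for PCI DMA */
		{ fw_dontuse, 0x00100000, KERNEL_END - 0x00100000 },
		{ fw_free,    KERNEL_END, MEMSIZE - KERNEL_END },
	};

	for (unsigned i = 0; i < sizeof(mdesc) / sizeof(mdesc[0]); i++)
		printf("base %08lx size %08lx -> %s\n",
		       mdesc[i].base, mdesc[i].size, classify(mdesc[i].type));
	return 0;
}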
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 200f64df2c9b..c72a06936781 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -25,13 +25,13 @@
#include <linux/screen_info.h>
#include <linux/time.h>
-#include <asm/bootinfo.h>
+#include <asm/fw/fw.h>
#include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
#include <asm/mips-boards/malta.h>
#include <asm/mips-boards/maltaint.h>
#include <asm/dma.h>
#include <asm/traps.h>
+#include <asm/gcmpregs.h>
#ifdef CONFIG_VT
#include <linux/console.h>
#endif
@@ -105,6 +105,66 @@ static void __init fd_activate(void)
}
#endif
+static int __init plat_enable_iocoherency(void)
+{
+ int supported = 0;
+ if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
+ if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
+ BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
+ pr_info("Enabled Bonito CPU coherency\n");
+ supported = 1;
+ }
+ if (strstr(fw_getcmdline(), "iobcuncached")) {
+ BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
+ BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
+ ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
+ BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
+ pr_info("Disabled Bonito IOBC coherency\n");
+ } else {
+ BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
+ BONITO_PCIMEMBASECFG |=
+ (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
+ BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
+ pr_info("Enabled Bonito IOBC coherency\n");
+ }
+ } else if (gcmp_niocu() != 0) {
+ /* Nothing special needs to be done to enable coherency */
+ pr_info("CMP IOCU detected\n");
+ if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
+ pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
+ return 0;
+ }
+ supported = 1;
+ }
+ hw_coherentio = supported;
+ return supported;
+}
+
+static void __init plat_setup_iocoherency(void)
+{
+#ifdef CONFIG_DMA_NONCOHERENT
+ /*
+ * Kernel has been configured with software coherency
+ * but we might choose to turn it off and use hardware
+ * coherency instead.
+ */
+ if (plat_enable_iocoherency()) {
+ if (coherentio == 0)
+ pr_info("Hardware DMA cache coherency disabled\n");
+ else
+ pr_info("Hardware DMA cache coherency enabled\n");
+ } else {
+ if (coherentio == 1)
+ pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
+ else
+ pr_info("Software DMA cache coherency enabled\n");
+ }
+#else
+ if (!plat_enable_iocoherency())
+ panic("Hardware DMA cache coherency not supported!");
+#endif
+}
+
#ifdef CONFIG_BLK_DEV_IDE
static void __init pci_clock_check(void)
{
@@ -115,16 +175,15 @@ static void __init pci_clock_check(void)
33, 20, 25, 30, 12, 16, 37, 10
};
int pciclock = pciclocks[jmpr];
- char *argptr = prom_getcmdline();
+ char *argptr = fw_getcmdline();
if (pciclock != 33 && !strstr(argptr, "idebus=")) {
- printk(KERN_WARNING "WARNING: PCI clock is %dMHz, "
- "setting idebus\n", pciclock);
+ pr_warn("WARNING: PCI clock is %dMHz, setting idebus\n",
+ pciclock);
argptr += strlen(argptr);
sprintf(argptr, " idebus=%d", pciclock);
if (pciclock < 20 || pciclock > 66)
- printk(KERN_WARNING "WARNING: IDE timing "
- "calculations will be incorrect\n");
+ pr_warn("WARNING: IDE timing calculations will be incorrect\n");
}
}
#endif
@@ -153,31 +212,31 @@ static void __init bonito_quirks_setup(void)
{
char *argptr;
- argptr = prom_getcmdline();
+ argptr = fw_getcmdline();
if (strstr(argptr, "debug")) {
BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE;
- printk(KERN_INFO "Enabled Bonito debug mode\n");
+ pr_info("Enabled Bonito debug mode\n");
} else
BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
#ifdef CONFIG_DMA_COHERENT
if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
- printk(KERN_INFO "Enabled Bonito CPU coherency\n");
+ pr_info("Enabled Bonito CPU coherency\n");
- argptr = prom_getcmdline();
+ argptr = fw_getcmdline();
if (strstr(argptr, "iobcuncached")) {
BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
- printk(KERN_INFO "Disabled Bonito IOBC coherency\n");
+ pr_info("Disabled Bonito IOBC coherency\n");
} else {
BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
BONITO_PCIMEMBASECFG |=
(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
- printk(KERN_INFO "Enabled Bonito IOBC coherency\n");
+ pr_info("Enabled Bonito IOBC coherency\n");
}
} else
panic("Hardware DMA cache coherency not supported");
@@ -207,6 +266,8 @@ void __init plat_mem_setup(void)
if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
bonito_quirks_setup();
+ plat_setup_iocoherency();
+
#ifdef CONFIG_BLK_DEV_IDE
pci_clock_check();
#endif
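
The new plat_setup_iocoherency() picks between hardware and software DMA coherency from two inputs: whether plat_enable_iocoherency() found working hardware support, and the coherentio command-line override. A small decision sketch (plain C, not the kernel code) covering the CONFIG_DMA_NONCOHERENT case:

#include <stdio.h>

/* "hw" mirrors the return value of plat_enable_iocoherency(), "coherentio"
 * the command-line override; the strings paraphrase the pr_info() messages.
 */
static const char *dma_coherency(int hw, int coherentio)
{
	if (hw)
		return coherentio == 0 ?
			"hardware coherency available, disabled by user" :
			"hardware coherency enabled";
	return coherentio == 1 ?
		"hardware coherency forced on but unsupported" :
		"software coherency";
}

int main(void)
{
	printf("%s\n", dma_coherency(1, 1));
	printf("%s\n", dma_coherency(0, 0));
	return 0;
}

With CONFIG_DMA_COHERENT the patch instead panics outright when hardware coherency is absent.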
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index a144b89cf9ba..0ad305f75802 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -39,12 +39,9 @@
#include <asm/gic.h>
#include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
-
#include <asm/mips-boards/maltaint.h>
unsigned long cpu_khz;
-int gic_frequency;
static int mips_cpu_timer_irq;
static int mips_cpu_perf_irq;
@@ -74,7 +71,24 @@ static void __init estimate_frequencies(void)
{
unsigned long flags;
unsigned int count, start;
+#ifdef CONFIG_IRQ_GIC
unsigned int giccount = 0, gicstart = 0;
+#endif
+
+#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ)
+ unsigned int prid = read_c0_prid() & 0xffff00;
+
+ /*
+ * XXXKYMA: hardwire the CPU frequency to Host Freq/4
+ */
+ count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3;
+ if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
+ (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
+ count *= 2;
+
+ mips_hpt_frequency = count;
+ return;
+#endif
local_irq_save(flags);
@@ -84,26 +98,32 @@ static void __init estimate_frequencies(void)
/* Initialize counters. */
start = read_c0_count();
+#ifdef CONFIG_IRQ_GIC
if (gic_present)
GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart);
+#endif
/* Read counter exactly on falling edge of update flag. */
while (CMOS_READ(RTC_REG_A) & RTC_UIP);
while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
count = read_c0_count();
+#ifdef CONFIG_IRQ_GIC
if (gic_present)
GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount);
+#endif
local_irq_restore(flags);
count -= start;
- if (gic_present)
- giccount -= gicstart;
-
mips_hpt_frequency = count;
- if (gic_present)
+
+#ifdef CONFIG_IRQ_GIC
+ if (gic_present) {
+ giccount -= gicstart;
gic_frequency = giccount;
+ }
+#endif
}
void read_persistent_clock(struct timespec *ts)
@@ -159,24 +179,27 @@ void __init plat_time_init(void)
(prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
freq *= 2;
freq = freqround(freq, 5000);
- pr_debug("CPU frequency %d.%02d MHz\n", freq/1000000,
+ printk("CPU frequency %d.%02d MHz\n", freq/1000000,
(freq%1000000)*100/1000000);
cpu_khz = freq / 1000;
- if (gic_present) {
- freq = freqround(gic_frequency, 5000);
- pr_debug("GIC frequency %d.%02d MHz\n", freq/1000000,
- (freq%1000000)*100/1000000);
- gic_clocksource_init(gic_frequency);
- } else
- init_r4k_clocksource();
+ mips_scroll_message();
#ifdef CONFIG_I8253
/* Only Malta has a PIT. */
setup_pit_timer();
#endif
- mips_scroll_message();
+#ifdef CONFIG_IRQ_GIC
+ if (gic_present) {
+ freq = freqround(gic_frequency, 5000);
+ printk("GIC frequency %d.%02d MHz\n", freq/1000000,
+ (freq%1000000)*100/1000000);
+#ifdef CONFIG_CSRC_GIC
+ gic_clocksource_init(gic_frequency);
+#endif
+ }
+#endif
plat_perf_setup();
}
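
estimate_frequencies() calibrates the CP0 Count register (and, under CONFIG_IRQ_GIC, the GIC counter) by sampling it across exactly one RTC update period, i.e. between two falling edges of the UIP flag. A user-space analogue of the same idea, with CLOCK_MONOTONIC standing in for the counter and a one-second sleep for the RTC wait (assumptions for illustration only):

#include <stdio.h>
#include <time.h>

/* Sample a free-running counter at the start and end of a known interval
 * and take the delta as the frequency.
 */
static unsigned long long read_counter(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	unsigned long long start, count;
	struct timespec one_sec = { 1, 0 };

	start = read_counter();
	nanosleep(&one_sec, NULL);	/* stands in for the RTC edge-to-edge wait */
	count = read_counter() - start;
	printf("counter frequency ~ %llu Hz\n", count);
	return 0;
}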
diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile
index 10ec701ce6c7..be114209217c 100644
--- a/arch/mips/mti-sead3/Makefile
+++ b/arch/mips/mti-sead3/Makefile
@@ -8,10 +8,10 @@
# Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
# Steven J. Hill <sjhill@mips.com>
#
-obj-y := sead3-lcd.o sead3-cmdline.o \
- sead3-display.o sead3-init.o sead3-int.o \
- sead3-mtd.o sead3-net.o sead3-platform.o \
- sead3-reset.o sead3-setup.o sead3-time.o
+obj-y := sead3-lcd.o sead3-display.o sead3-init.o \
+ sead3-int.o sead3-mtd.o sead3-net.o \
+ sead3-platform.o sead3-reset.o \
+ sead3-setup.o sead3-time.o
obj-y += sead3-i2c-dev.o sead3-i2c.o \
sead3-pic32-i2c-drv.o sead3-pic32-bus.o \
diff --git a/arch/mips/mti-sead3/leds-sead3.c b/arch/mips/mti-sead3/leds-sead3.c
index 322148c353ed..0a168c948b01 100644
--- a/arch/mips/mti-sead3/leds-sead3.c
+++ b/arch/mips/mti-sead3/leds-sead3.c
@@ -34,33 +34,15 @@ static void sead3_fled_set(struct led_classdev *led_cdev,
static struct led_classdev sead3_pled = {
.name = "sead3::pled",
.brightness_set = sead3_pled_set,
+ .flags = LED_CORE_SUSPENDRESUME,
};
static struct led_classdev sead3_fled = {
.name = "sead3::fled",
.brightness_set = sead3_fled_set,
+ .flags = LED_CORE_SUSPENDRESUME,
};
-#ifdef CONFIG_PM
-static int sead3_led_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- led_classdev_suspend(&sead3_pled);
- led_classdev_suspend(&sead3_fled);
- return 0;
-}
-
-static int sead3_led_resume(struct platform_device *dev)
-{
- led_classdev_resume(&sead3_pled);
- led_classdev_resume(&sead3_fled);
- return 0;
-}
-#else
-#define sead3_led_suspend NULL
-#define sead3_led_resume NULL
-#endif
-
static int sead3_led_probe(struct platform_device *pdev)
{
int ret;
@@ -86,8 +68,6 @@ static int sead3_led_remove(struct platform_device *pdev)
static struct platform_driver sead3_led_driver = {
.probe = sead3_led_probe,
.remove = sead3_led_remove,
- .suspend = sead3_led_suspend,
- .resume = sead3_led_resume,
.driver = {
.name = DRVNAME,
.owner = THIS_MODULE,
diff --git a/arch/mips/mti-sead3/sead3-cmdline.c b/arch/mips/mti-sead3/sead3-cmdline.c
deleted file mode 100644
index a2e6cec67f57..000000000000
--- a/arch/mips/mti-sead3/sead3-cmdline.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-
-extern int prom_argc;
-extern int *_prom_argv;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension.
- */
-#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
-
-char * __init prom_getcmdline(void)
-{
- return &(arcs_cmdline[0]);
-}
-
-void __init prom_init_cmdline(void)
-{
- char *cp;
- int actr;
-
- actr = 1; /* Always ignore argv[0] */
-
- cp = &(arcs_cmdline[0]);
- while (actr < prom_argc) {
- strcpy(cp, prom_argv(actr));
- cp += strlen(prom_argv(actr));
- *cp++ = ' ';
- actr++;
- }
- if (cp != &(arcs_cmdline[0])) {
- /* get rid of trailing space */
- --cp;
- *cp = '\0';
- }
-}
diff --git a/arch/mips/mti-sead3/sead3-console.c b/arch/mips/mti-sead3/sead3-console.c
index 2ddef19a9adc..031f47d69770 100644
--- a/arch/mips/mti-sead3/sead3-console.c
+++ b/arch/mips/mti-sead3/sead3-console.c
@@ -26,7 +26,7 @@ static inline void serial_out(int offset, int value, unsigned int base_addr)
__raw_writel(value, PORT(base_addr, offset));
}
-void __init prom_init_early_console(char port)
+void __init fw_init_early_console(char port)
{
console_port = port;
}
diff --git a/arch/mips/mti-sead3/sead3-display.c b/arch/mips/mti-sead3/sead3-display.c
index e389326cfa42..94875991907b 100644
--- a/arch/mips/mti-sead3/sead3-display.c
+++ b/arch/mips/mti-sead3/sead3-display.c
@@ -8,7 +8,6 @@
#include <linux/timer.h>
#include <linux/io.h>
#include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
static unsigned int display_count;
static unsigned int max_display_count;
diff --git a/arch/mips/mti-sead3/sead3-init.c b/arch/mips/mti-sead3/sead3-init.c
index f95abaa1aa5d..bfbd17b120a2 100644
--- a/arch/mips/mti-sead3/sead3-init.c
+++ b/arch/mips/mti-sead3/sead3-init.c
@@ -12,38 +12,51 @@
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
-
-extern void prom_init_early_console(char port);
+#include <asm/fw/fw.h>
extern char except_vec_nmi;
extern char except_vec_ejtag_debug;
-int prom_argc;
-int *_prom_argv, *_prom_envp;
-
-#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
-
-char *prom_getenv(char *envname)
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+static void __init console_config(void)
{
- /*
- * Return a pointer to the given environment variable.
- * In 64-bit mode: we're using 64-bit pointers, but all pointers
- * in the PROM structures are only 32-bit, so we need some
- * workarounds, if we are running in 64-bit mode.
- */
- int i, index = 0;
-
- i = strlen(envname);
-
- while (prom_envp(index)) {
- if (strncmp(envname, prom_envp(index), i) == 0)
- return prom_envp(index+1);
- index += 2;
+ char console_string[40];
+ int baud = 0;
+ char parity = '\0', bits = '\0', flow = '\0';
+ char *s;
+
+ if ((strstr(fw_getcmdline(), "console=")) == NULL) {
+ s = fw_getenv("modetty0");
+ if (s) {
+ while (*s >= '0' && *s <= '9')
+ baud = baud*10 + *s++ - '0';
+ if (*s == ',')
+ s++;
+ if (*s)
+ parity = *s++;
+ if (*s == ',')
+ s++;
+ if (*s)
+ bits = *s++;
+ if (*s == ',')
+ s++;
+ if (*s == 'h')
+ flow = 'r';
+ }
+ if (baud == 0)
+ baud = 38400;
+ if (parity != 'n' && parity != 'o' && parity != 'e')
+ parity = 'n';
+ if (bits != '7' && bits != '8')
+ bits = '8';
+ if (flow == '\0')
+ flow = 'r';
+ sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
+ parity, bits, flow);
+ strcat(fw_getcmdline(), console_string);
}
-
- return NULL;
}
+#endif
static void __init mips_nmi_setup(void)
{
@@ -52,7 +65,41 @@ static void __init mips_nmi_setup(void)
base = cpu_has_veic ?
(void *)(CAC_BASE + 0xa80) :
(void *)(CAC_BASE + 0x380);
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * Decrement the exception vector address by one for microMIPS.
+ */
+ memcpy(base, (&except_vec_nmi - 1), 0x80);
+
+ /*
+ * This is a hack. We do not know if the boot loader was built with
+ * microMIPS instructions or not. If it was not, the NMI exception
+ * code at 0x80000a80 will be taken in MIPS32 mode. The hand coded
+ * assembly below forces us into microMIPS mode if we are a pure
+ * microMIPS kernel. The assembly instructions are:
+ *
+ * 3C1A8000 lui k0,0x8000
+ * 375A0381 ori k0,k0,0x381
+ * 03400008 jr k0
+ * 00000000 nop
+ *
+ * The mode switch occurs by jumping to the unaligned exception
+ * vector address at 0x80000381 which would have been 0x80000380
+ * in MIPS32 mode. The jump to the unaligned address transitions
+ * us into microMIPS mode.
+ */
+ if (!cpu_has_veic) {
+ void *base2 = (void *)(CAC_BASE + 0xa80);
+ *((unsigned int *)base2) = 0x3c1a8000;
+ *((unsigned int *)base2 + 1) = 0x375a0381;
+ *((unsigned int *)base2 + 2) = 0x03400008;
+ *((unsigned int *)base2 + 3) = 0x00000000;
+ flush_icache_range((unsigned long)base2,
+ (unsigned long)base2 + 0x10);
+ }
+#else
memcpy(base, &except_vec_nmi, 0x80);
+#endif
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}
@@ -63,29 +110,40 @@ static void __init mips_ejtag_setup(void)
base = cpu_has_veic ?
(void *)(CAC_BASE + 0xa00) :
(void *)(CAC_BASE + 0x300);
+#ifdef CONFIG_CPU_MICROMIPS
+ /* Deja vu... */
+ memcpy(base, (&except_vec_ejtag_debug - 1), 0x80);
+ if (!cpu_has_veic) {
+ void *base2 = (void *)(CAC_BASE + 0xa00);
+ *((unsigned int *)base2) = 0x3c1a8000;
+ *((unsigned int *)base2 + 1) = 0x375a0301;
+ *((unsigned int *)base2 + 2) = 0x03400008;
+ *((unsigned int *)base2 + 3) = 0x00000000;
+ flush_icache_range((unsigned long)base2,
+ (unsigned long)base2 + 0x10);
+ }
+#else
memcpy(base, &except_vec_ejtag_debug, 0x80);
+#endif
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}
void __init prom_init(void)
{
- prom_argc = fw_arg0;
- _prom_argv = (int *) fw_arg1;
- _prom_envp = (int *) fw_arg2;
-
board_nmi_handler_setup = mips_nmi_setup;
board_ejtag_handler_setup = mips_ejtag_setup;
- prom_init_cmdline();
+ fw_init_cmdline();
#ifdef CONFIG_EARLY_PRINTK
- if ((strstr(prom_getcmdline(), "console=ttyS0")) != NULL)
- prom_init_early_console(0);
- else if ((strstr(prom_getcmdline(), "console=ttyS1")) != NULL)
- prom_init_early_console(1);
+ if ((strstr(fw_getcmdline(), "console=ttyS0")) != NULL)
+ fw_init_early_console(0);
+ else if ((strstr(fw_getcmdline(), "console=ttyS1")) != NULL)
+ fw_init_early_console(1);
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
- if ((strstr(prom_getcmdline(), "console=")) == NULL)
- strcat(prom_getcmdline(), " console=ttyS0,38400n8r");
+ if ((strstr(fw_getcmdline(), "console=")) == NULL)
+ strcat(fw_getcmdline(), " console=ttyS0,38400n8r");
+ console_config();
#endif
}
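
The microMIPS hack in mips_nmi_setup()/mips_ejtag_setup() plants a four-word MIPS32 trampoline whose jump target has bit 0 set, which is what flips the CPU into microMIPS mode. The sketch below rebuilds those words from a target address; build_trampoline() is a hypothetical helper, but the encodings reproduce the 3c1a8000/375a0381/03400008/00000000 sequence quoted in the comment above.

#include <stdio.h>
#include <stdint.h>

static void build_trampoline(uint32_t target, uint32_t code[4])
{
	uint32_t hi = target >> 16;
	uint32_t lo = (target & 0xffff) | 1;	/* odd address -> microMIPS mode */

	code[0] = 0x3c1a0000 | hi;	/* lui  k0, hi(target)         */
	code[1] = 0x375a0000 | lo;	/* ori  k0, k0, lo(target) | 1 */
	code[2] = 0x03400008;		/* jr   k0                     */
	code[3] = 0x00000000;		/* nop (branch delay slot)     */
}

int main(void)
{
	uint32_t code[4];

	build_trampoline(0x80000380, code);	/* NMI vector in the hunk above */
	for (int i = 0; i < 4; i++)
		printf("%08x\n", code[i]);	/* 3c1a8000 375a0381 03400008 00000000 */
	return 0;
}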
diff --git a/arch/mips/mti-sead3/sead3-int.c b/arch/mips/mti-sead3/sead3-int.c
index e26e08274fc5..6a560ac03def 100644
--- a/arch/mips/mti-sead3/sead3-int.c
+++ b/arch/mips/mti-sead3/sead3-int.c
@@ -20,7 +20,6 @@
#define SEAD_CONFIG_BASE 0x1b100110
#define SEAD_CONFIG_SIZE 4
-int gic_present;
static unsigned long sead3_config_reg;
/*
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
index f012fd164cee..b5059dc899f4 100644
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ b/arch/mips/mti-sead3/sead3-setup.c
@@ -11,10 +11,6 @@
#include <linux/bootmem.h>
#include <asm/mips-boards/generic.h>
-#include <asm/prom.h>
-
-int coherentio; /* 0 => no DMA cache coherency (may be set by user) */
-int hw_coherentio; /* 0 => no HW DMA cache coherency (reflects real HW) */
const char *get_system_type(void)
{
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index 239e4e32757f..96b42eb9b5e2 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -11,7 +11,6 @@
#include <asm/time.h>
#include <asm/irq.h>
#include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
unsigned long cpu_khz;
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
index 3c05bf9e280a..e0873a31ebaa 100644
--- a/arch/mips/netlogic/Kconfig
+++ b/arch/mips/netlogic/Kconfig
@@ -2,13 +2,22 @@ if NLM_XLP_BOARD || NLM_XLR_BOARD
if NLM_XLP_BOARD
config DT_XLP_EVP
- bool "Built-in device tree for XLP EVP/SVP boards"
+ bool "Built-in device tree for XLP EVP boards"
default y
help
- Add an FDT blob for XLP EVP and SVP boards into the kernel.
+ Add an FDT blob for XLP EVP boards into the kernel.
This DTB will be used if the firmware does not pass in a DTB
- pointer to the kernel. The corresponding DTS file is at
- arch/mips/netlogic/dts/xlp_evp.dts
+ pointer to the kernel. The corresponding DTS file is at
+ arch/mips/netlogic/dts/xlp_evp.dts
+
+config DT_XLP_SVP
+ bool "Built-in device tree for XLP SVP boards"
+ default y
+ help
+ Add an FDT blob for XLP SVP boards into the kernel.
+ This DTB will be used if the firmware does not pass in a DTB
+ pointer to the kernel. The corresponding DTS file is at
+ arch/mips/netlogic/dts/xlp_svp.dts
config NLM_MULTINODE
bool "Support for multi-chip boards"
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 2bb95dcfe20a..ffba52489bef 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -148,8 +148,7 @@ void nlm_cpus_done(void)
int nlm_cpu_ready[NR_CPUS];
unsigned long nlm_next_gp;
unsigned long nlm_next_sp;
-
-cpumask_t phys_cpu_present_map;
+static cpumask_t phys_cpu_present_mask;
void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
{
@@ -169,11 +168,12 @@ void __init nlm_smp_setup(void)
{
unsigned int boot_cpu;
int num_cpus, i, ncore;
+ char buf[64];
boot_cpu = hard_smp_processor_id();
- cpumask_clear(&phys_cpu_present_map);
+ cpumask_clear(&phys_cpu_present_mask);
- cpumask_set_cpu(boot_cpu, &phys_cpu_present_map);
+ cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask);
__cpu_number_map[boot_cpu] = 0;
__cpu_logical_map[0] = boot_cpu;
set_cpu_possible(0, true);
@@ -185,7 +185,7 @@ void __init nlm_smp_setup(void)
* it is only set for ASPs (see smpboot.S)
*/
if (nlm_cpu_ready[i]) {
- cpumask_set_cpu(i, &phys_cpu_present_map);
+ cpumask_set_cpu(i, &phys_cpu_present_mask);
__cpu_number_map[i] = num_cpus;
__cpu_logical_map[num_cpus] = i;
set_cpu_possible(num_cpus, true);
@@ -193,16 +193,19 @@ void __init nlm_smp_setup(void)
}
}
+ cpumask_scnprintf(buf, ARRAY_SIZE(buf), &phys_cpu_present_mask);
+ pr_info("Physical CPU mask: %s\n", buf);
+ cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask);
+ pr_info("Possible CPU mask: %s\n", buf);
+
 /* check with the cores we have woken up */
for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
ncore += hweight32(nlm_get_node(i)->coremask);
- pr_info("Phys CPU present map: %lx, possible map %lx\n",
- (unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
- (unsigned long)cpumask_bits(cpu_possible_mask)[0]);
-
pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
nlm_threads_per_core, num_cpus);
+
+ /* switch NMI handler to boot CPUs */
nlm_set_nmi_handler(nlm_boot_secondary_cpus);
}
diff --git a/arch/mips/netlogic/dts/Makefile b/arch/mips/netlogic/dts/Makefile
index d117d46413aa..aecb6fa9a9c3 100644
--- a/arch/mips/netlogic/dts/Makefile
+++ b/arch/mips/netlogic/dts/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_DT_XLP_EVP) := xlp_evp.dtb.o
+obj-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb.o
diff --git a/arch/mips/netlogic/dts/xlp_evp.dts b/arch/mips/netlogic/dts/xlp_evp.dts
index 7628b5464fc7..e14f42308064 100644
--- a/arch/mips/netlogic/dts/xlp_evp.dts
+++ b/arch/mips/netlogic/dts/xlp_evp.dts
@@ -20,7 +20,7 @@
#address-cells = <2>;
#size-cells = <1>;
compatible = "simple-bus";
- ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
+ ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
1 0 0 0x16000000 0x01000000>; // GBU chipselects
serial0: serial@30000 {
diff --git a/arch/mips/netlogic/dts/xlp_svp.dts b/arch/mips/netlogic/dts/xlp_svp.dts
new file mode 100644
index 000000000000..8af4bdbe5d99
--- /dev/null
+++ b/arch/mips/netlogic/dts/xlp_svp.dts
@@ -0,0 +1,124 @@
+/*
+ * XLP3XX Device Tree Source for SVP boards
+ */
+
+/dts-v1/;
+/ {
+ model = "netlogic,XLP-SVP";
+ compatible = "netlogic,xlp";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ memory {
+ device_type = "memory";
+ reg = <0 0x00100000 0 0x0FF00000 // 255M at 1M
+ 0 0x20000000 0 0xa0000000 // 2560M at 512M
+ 0 0xe0000000 0 0x40000000>;
+ };
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
+ 1 0 0 0x16000000 0x01000000>; // GBU chipselects
+
+ serial0: serial@30000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0 0x30100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <133333333>;
+ interrupt-parent = <&pic>;
+ interrupts = <17>;
+ };
+ serial1: serial@31000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0 0x31100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <133333333>;
+ interrupt-parent = <&pic>;
+ interrupts = <18>;
+ };
+ i2c0: ocores@32000 {
+ compatible = "opencores,i2c-ocores";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x32100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <32000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <30>;
+ };
+ i2c1: ocores@33000 {
+ compatible = "opencores,i2c-ocores";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x33100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <32000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <31>;
+
+ rtc@68 {
+ compatible = "dallas,ds1374";
+ reg = <0x68>;
+ };
+
+ dtt@4c {
+ compatible = "national,lm90";
+ reg = <0x4c>;
+ };
+ };
+ pic: pic@4000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0 0x4000 0x200>;
+ };
+
+ nor_flash@1,0 {
+ compatible = "cfi-flash";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ bank-width = <2>;
+ reg = <1 0 0x1000000>;
+
+ partition@0 {
+ label = "x-loader";
+ reg = <0x0 0x100000>; /* 1M */
+ read-only;
+ };
+
+ partition@100000 {
+ label = "u-boot";
+ reg = <0x100000 0x100000>; /* 1M */
+ };
+
+ partition@200000 {
+ label = "kernel";
+ reg = <0x200000 0x500000>; /* 5M */
+ };
+
+ partition@700000 {
+ label = "rootfs";
+ reg = <0x700000 0x800000>; /* 8M */
+ };
+
+ partition@f00000 {
+ label = "env";
+ reg = <0xf00000 0x100000>; /* 1M */
+ read-only;
+ };
+ };
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
+ };
+};
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index c68fd4026104..87560e4db35f 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -61,43 +61,61 @@ void nlm_node_init(int node)
int nlm_irq_to_irt(int irq)
{
- if (!PIC_IRQ_IS_IRT(irq))
- return -1;
+ uint64_t pcibase;
+ int devoff, irt;
switch (irq) {
case PIC_UART_0_IRQ:
- return PIC_IRT_UART_0_INDEX;
+ devoff = XLP_IO_UART0_OFFSET(0);
+ break;
case PIC_UART_1_IRQ:
- return PIC_IRT_UART_1_INDEX;
- case PIC_PCIE_LINK_0_IRQ:
- return PIC_IRT_PCIE_LINK_0_INDEX;
- case PIC_PCIE_LINK_1_IRQ:
- return PIC_IRT_PCIE_LINK_1_INDEX;
- case PIC_PCIE_LINK_2_IRQ:
- return PIC_IRT_PCIE_LINK_2_INDEX;
- case PIC_PCIE_LINK_3_IRQ:
- return PIC_IRT_PCIE_LINK_3_INDEX;
+ devoff = XLP_IO_UART1_OFFSET(0);
+ break;
case PIC_EHCI_0_IRQ:
- return PIC_IRT_EHCI_0_INDEX;
+ devoff = XLP_IO_USB_EHCI0_OFFSET(0);
+ break;
case PIC_EHCI_1_IRQ:
- return PIC_IRT_EHCI_1_INDEX;
+ devoff = XLP_IO_USB_EHCI1_OFFSET(0);
+ break;
case PIC_OHCI_0_IRQ:
- return PIC_IRT_OHCI_0_INDEX;
+ devoff = XLP_IO_USB_OHCI0_OFFSET(0);
+ break;
case PIC_OHCI_1_IRQ:
- return PIC_IRT_OHCI_1_INDEX;
+ devoff = XLP_IO_USB_OHCI1_OFFSET(0);
+ break;
case PIC_OHCI_2_IRQ:
- return PIC_IRT_OHCI_2_INDEX;
+ devoff = XLP_IO_USB_OHCI2_OFFSET(0);
+ break;
case PIC_OHCI_3_IRQ:
- return PIC_IRT_OHCI_3_INDEX;
+ devoff = XLP_IO_USB_OHCI3_OFFSET(0);
+ break;
case PIC_MMC_IRQ:
- return PIC_IRT_MMC_INDEX;
+ devoff = XLP_IO_SD_OFFSET(0);
+ break;
case PIC_I2C_0_IRQ:
- return PIC_IRT_I2C_0_INDEX;
+ devoff = XLP_IO_I2C0_OFFSET(0);
+ break;
case PIC_I2C_1_IRQ:
- return PIC_IRT_I2C_1_INDEX;
+ devoff = XLP_IO_I2C1_OFFSET(0);
+ break;
default:
- return -1;
+ devoff = 0;
+ break;
}
+
+ if (devoff != 0) {
+ pcibase = nlm_pcicfg_base(devoff);
+ irt = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG) & 0xffff;
+ /* HW bug, I2C 1 irt entry is off by one */
+ if (irq == PIC_I2C_1_IRQ)
+ irt = irt + 1;
+ } else if (irq >= PIC_PCIE_LINK_0_IRQ && irq <= PIC_PCIE_LINK_3_IRQ) {
+ /* HW bug, PCI IRT entries are bad on early silicon, fix */
+ irt = PIC_IRT_PCIE_LINK_INDEX(irq - PIC_PCIE_LINK_0_IRQ);
+ } else {
+ irt = -1;
+ }
+ return irt;
}
unsigned int nlm_get_core_frequency(int node, int core)
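
nlm_irq_to_irt() now reads the IRT index out of each device's PCI config space (the low 16 bits of the IRTINFO register) instead of using a fixed table, with two quirks: I2C1's entry is off by one and early-silicon PCIe link entries are recomputed. A runnable sketch of that lookup pattern, where read_irtinfo() is a stub standing in for nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG) and the register contents are assumed:

#include <stdint.h>
#include <stdio.h>

static uint32_t read_irtinfo(uint64_t pcibase)
{
	(void)pcibase;
	return 0x00010023;		/* assumed register contents */
}

static int irt_from_device(uint64_t pcibase, int is_i2c1)
{
	int irt = read_irtinfo(pcibase) & 0xffff;	/* low 16 bits = IRT index */

	if (is_i2c1)
		irt += 1;	/* hardware bug: I2C1 entry is off by one */
	return irt;
}

int main(void)
{
	printf("irt = %d\n", irt_from_device(0x18000000, 0));
	return 0;
}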
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 4894d62043ac..af319143b591 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -56,7 +56,7 @@ uint64_t nlm_io_base;
struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
cpumask_t nlm_cpumask = CPU_MASK_CPU0;
unsigned int nlm_threads_per_core;
-extern u32 __dtb_start[];
+extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_start[];
static void nlm_linux_exit(void)
{
@@ -82,8 +82,24 @@ void __init plat_mem_setup(void)
* 64-bit, so convert pointer.
*/
fdtp = (void *)(long)fw_arg0;
- if (!fdtp)
- fdtp = __dtb_start;
+ if (!fdtp) {
+ switch (current_cpu_data.processor_id & 0xff00) {
+#ifdef CONFIG_DT_XLP_SVP
+ case PRID_IMP_NETLOGIC_XLP3XX:
+ fdtp = __dtb_xlp_svp_begin;
+ break;
+#endif
+#ifdef CONFIG_DT_XLP_EVP
+ case PRID_IMP_NETLOGIC_XLP8XX:
+ fdtp = __dtb_xlp_evp_begin;
+ break;
+#endif
+ default:
+ /* Pick a built-in if any, and hope for the best */
+ fdtp = __dtb_start;
+ break;
+ }
+ }
fdtp = phys_to_virt(__pa(fdtp));
early_init_devtree(fdtp);
}
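
plat_mem_setup() now chooses the built-in DTB from the PRID implementation field when the firmware passes no FDT pointer. A sketch of that selection; the PRID constants here are assumed values for illustration rather than quotes from asm/cpu.h:

#include <stdio.h>

#define PRID_IMP_NETLOGIC_XLP3XX	0x1100	/* assumed for the sketch */
#define PRID_IMP_NETLOGIC_XLP8XX	0x1000	/* assumed for the sketch */

static const char *pick_dtb(unsigned int processor_id)
{
	switch (processor_id & 0xff00) {
	case PRID_IMP_NETLOGIC_XLP3XX:
		return "xlp_svp.dtb";
	case PRID_IMP_NETLOGIC_XLP8XX:
		return "xlp_evp.dtb";
	default:
		return "first built-in dtb";	/* "hope for the best" fallback */
	}
}

int main(void)
{
	printf("%s\n", pick_dtb(0x1104));
	return 0;
}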
diff --git a/arch/mips/netlogic/xlp/usb-init.c b/arch/mips/netlogic/xlp/usb-init.c
index 1d0b66c62fd1..9c401dd78337 100644
--- a/arch/mips/netlogic/xlp/usb-init.c
+++ b/arch/mips/netlogic/xlp/usb-init.c
@@ -42,7 +42,30 @@
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/xlp.h>
-#include <asm/netlogic/xlp-hal/usb.h>
+
+/*
+ * USB glue logic registers, used only during initialization
+ */
+#define USB_CTL_0 0x01
+#define USB_PHY_0 0x0A
+#define USB_PHY_RESET 0x01
+#define USB_PHY_PORT_RESET_0 0x10
+#define USB_PHY_PORT_RESET_1 0x20
+#define USB_CONTROLLER_RESET 0x01
+#define USB_INT_STATUS 0x0E
+#define USB_INT_EN 0x0F
+#define USB_PHY_INTERRUPT_EN 0x01
+#define USB_OHCI_INTERRUPT_EN 0x02
+#define USB_OHCI_INTERRUPT1_EN 0x04
+#define USB_OHCI_INTERRUPT2_EN 0x08
+#define USB_CTRL_INTERRUPT_EN 0x10
+
+#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_usb_pcibase(node, inst) \
+ nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
+#define nlm_get_usb_regbase(node, inst) \
+ (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
static void nlm_usb_intr_en(int node, int port)
{
@@ -99,23 +122,23 @@ static void nlm_usb_fixup_final(struct pci_dev *dev)
dev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
switch (dev->devfn) {
case 0x10:
- dev->irq = PIC_EHCI_0_IRQ;
- break;
+ dev->irq = PIC_EHCI_0_IRQ;
+ break;
case 0x11:
- dev->irq = PIC_OHCI_0_IRQ;
- break;
+ dev->irq = PIC_OHCI_0_IRQ;
+ break;
case 0x12:
- dev->irq = PIC_OHCI_1_IRQ;
- break;
+ dev->irq = PIC_OHCI_1_IRQ;
+ break;
case 0x13:
- dev->irq = PIC_EHCI_1_IRQ;
- break;
+ dev->irq = PIC_EHCI_1_IRQ;
+ break;
case 0x14:
- dev->irq = PIC_OHCI_2_IRQ;
- break;
+ dev->irq = PIC_OHCI_2_IRQ;
+ break;
case 0x15:
- dev->irq = PIC_OHCI_3_IRQ;
- break;
+ dev->irq = PIC_OHCI_3_IRQ;
+ break;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI,
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 1fd361462c03..e4b1140cdae0 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -41,7 +41,7 @@ static int (*save_perf_irq)(void);
* first hardware thread in the core for setup and init.
* Skip CPUs with non-zero hardware thread id (4 hwt per core)
*/
-#ifdef CONFIG_CPU_XLR
+#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
#define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0)
#else
#define oprofile_skip_cpu(c) 0
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 412ec025cf55..18517dd0f709 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -366,9 +366,9 @@ static int ar71xx_pci_probe(struct platform_device *pdev)
if (!res)
return -EINVAL;
- apc->cfg_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!apc->cfg_base)
- return -ENOMEM;
+ apc->cfg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(apc->cfg_base))
+ return PTR_ERR(apc->cfg_base);
apc->irq = platform_get_irq(pdev, 0);
if (apc->irq < 0)
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 8a0700d448fe..65ec032fa0b4 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -365,25 +365,25 @@ static int ar724x_pci_probe(struct platform_device *pdev)
if (!res)
return -EINVAL;
- apc->ctrl_base = devm_request_and_ioremap(&pdev->dev, res);
- if (apc->ctrl_base == NULL)
- return -EBUSY;
+ apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(apc->ctrl_base))
+ return PTR_ERR(apc->ctrl_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
if (!res)
return -EINVAL;
- apc->devcfg_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!apc->devcfg_base)
- return -EBUSY;
+ apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(apc->devcfg_base))
+ return PTR_ERR(apc->devcfg_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
if (!res)
return -EINVAL;
- apc->crp_base = devm_request_and_ioremap(&pdev->dev, res);
- if (apc->crp_base == NULL)
- return -EBUSY;
+ apc->crp_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(apc->crp_base))
+ return PTR_ERR(apc->crp_base);
apc->irq = platform_get_irq(pdev, 0);
if (apc->irq < 0)
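
Both the ar71xx and ar724x probes switch from devm_request_and_ioremap(), which signalled failure with NULL, to devm_ioremap_resource(), which returns an encoded errno via ERR_PTR(). The user-space sketch below reimplements just enough of that convention to show why the checks become IS_ERR()/PTR_ERR(); fake_ioremap() is a stand-in, not a kernel API.

#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_ioremap(int fail)
{
	static int regs[64];

	return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)regs;
}

int main(void)
{
	void *base = fake_ioremap(1);

	if (IS_ERR(base)) {		/* errors are encoded in the pointer, not NULL */
		printf("probe fails with %ld\n", PTR_ERR(base));
		return (int)-PTR_ERR(base);
	}
	return 0;
}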
diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c
index 88e781c6b5ba..2eb954239bc5 100644
--- a/arch/mips/pci/pci-bcm63xx.c
+++ b/arch/mips/pci/pci-bcm63xx.c
@@ -121,11 +121,17 @@ void __iomem *pci_iospace_start;
static void __init bcm63xx_reset_pcie(void)
{
u32 val;
+ u32 reg;
/* enable SERDES */
- val = bcm_misc_readl(MISC_SERDES_CTRL_REG);
+ if (BCMCPU_IS_6328())
+ reg = MISC_SERDES_CTRL_6328_REG;
+ else
+ reg = MISC_SERDES_CTRL_6362_REG;
+
+ val = bcm_misc_readl(reg);
val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN;
- bcm_misc_writel(val, MISC_SERDES_CTRL_REG);
+ bcm_misc_writel(val, reg);
/* reset the PCIe core */
bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1);
@@ -330,6 +336,7 @@ static int __init bcm63xx_pci_init(void)
switch (bcm63xx_get_cpu_id()) {
case BCM6328_CPU_ID:
+ case BCM6362_CPU_ID:
return bcm63xx_register_pcie();
case BCM6348_CPU_ID:
case BCM6358_CPU_ID:
diff --git a/arch/mips/powertv/init.c b/arch/mips/powertv/init.c
index 5bd9d8f468cc..a01baff52cae 100644
--- a/arch/mips/powertv/init.c
+++ b/arch/mips/powertv/init.c
@@ -29,10 +29,11 @@
#include <asm/cacheflush.h>
#include <asm/traps.h>
-#include <asm/mips-boards/prom.h>
#include <asm/mips-boards/generic.h>
#include <asm/mach-powertv/asic.h>
+#include "init.h"
+
static int *_prom_envp;
unsigned long _prom_memsize;
diff --git a/arch/mips/powertv/init.h b/arch/mips/powertv/init.h
index b194c34ca966..c1a8bd0dbe4b 100644
--- a/arch/mips/powertv/init.h
+++ b/arch/mips/powertv/init.h
@@ -23,4 +23,6 @@
#ifndef _POWERTV_INIT_H
#define _POWERTV_INIT_H
extern unsigned long _prom_memsize;
+extern void prom_meminit(void);
+extern char *prom_getenv(char *name);
#endif
diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c
index 6e5f1bdc59b5..bc2f3ca22b41 100644
--- a/arch/mips/powertv/memory.c
+++ b/arch/mips/powertv/memory.c
@@ -29,7 +29,6 @@
#include <asm/page.h>
#include <asm/sections.h>
-#include <asm/mips-boards/prom.h>
#include <asm/mach-powertv/asic.h>
#include <asm/mach-powertv/ioremap.h>
diff --git a/arch/mips/powertv/powertv_setup.c b/arch/mips/powertv/powertv_setup.c
index 820b8480f222..24689bff1039 100644
--- a/arch/mips/powertv/powertv_setup.c
+++ b/arch/mips/powertv/powertv_setup.c
@@ -31,7 +31,6 @@
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
#include <asm/dma.h>
#include <asm/asm.h>
#include <asm/traps.h>
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index a0b0197cab0a..026e823d871d 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -6,12 +6,23 @@ choice
help
Select Ralink MIPS SoC type.
+ config SOC_RT288X
+ bool "RT288x"
+
config SOC_RT305X
bool "RT305x"
select USB_ARCH_HAS_HCD
select USB_ARCH_HAS_OHCI
select USB_ARCH_HAS_EHCI
+ config SOC_RT3883
+ bool "RT3883"
+ select USB_ARCH_HAS_OHCI
+ select USB_ARCH_HAS_EHCI
+
+ config SOC_MT7620
+ bool "MT7620"
+
endchoice
choice
@@ -23,10 +34,22 @@ choice
config DTB_RT_NONE
bool "None"
+ config DTB_RT2880_EVAL
+ bool "RT2880 eval kit"
+ depends on SOC_RT288X
+
config DTB_RT305X_EVAL
bool "RT305x eval kit"
depends on SOC_RT305X
+ config DTB_RT3883_EVAL
+ bool "RT3883 eval kit"
+ depends on SOC_RT3883
+
+ config DTB_MT7620A_EVAL
+ bool "MT7620A eval kit"
+ depends on SOC_MT7620
+
endchoice
endif
diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile
index 939757f0e71f..38cf1a880aaa 100644
--- a/arch/mips/ralink/Makefile
+++ b/arch/mips/ralink/Makefile
@@ -8,7 +8,10 @@
obj-y := prom.o of.o reset.o clk.o irq.o
+obj-$(CONFIG_SOC_RT288X) += rt288x.o
obj-$(CONFIG_SOC_RT305X) += rt305x.o
+obj-$(CONFIG_SOC_RT3883) += rt3883.o
+obj-$(CONFIG_SOC_MT7620) += mt7620.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
diff --git a/arch/mips/ralink/Platform b/arch/mips/ralink/Platform
index 6babd65765e6..cda4b6645c50 100644
--- a/arch/mips/ralink/Platform
+++ b/arch/mips/ralink/Platform
@@ -5,6 +5,24 @@ core-$(CONFIG_RALINK) += arch/mips/ralink/
cflags-$(CONFIG_RALINK) += -I$(srctree)/arch/mips/include/asm/mach-ralink
#
+# Ralink RT288x
+#
+load-$(CONFIG_SOC_RT288X) += 0xffffffff88000000
+cflags-$(CONFIG_SOC_RT288X) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt288x
+
+#
# Ralink RT305x
#
load-$(CONFIG_SOC_RT305X) += 0xffffffff80000000
+cflags-$(CONFIG_SOC_RT305X) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt305x
+
+#
+# Ralink RT3883
+#
+load-$(CONFIG_SOC_RT3883) += 0xffffffff80000000
+cflags-$(CONFIG_SOC_RT3883) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt3883
+
+#
+# Ralink MT7620
+#
+load-$(CONFIG_SOC_MT7620) += 0xffffffff80000000
diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h
index 300990313e1b..83144c3fc5ac 100644
--- a/arch/mips/ralink/common.h
+++ b/arch/mips/ralink/common.h
@@ -22,13 +22,22 @@ struct ralink_pinmux {
struct ralink_pinmux_grp *mode;
struct ralink_pinmux_grp *uart;
int uart_shift;
+ u32 uart_mask;
void (*wdt_reset)(void);
+ struct ralink_pinmux_grp *pci;
+ int pci_shift;
+ u32 pci_mask;
};
-extern struct ralink_pinmux gpio_pinmux;
+extern struct ralink_pinmux rt_gpio_pinmux;
struct ralink_soc_info {
unsigned char sys_type[RAMIPS_SYS_TYPE_LEN];
unsigned char *compatible;
+
+ unsigned long mem_base;
+ unsigned long mem_size;
+ unsigned long mem_size_min;
+ unsigned long mem_size_max;
};
extern struct ralink_soc_info soc_info;
diff --git a/arch/mips/ralink/dts/Makefile b/arch/mips/ralink/dts/Makefile
index 1a69fb300955..18194fa93e80 100644
--- a/arch/mips/ralink/dts/Makefile
+++ b/arch/mips/ralink/dts/Makefile
@@ -1 +1,4 @@
+obj-$(CONFIG_DTB_RT2880_EVAL) := rt2880_eval.dtb.o
obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o
+obj-$(CONFIG_DTB_RT3883_EVAL) := rt3883_eval.dtb.o
+obj-$(CONFIG_DTB_MT7620A_EVAL) := mt7620a_eval.dtb.o
diff --git a/arch/mips/ralink/dts/mt7620a.dtsi b/arch/mips/ralink/dts/mt7620a.dtsi
new file mode 100644
index 000000000000..08bf24fefe9f
--- /dev/null
+++ b/arch/mips/ralink/dts/mt7620a.dtsi
@@ -0,0 +1,58 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ralink,mtk7620a-soc";
+
+ cpus {
+ cpu@0 {
+ compatible = "mips,mips24KEc";
+ };
+ };
+
+ cpuintc: cpuintc@0 {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ palmbus@10000000 {
+ compatible = "palmbus";
+ reg = <0x10000000 0x200000>;
+ ranges = <0x0 0x10000000 0x1FFFFF>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sysc@0 {
+ compatible = "ralink,mt7620a-sysc";
+ reg = <0x0 0x100>;
+ };
+
+ intc: intc@200 {
+ compatible = "ralink,mt7620a-intc", "ralink,rt2880-intc";
+ reg = <0x200 0x100>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ memc@300 {
+ compatible = "ralink,mt7620a-memc", "ralink,rt3050-memc";
+ reg = <0x300 0x100>;
+ };
+
+ uartlite@c00 {
+ compatible = "ralink,mt7620a-uart", "ralink,rt2880-uart", "ns16550a";
+ reg = <0xc00 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <12>;
+
+ reg-shift = <2>;
+ };
+ };
+};
diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts
new file mode 100644
index 000000000000..35eb874ab7f1
--- /dev/null
+++ b/arch/mips/ralink/dts/mt7620a_eval.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/include/ "mt7620a.dtsi"
+
+/ {
+ compatible = "ralink,mt7620a-eval-board", "ralink,mt7620a-soc";
+ model = "Ralink MT7620A evaluation board";
+
+ memory@0 {
+ reg = <0x0 0x2000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,57600";
+ };
+};
diff --git a/arch/mips/ralink/dts/rt2880.dtsi b/arch/mips/ralink/dts/rt2880.dtsi
new file mode 100644
index 000000000000..182afde2f2e1
--- /dev/null
+++ b/arch/mips/ralink/dts/rt2880.dtsi
@@ -0,0 +1,58 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ralink,rt2880-soc";
+
+ cpus {
+ cpu@0 {
+ compatible = "mips,mips4KEc";
+ };
+ };
+
+ cpuintc: cpuintc@0 {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ palmbus@300000 {
+ compatible = "palmbus";
+ reg = <0x300000 0x200000>;
+ ranges = <0x0 0x300000 0x1FFFFF>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sysc@0 {
+ compatible = "ralink,rt2880-sysc";
+ reg = <0x0 0x100>;
+ };
+
+ intc: intc@200 {
+ compatible = "ralink,rt2880-intc";
+ reg = <0x200 0x100>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ memc@300 {
+ compatible = "ralink,rt2880-memc";
+ reg = <0x300 0x100>;
+ };
+
+ uartlite@c00 {
+ compatible = "ralink,rt2880-uart", "ns16550a";
+ reg = <0xc00 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <8>;
+
+ reg-shift = <2>;
+ };
+ };
+};
diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts
new file mode 100644
index 000000000000..322d7002595b
--- /dev/null
+++ b/arch/mips/ralink/dts/rt2880_eval.dts
@@ -0,0 +1,46 @@
+/dts-v1/;
+
+/include/ "rt2880.dtsi"
+
+/ {
+ compatible = "ralink,rt2880-eval-board", "ralink,rt2880-soc";
+ model = "Ralink RT2880 evaluation board";
+
+ memory@0 {
+ reg = <0x8000000 0x2000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,57600";
+ };
+
+ cfi@1f000000 {
+ compatible = "cfi-flash";
+ reg = <0x1f000000 0x400000>;
+
+ bank-width = <2>;
+ device-width = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0 0x30000>;
+ read-only;
+ };
+ partition@30000 {
+ label = "uboot-env";
+ reg = <0x30000 0x10000>;
+ read-only;
+ };
+ partition@40000 {
+ label = "calibration";
+ reg = <0x40000 0x10000>;
+ read-only;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x3b0000>;
+ };
+ };
+};
diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/ralink/dts/rt3050.dtsi
index 069d0660e1dd..ef7da1e227e6 100644
--- a/arch/mips/ralink/dts/rt3050.dtsi
+++ b/arch/mips/ralink/dts/rt3050.dtsi
@@ -1,7 +1,7 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "ralink,rt3050-soc", "ralink,rt3052-soc";
+ compatible = "ralink,rt3050-soc", "ralink,rt3052-soc", "ralink,rt3350-soc";
cpus {
cpu@0 {
@@ -9,10 +9,6 @@
};
};
- chosen {
- bootargs = "console=ttyS0,57600 init=/init";
- };
-
cpuintc: cpuintc@0 {
#address-cells = <0>;
#interrupt-cells = <1>;
@@ -23,7 +19,7 @@
palmbus@10000000 {
compatible = "palmbus";
reg = <0x10000000 0x200000>;
- ranges = <0x0 0x10000000 0x1FFFFF>;
+ ranges = <0x0 0x10000000 0x1FFFFF>;
#address-cells = <1>;
#size-cells = <1>;
@@ -33,11 +29,6 @@
reg = <0x0 0x100>;
};
- timer@100 {
- compatible = "ralink,rt3052-wdt", "ralink,rt2880-wdt";
- reg = <0x100 0x100>;
- };
-
intc: intc@200 {
compatible = "ralink,rt3052-intc", "ralink,rt2880-intc";
reg = <0x200 0x100>;
@@ -54,45 +45,6 @@
reg = <0x300 0x100>;
};
- gpio0: gpio@600 {
- compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
- reg = <0x600 0x34>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- ralink,ngpio = <24>;
- ralink,regs = [ 00 04 08 0c
- 20 24 28 2c
- 30 34 ];
- };
-
- gpio1: gpio@638 {
- compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
- reg = <0x638 0x24>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- ralink,ngpio = <16>;
- ralink,regs = [ 00 04 08 0c
- 10 14 18 1c
- 20 24 ];
- };
-
- gpio2: gpio@660 {
- compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
- reg = <0x660 0x24>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- ralink,ngpio = <12>;
- ralink,regs = [ 00 04 08 0c
- 10 14 18 1c
- 20 24 ];
- };
-
uartlite@c00 {
compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a";
reg = <0xc00 0x100>;
diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
index 148a590bc419..c18c9a84f4c4 100644
--- a/arch/mips/ralink/dts/rt3052_eval.dts
+++ b/arch/mips/ralink/dts/rt3052_eval.dts
@@ -1,10 +1,8 @@
/dts-v1/;
-/include/ "rt3050.dtsi"
+#include "rt3050.dtsi"
/ {
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc";
model = "Ralink RT3052 evaluation board";
@@ -12,12 +10,8 @@
reg = <0x0 0x2000000>;
};
- palmbus@10000000 {
- sysc@0 {
- ralink,pinmmux = "uartlite", "spi";
- ralink,uartmux = "gpio";
- ralink,wdtmux = <0>;
- };
+ chosen {
+ bootargs = "console=ttyS0,57600";
};
cfi@1f000000 {
diff --git a/arch/mips/ralink/dts/rt3883.dtsi b/arch/mips/ralink/dts/rt3883.dtsi
new file mode 100644
index 000000000000..3b131dd0d5ac
--- /dev/null
+++ b/arch/mips/ralink/dts/rt3883.dtsi
@@ -0,0 +1,58 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ralink,rt3883-soc";
+
+ cpus {
+ cpu@0 {
+ compatible = "mips,mips74Kc";
+ };
+ };
+
+ cpuintc: cpuintc@0 {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ palmbus@10000000 {
+ compatible = "palmbus";
+ reg = <0x10000000 0x200000>;
+ ranges = <0x0 0x10000000 0x1FFFFF>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sysc@0 {
+ compatible = "ralink,rt3883-sysc", "ralink,rt3050-sysc";
+ reg = <0x0 0x100>;
+ };
+
+ intc: intc@200 {
+ compatible = "ralink,rt3883-intc", "ralink,rt2880-intc";
+ reg = <0x200 0x100>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ memc@300 {
+ compatible = "ralink,rt3883-memc", "ralink,rt3050-memc";
+ reg = <0x300 0x100>;
+ };
+
+ uartlite@c00 {
+ compatible = "ralink,rt3883-uart", "ralink,rt2880-uart", "ns16550a";
+ reg = <0xc00 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <12>;
+
+ reg-shift = <2>;
+ };
+ };
+};
diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts
new file mode 100644
index 000000000000..2fa6b330bf4f
--- /dev/null
+++ b/arch/mips/ralink/dts/rt3883_eval.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/include/ "rt3883.dtsi"
+
+/ {
+ compatible = "ralink,rt3883-eval-board", "ralink,rt3883-soc";
+ model = "Ralink RT3883 evaluation board";
+
+ memory@0 {
+ reg = <0x0 0x2000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,57600";
+ };
+};
diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c
index c4ae47eb24ab..b46d0419d09b 100644
--- a/arch/mips/ralink/early_printk.c
+++ b/arch/mips/ralink/early_printk.c
@@ -11,7 +11,11 @@
#include <asm/addrspace.h>
+#ifdef CONFIG_SOC_RT288X
+#define EARLY_UART_BASE 0x300c00
+#else
#define EARLY_UART_BASE 0x10000c00
+#endif
#define UART_REG_RX 0x00
#define UART_REG_TX 0x04
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 6d054c5ec9ab..320b1f1043ff 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -31,6 +31,7 @@
#define INTC_INT_GLOBAL BIT(31)
#define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2)
+#define RALINK_CPU_IRQ_PCI (MIPS_CPU_IRQ_BASE + 4)
#define RALINK_CPU_IRQ_FE (MIPS_CPU_IRQ_BASE + 5)
#define RALINK_CPU_IRQ_WIFI (MIPS_CPU_IRQ_BASE + 6)
#define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7)
@@ -104,6 +105,9 @@ asmlinkage void plat_irq_dispatch(void)
else if (pending & STATUSF_IP6)
do_IRQ(RALINK_CPU_IRQ_WIFI);
+ else if (pending & STATUSF_IP4)
+ do_IRQ(RALINK_CPU_IRQ_PCI);
+
else if (pending & STATUSF_IP2)
do_IRQ(RALINK_CPU_IRQ_INTC);
@@ -162,6 +166,7 @@ static int __init intc_of_init(struct device_node *node,
irq_set_chained_handler(irq, ralink_intc_irq_handler);
irq_set_handler_data(irq, domain);
+ /* tell the kernel which irq is used for performance monitoring */
cp0_perfcount_irq = irq_create_mapping(domain, 9);
return 0;
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
new file mode 100644
index 000000000000..0018b1a661f6
--- /dev/null
+++ b/arch/mips/ralink/mt7620.c
@@ -0,0 +1,234 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/mt7620.h>
+
+#include "common.h"
+
+/* does the board have sdram or ddram */
+static int dram_type;
+
+/* the pll dividers */
+static u32 mt7620_clk_divider[] = { 2, 3, 4, 8 };
+
+static struct ralink_pinmux_grp mode_mux[] = {
+ {
+ .name = "i2c",
+ .mask = MT7620_GPIO_MODE_I2C,
+ .gpio_first = 1,
+ .gpio_last = 2,
+ }, {
+ .name = "spi",
+ .mask = MT7620_GPIO_MODE_SPI,
+ .gpio_first = 3,
+ .gpio_last = 6,
+ }, {
+ .name = "uartlite",
+ .mask = MT7620_GPIO_MODE_UART1,
+ .gpio_first = 15,
+ .gpio_last = 16,
+ }, {
+ .name = "wdt",
+ .mask = MT7620_GPIO_MODE_WDT,
+ .gpio_first = 17,
+ .gpio_last = 17,
+ }, {
+ .name = "mdio",
+ .mask = MT7620_GPIO_MODE_MDIO,
+ .gpio_first = 22,
+ .gpio_last = 23,
+ }, {
+ .name = "rgmii1",
+ .mask = MT7620_GPIO_MODE_RGMII1,
+ .gpio_first = 24,
+ .gpio_last = 35,
+ }, {
+ .name = "spi refclk",
+ .mask = MT7620_GPIO_MODE_SPI_REF_CLK,
+ .gpio_first = 37,
+ .gpio_last = 39,
+ }, {
+ .name = "jtag",
+ .mask = MT7620_GPIO_MODE_JTAG,
+ .gpio_first = 40,
+ .gpio_last = 44,
+ }, {
+ /* shared lines with jtag */
+ .name = "ephy",
+ .mask = MT7620_GPIO_MODE_EPHY,
+ .gpio_first = 40,
+ .gpio_last = 44,
+ }, {
+ .name = "nand",
+ .mask = MT7620_GPIO_MODE_JTAG,
+ .gpio_first = 45,
+ .gpio_last = 59,
+ }, {
+ .name = "rgmii2",
+ .mask = MT7620_GPIO_MODE_RGMII2,
+ .gpio_first = 60,
+ .gpio_last = 71,
+ }, {
+ .name = "wled",
+ .mask = MT7620_GPIO_MODE_WLED,
+ .gpio_first = 72,
+ .gpio_last = 72,
+ }, {0}
+};
+
+static struct ralink_pinmux_grp uart_mux[] = {
+ {
+ .name = "uartf",
+ .mask = MT7620_GPIO_MODE_UARTF,
+ .gpio_first = 7,
+ .gpio_last = 14,
+ }, {
+ .name = "pcm uartf",
+ .mask = MT7620_GPIO_MODE_PCM_UARTF,
+ .gpio_first = 7,
+ .gpio_last = 14,
+ }, {
+ .name = "pcm i2s",
+ .mask = MT7620_GPIO_MODE_PCM_I2S,
+ .gpio_first = 7,
+ .gpio_last = 14,
+ }, {
+ .name = "i2s uartf",
+ .mask = MT7620_GPIO_MODE_I2S_UARTF,
+ .gpio_first = 7,
+ .gpio_last = 14,
+ }, {
+ .name = "pcm gpio",
+ .mask = MT7620_GPIO_MODE_PCM_GPIO,
+ .gpio_first = 11,
+ .gpio_last = 14,
+ }, {
+ .name = "gpio uartf",
+ .mask = MT7620_GPIO_MODE_GPIO_UARTF,
+ .gpio_first = 7,
+ .gpio_last = 10,
+ }, {
+ .name = "gpio i2s",
+ .mask = MT7620_GPIO_MODE_GPIO_I2S,
+ .gpio_first = 7,
+ .gpio_last = 10,
+ }, {
+ .name = "gpio",
+ .mask = MT7620_GPIO_MODE_GPIO,
+ }, {0}
+};
+
+struct ralink_pinmux rt_gpio_pinmux = {
+ .mode = mode_mux,
+ .uart = uart_mux,
+ .uart_shift = MT7620_GPIO_MODE_UART0_SHIFT,
+ .uart_mask = MT7620_GPIO_MODE_UART0_MASK,
+};
+
+void __init ralink_clk_init(void)
+{
+ unsigned long cpu_rate, sys_rate;
+ u32 c0 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG0);
+ u32 c1 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG1);
+ u32 swconfig = (c0 >> CPLL_SW_CONFIG_SHIFT) & CPLL_SW_CONFIG_MASK;
+ u32 cpu_clk = (c1 >> CPLL_CPU_CLK_SHIFT) & CPLL_CPU_CLK_MASK;
+
+ if (cpu_clk) {
+ cpu_rate = 480000000;
+ } else if (!swconfig) {
+ cpu_rate = 600000000;
+ } else {
+ u32 m = (c0 >> CPLL_MULT_RATIO_SHIFT) & CPLL_MULT_RATIO;
+ u32 d = (c0 >> CPLL_DIV_RATIO_SHIFT) & CPLL_DIV_RATIO;
+
+ cpu_rate = ((40 * (m + 24)) / mt7620_clk_divider[d]) * 1000000;
+ }
+
+ if (dram_type == SYSCFG0_DRAM_TYPE_SDRAM)
+ sys_rate = cpu_rate / 4;
+ else
+ sys_rate = cpu_rate / 3;
+
+ ralink_clk_add("cpu", cpu_rate);
+ ralink_clk_add("10000100.timer", 40000000);
+ ralink_clk_add("10000500.uart", 40000000);
+ ralink_clk_add("10000c00.uartlite", 40000000);
+}
+
+void __init ralink_of_remap(void)
+{
+ rt_sysc_membase = plat_of_remap_node("ralink,mt7620a-sysc");
+ rt_memc_membase = plat_of_remap_node("ralink,mt7620a-memc");
+
+ if (!rt_sysc_membase || !rt_memc_membase)
+ panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE);
+ const char *name;
+ u32 n0;
+ u32 n1;
+ u32 rev;
+ u32 cfg0;
+
+ n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+ n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+
+ if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) {
+ name = "MT7620N";
+ soc_info->compatible = "ralink,mt7620n-soc";
+ } else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) {
+ name = "MT7620A";
+ soc_info->compatible = "ralink,mt7620a-soc";
+ } else {
+ panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+ }
+
+ rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
+
+ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+ "Ralink %s ver:%u eco:%u",
+ name,
+ (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
+ (rev & CHIP_REV_ECO_MASK));
+
+ cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0);
+ dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK;
+
+ switch (dram_type) {
+ case SYSCFG0_DRAM_TYPE_SDRAM:
+ soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN;
+ soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX;
+ break;
+
+ case SYSCFG0_DRAM_TYPE_DDR1:
+ soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
+ soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
+ break;
+
+ case SYSCFG0_DRAM_TYPE_DDR2:
+ soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
+ soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
+ break;
+ default:
+ BUG();
+ }
+ soc_info->mem_base = MT7620_DRAM_BASE;
+}
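
The CHIP_NAME0/CHIP_NAME1 reads work because, on these SoCs, the name registers carry the ASCII identification string four bytes per word with the first character in the least significant byte, so two u32 compares are enough to tell MT7620A from MT7620N. A small sketch of that packing; the string used here is an example and the printed value is not claimed to equal MT7620A_CHIP_NAME0 from the header.

#include <stdio.h>
#include <stdint.h>

/* pack four ASCII characters the way the CHIP_NAME registers present them */
static uint32_t pack_name(const char *s)
{
	return (uint32_t)s[0] | ((uint32_t)s[1] << 8) |
	       ((uint32_t)s[2] << 16) | ((uint32_t)s[3] << 24);
}

int main(void)
{
	printf("\"MT76\" packs to 0x%08x\n", (unsigned)pack_name("MT76"));
	return 0;
}
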
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 4165e70775be..fb1569580def 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/init.h>
+#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
@@ -85,6 +86,14 @@ void __init plat_mem_setup(void)
* parsed resulting in our memory appearing
*/
__dt_setup_arch(&__dtb_start);
+
+ if (soc_info.mem_size)
+ add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M,
+ BOOT_MEM_RAM);
+ else
+ detect_memory_region(soc_info.mem_base,
+ soc_info.mem_size_min * SZ_1M,
+ soc_info.mem_size_max * SZ_1M);
}
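
plat_mem_setup() now takes one of two paths: if the SoC probe filled in an exact size (in MiB, like the min/max bounds), that region is registered directly; otherwise detect_memory_region() probes between the per-SoC bounds, both scaled by SZ_1M. A minimal standalone model of that branch; it only prints the arguments that would be handed to the real helpers.

#include <stdio.h>

#define SZ_1M (1024UL * 1024UL)

static void setup_memory(unsigned long base, unsigned long size_mb,
			 unsigned long min_mb, unsigned long max_mb)
{
	if (size_mb)
		printf("add_memory_region(0x%lx, %lu)\n", base, size_mb * SZ_1M);
	else
		printf("detect_memory_region(0x%lx, %lu, %lu)\n",
		       base, min_mb * SZ_1M, max_mb * SZ_1M);
}

int main(void)
{
	setup_memory(0x0, 32, 0, 0);	/* RT5350-style exact size */
	setup_memory(0x0, 0, 2, 64);	/* probe between 2 MiB and 64 MiB */
	return 0;
}
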
static int __init plat_of_setup(void)
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
new file mode 100644
index 000000000000..f87de1ab2198
--- /dev/null
+++ b/arch/mips/ralink/rt288x.c
@@ -0,0 +1,143 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt288x.h>
+
+#include "common.h"
+
+static struct ralink_pinmux_grp mode_mux[] = {
+ {
+ .name = "i2c",
+ .mask = RT2880_GPIO_MODE_I2C,
+ .gpio_first = 1,
+ .gpio_last = 2,
+ }, {
+ .name = "spi",
+ .mask = RT2880_GPIO_MODE_SPI,
+ .gpio_first = 3,
+ .gpio_last = 6,
+ }, {
+ .name = "uartlite",
+ .mask = RT2880_GPIO_MODE_UART0,
+ .gpio_first = 7,
+ .gpio_last = 14,
+ }, {
+ .name = "jtag",
+ .mask = RT2880_GPIO_MODE_JTAG,
+ .gpio_first = 17,
+ .gpio_last = 21,
+ }, {
+ .name = "mdio",
+ .mask = RT2880_GPIO_MODE_MDIO,
+ .gpio_first = 22,
+ .gpio_last = 23,
+ }, {
+ .name = "sdram",
+ .mask = RT2880_GPIO_MODE_SDRAM,
+ .gpio_first = 24,
+ .gpio_last = 39,
+ }, {
+ .name = "pci",
+ .mask = RT2880_GPIO_MODE_PCI,
+ .gpio_first = 40,
+ .gpio_last = 71,
+ }, {0}
+};
+
+static void rt288x_wdt_reset(void)
+{
+ u32 t;
+
+ /* enable WDT reset output on pin SRAM_CS_N */
+ t = rt_sysc_r32(SYSC_REG_CLKCFG);
+ t |= CLKCFG_SRAM_CS_N_WDT;
+ rt_sysc_w32(t, SYSC_REG_CLKCFG);
+}
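
rt288x_wdt_reset() and the other wdt_reset hooks all follow the same read-modify-write pattern on a system-controller register through rt_sysc_r32()/rt_sysc_w32(). A standalone sketch of that idiom against a stub register; the accessor names and the bit position are placeholders, since the kernel versions take a register offset and operate on rt_sysc_membase.

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_clkcfg = 0x00000100;	/* stands in for SYSC_REG_CLKCFG */

static uint32_t sysc_read(void) { return fake_clkcfg; }
static void sysc_write(uint32_t v) { fake_clkcfg = v; }

/* set the requested bits without disturbing the rest of the register */
static void sysc_set_bits(uint32_t set)
{
	uint32_t t = sysc_read();

	t |= set;
	sysc_write(t);
}

int main(void)
{
	sysc_set_bits(1u << 9);	/* pretend bit 9 is CLKCFG_SRAM_CS_N_WDT */
	printf("clkcfg: 0x%08x\n", (unsigned)fake_clkcfg);
	return 0;
}
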
+
+struct ralink_pinmux rt_gpio_pinmux = {
+ .mode = mode_mux,
+ .wdt_reset = rt288x_wdt_reset,
+};
+
+void __init ralink_clk_init(void)
+{
+ unsigned long cpu_rate;
+ u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+ t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK);
+
+ switch (t) {
+ case SYSTEM_CONFIG_CPUCLK_250:
+ cpu_rate = 250000000;
+ break;
+ case SYSTEM_CONFIG_CPUCLK_266:
+ cpu_rate = 266666667;
+ break;
+ case SYSTEM_CONFIG_CPUCLK_280:
+ cpu_rate = 280000000;
+ break;
+ case SYSTEM_CONFIG_CPUCLK_300:
+ cpu_rate = 300000000;
+ break;
+ }
+
+ ralink_clk_add("cpu", cpu_rate);
+ ralink_clk_add("300100.timer", cpu_rate / 2);
+ ralink_clk_add("300120.watchdog", cpu_rate / 2);
+ ralink_clk_add("300500.uart", cpu_rate / 2);
+ ralink_clk_add("300c00.uartlite", cpu_rate / 2);
+ ralink_clk_add("400000.ethernet", cpu_rate / 2);
+}
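
The CPUCLK decode above covers all four encodings of what is presumably a two-bit field, but a compiler cannot prove that and may warn that cpu_rate is used uninitialized. A defensive standalone variant that keeps the same rates while giving the variable a fallback; the encodings 0..3 are assumptions, not the SYSTEM_CONFIG_CPUCLK_* values.

#include <stdio.h>

int main(void)
{
	unsigned int sel = 2;			/* example CPUCLK field value */
	unsigned long cpu_rate = 250000000UL;	/* fallback if sel were bogus */

	switch (sel) {
	case 0: cpu_rate = 250000000UL; break;
	case 1: cpu_rate = 266666667UL; break;
	case 2: cpu_rate = 280000000UL; break;
	case 3: cpu_rate = 300000000UL; break;
	}
	printf("cpu: %lu Hz, peripherals: %lu Hz\n", cpu_rate, cpu_rate / 2);
	return 0;
}
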
+
+void __init ralink_of_remap(void)
+{
+ rt_sysc_membase = plat_of_remap_node("ralink,rt2880-sysc");
+ rt_memc_membase = plat_of_remap_node("ralink,rt2880-memc");
+
+ if (!rt_sysc_membase || !rt_memc_membase)
+ panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE);
+ const char *name;
+ u32 n0;
+ u32 n1;
+ u32 id;
+
+ n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+ n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+ id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
+
+ if (n0 == RT2880_CHIP_NAME0 && n1 == RT2880_CHIP_NAME1) {
+ soc_info->compatible = "ralink,rt2880-soc";
+ name = "RT2880";
+ } else {
+ panic("rt288x: unknown SoC, n0:%08x n1:%08x", n0, n1);
+ }
+
+ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+ "Ralink %s id:%u rev:%u",
+ name,
+ (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
+ (id & CHIP_ID_REV_MASK));
+
+ soc_info->mem_base = RT2880_SDRAM_BASE;
+ soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
+ soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
+}
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index 0a4bbdcf59d9..ca7ee3a33790 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -22,7 +22,7 @@
enum rt305x_soc_type rt305x_soc;
-struct ralink_pinmux_grp mode_mux[] = {
+static struct ralink_pinmux_grp mode_mux[] = {
{
.name = "i2c",
.mask = RT305X_GPIO_MODE_I2C,
@@ -61,7 +61,7 @@ struct ralink_pinmux_grp mode_mux[] = {
}, {0}
};
-struct ralink_pinmux_grp uart_mux[] = {
+static struct ralink_pinmux_grp uart_mux[] = {
{
.name = "uartf",
.mask = RT305X_GPIO_MODE_UARTF,
@@ -91,19 +91,19 @@ struct ralink_pinmux_grp uart_mux[] = {
.name = "gpio uartf",
.mask = RT305X_GPIO_MODE_GPIO_UARTF,
.gpio_first = RT305X_GPIO_7,
- .gpio_last = RT305X_GPIO_14,
+ .gpio_last = RT305X_GPIO_10,
}, {
.name = "gpio i2s",
.mask = RT305X_GPIO_MODE_GPIO_I2S,
.gpio_first = RT305X_GPIO_7,
- .gpio_last = RT305X_GPIO_14,
+ .gpio_last = RT305X_GPIO_10,
}, {
.name = "gpio",
.mask = RT305X_GPIO_MODE_GPIO,
}, {0}
};
-void rt305x_wdt_reset(void)
+static void rt305x_wdt_reset(void)
{
u32 t;
@@ -114,16 +114,53 @@ void rt305x_wdt_reset(void)
rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
}
-struct ralink_pinmux gpio_pinmux = {
+struct ralink_pinmux rt_gpio_pinmux = {
.mode = mode_mux,
.uart = uart_mux,
.uart_shift = RT305X_GPIO_MODE_UART0_SHIFT,
+ .uart_mask = RT305X_GPIO_MODE_UART0_MASK,
.wdt_reset = rt305x_wdt_reset,
};
+static unsigned long rt5350_get_mem_size(void)
+{
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
+ unsigned long ret;
+ u32 t;
+
+ t = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG);
+ t = (t >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) &
+ RT5350_SYSCFG0_DRAM_SIZE_MASK;
+
+ switch (t) {
+ case RT5350_SYSCFG0_DRAM_SIZE_2M:
+ ret = 2;
+ break;
+ case RT5350_SYSCFG0_DRAM_SIZE_8M:
+ ret = 8;
+ break;
+ case RT5350_SYSCFG0_DRAM_SIZE_16M:
+ ret = 16;
+ break;
+ case RT5350_SYSCFG0_DRAM_SIZE_32M:
+ ret = 32;
+ break;
+ case RT5350_SYSCFG0_DRAM_SIZE_64M:
+ ret = 64;
+ break;
+ default:
+ panic("rt5350: invalid DRAM size: %u", t);
+ break;
+ }
+
+ return ret;
+}
+
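
rt5350_get_mem_size() translates the SYSCFG0 DRAM-size field into a size in MiB, which prom_soc_init() later stores in soc_info->mem_size so plat_mem_setup() can skip probing. A standalone version of the decode using a lookup table; the field encodings 0..4 are illustrative, not the RT5350_SYSCFG0_DRAM_SIZE_* values.

#include <stdio.h>

static const unsigned int dram_mb[] = { 2, 8, 16, 32, 64 };

int main(void)
{
	unsigned int field = 3;		/* example value read from SYSCFG0 */

	if (field < sizeof(dram_mb) / sizeof(dram_mb[0]))
		printf("DRAM: %u MiB\n", dram_mb[field]);
	else
		printf("invalid DRAM size field: %u\n", field);
	return 0;
}
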
void __init ralink_clk_init(void)
{
unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate;
+ unsigned long wmac_rate = 40000000;
+
u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
if (soc_is_rt305x() || soc_is_rt3350()) {
@@ -176,11 +213,21 @@ void __init ralink_clk_init(void)
BUG();
}
+ if (soc_is_rt3352() || soc_is_rt5350()) {
+ u32 val = rt_sysc_r32(RT3352_SYSC_REG_SYSCFG0);
+
+ if (!(val & RT3352_CLKCFG0_XTAL_SEL))
+ wmac_rate = 20000000;
+ }
+
ralink_clk_add("cpu", cpu_rate);
ralink_clk_add("10000b00.spi", sys_rate);
ralink_clk_add("10000100.timer", wdt_rate);
+ ralink_clk_add("10000120.watchdog", wdt_rate);
ralink_clk_add("10000500.uart", uart_rate);
ralink_clk_add("10000c00.uartlite", uart_rate);
+ ralink_clk_add("10100000.ethernet", sys_rate);
+ ralink_clk_add("10180000.wmac", wmac_rate);
}
void __init ralink_of_remap(void)
@@ -239,4 +286,15 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
name,
(id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
(id & CHIP_ID_REV_MASK));
+
+ soc_info->mem_base = RT305X_SDRAM_BASE;
+ if (soc_is_rt5350()) {
+ soc_info->mem_size = rt5350_get_mem_size();
+ } else if (soc_is_rt305x() || soc_is_rt3350()) {
+ soc_info->mem_size_min = RT305X_MEM_SIZE_MIN;
+ soc_info->mem_size_max = RT305X_MEM_SIZE_MAX;
+ } else if (soc_is_rt3352()) {
+ soc_info->mem_size_min = RT3352_MEM_SIZE_MIN;
+ soc_info->mem_size_max = RT3352_MEM_SIZE_MAX;
+ }
}
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
new file mode 100644
index 000000000000..b474ac284b83
--- /dev/null
+++ b/arch/mips/ralink/rt3883.c
@@ -0,0 +1,246 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt3883.h>
+
+#include "common.h"
+
+static struct ralink_pinmux_grp mode_mux[] = {
+ {
+ .name = "i2c",
+ .mask = RT3883_GPIO_MODE_I2C,
+ .gpio_first = RT3883_GPIO_I2C_SD,
+ .gpio_last = RT3883_GPIO_I2C_SCLK,
+ }, {
+ .name = "spi",
+ .mask = RT3883_GPIO_MODE_SPI,
+ .gpio_first = RT3883_GPIO_SPI_CS0,
+ .gpio_last = RT3883_GPIO_SPI_MISO,
+ }, {
+ .name = "uartlite",
+ .mask = RT3883_GPIO_MODE_UART1,
+ .gpio_first = RT3883_GPIO_UART1_TXD,
+ .gpio_last = RT3883_GPIO_UART1_RXD,
+ }, {
+ .name = "jtag",
+ .mask = RT3883_GPIO_MODE_JTAG,
+ .gpio_first = RT3883_GPIO_JTAG_TDO,
+ .gpio_last = RT3883_GPIO_JTAG_TCLK,
+ }, {
+ .name = "mdio",
+ .mask = RT3883_GPIO_MODE_MDIO,
+ .gpio_first = RT3883_GPIO_MDIO_MDC,
+ .gpio_last = RT3883_GPIO_MDIO_MDIO,
+ }, {
+ .name = "ge1",
+ .mask = RT3883_GPIO_MODE_GE1,
+ .gpio_first = RT3883_GPIO_GE1_TXD0,
+ .gpio_last = RT3883_GPIO_GE1_RXCLK,
+ }, {
+ .name = "ge2",
+ .mask = RT3883_GPIO_MODE_GE2,
+ .gpio_first = RT3883_GPIO_GE2_TXD0,
+ .gpio_last = RT3883_GPIO_GE2_RXCLK,
+ }, {
+ .name = "pci",
+ .mask = RT3883_GPIO_MODE_PCI,
+ .gpio_first = RT3883_GPIO_PCI_AD0,
+ .gpio_last = RT3883_GPIO_PCI_AD31,
+ }, {
+ .name = "lna a",
+ .mask = RT3883_GPIO_MODE_LNA_A,
+ .gpio_first = RT3883_GPIO_LNA_PE_A0,
+ .gpio_last = RT3883_GPIO_LNA_PE_A2,
+ }, {
+ .name = "lna g",
+ .mask = RT3883_GPIO_MODE_LNA_G,
+ .gpio_first = RT3883_GPIO_LNA_PE_G0,
+ .gpio_last = RT3883_GPIO_LNA_PE_G2,
+ }, {0}
+};
+
+static struct ralink_pinmux_grp uart_mux[] = {
+ {
+ .name = "uartf",
+ .mask = RT3883_GPIO_MODE_UARTF,
+ .gpio_first = RT3883_GPIO_7,
+ .gpio_last = RT3883_GPIO_14,
+ }, {
+ .name = "pcm uartf",
+ .mask = RT3883_GPIO_MODE_PCM_UARTF,
+ .gpio_first = RT3883_GPIO_7,
+ .gpio_last = RT3883_GPIO_14,
+ }, {
+ .name = "pcm i2s",
+ .mask = RT3883_GPIO_MODE_PCM_I2S,
+ .gpio_first = RT3883_GPIO_7,
+ .gpio_last = RT3883_GPIO_14,
+ }, {
+ .name = "i2s uartf",
+ .mask = RT3883_GPIO_MODE_I2S_UARTF,
+ .gpio_first = RT3883_GPIO_7,
+ .gpio_last = RT3883_GPIO_14,
+ }, {
+ .name = "pcm gpio",
+ .mask = RT3883_GPIO_MODE_PCM_GPIO,
+ .gpio_first = RT3883_GPIO_11,
+ .gpio_last = RT3883_GPIO_14,
+ }, {
+ .name = "gpio uartf",
+ .mask = RT3883_GPIO_MODE_GPIO_UARTF,
+ .gpio_first = RT3883_GPIO_7,
+ .gpio_last = RT3883_GPIO_10,
+ }, {
+ .name = "gpio i2s",
+ .mask = RT3883_GPIO_MODE_GPIO_I2S,
+ .gpio_first = RT3883_GPIO_7,
+ .gpio_last = RT3883_GPIO_10,
+ }, {
+ .name = "gpio",
+ .mask = RT3883_GPIO_MODE_GPIO,
+ }, {0}
+};
+
+static struct ralink_pinmux_grp pci_mux[] = {
+ {
+ .name = "pci-dev",
+ .mask = 0,
+ .gpio_first = RT3883_GPIO_PCI_AD0,
+ .gpio_last = RT3883_GPIO_PCI_AD31,
+ }, {
+ .name = "pci-host2",
+ .mask = 1,
+ .gpio_first = RT3883_GPIO_PCI_AD0,
+ .gpio_last = RT3883_GPIO_PCI_AD31,
+ }, {
+ .name = "pci-host1",
+ .mask = 2,
+ .gpio_first = RT3883_GPIO_PCI_AD0,
+ .gpio_last = RT3883_GPIO_PCI_AD31,
+ }, {
+ .name = "pci-fnc",
+ .mask = 3,
+ .gpio_first = RT3883_GPIO_PCI_AD0,
+ .gpio_last = RT3883_GPIO_PCI_AD31,
+ }, {
+ .name = "pci-gpio",
+ .mask = 7,
+ .gpio_first = RT3883_GPIO_PCI_AD0,
+ .gpio_last = RT3883_GPIO_PCI_AD31,
+ }, {0}
+};
+
+static void rt3883_wdt_reset(void)
+{
+ u32 t;
+
+ /* enable WDT reset output on GPIO 2 */
+ t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
+ t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT;
+ rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
+}
+
+struct ralink_pinmux rt_gpio_pinmux = {
+ .mode = mode_mux,
+ .uart = uart_mux,
+ .uart_shift = RT3883_GPIO_MODE_UART0_SHIFT,
+ .uart_mask = RT3883_GPIO_MODE_UART0_MASK,
+ .wdt_reset = rt3883_wdt_reset,
+ .pci = pci_mux,
+ .pci_shift = RT3883_GPIO_MODE_PCI_SHIFT,
+ .pci_mask = RT3883_GPIO_MODE_PCI_MASK,
+};
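
Unlike the mode groups, the pci_mux[] entries carry field values 0 through 7 rather than individual mode bits, so selecting one means replacing the whole PCI field in GPIOMODE using pci_shift and pci_mask. A standalone sketch of that field update; the shift and width below are made up for the example.

#include <stdio.h>
#include <stdint.h>

/* replace a masked field inside a register value */
static uint32_t set_field(uint32_t reg, uint32_t val,
			  unsigned int shift, uint32_t mask)
{
	return (reg & ~(mask << shift)) | ((val & mask) << shift);
}

int main(void)
{
	uint32_t gpiomode = 0xffffffffu;

	/* select "pci-fnc" (field value 3) in an assumed 3-bit field at bit 11 */
	gpiomode = set_field(gpiomode, 3, 11, 0x7);
	printf("gpiomode: 0x%08x\n", (unsigned)gpiomode);
	return 0;
}
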
+
+void __init ralink_clk_init(void)
+{
+ unsigned long cpu_rate, sys_rate;
+ u32 syscfg0;
+ u32 clksel;
+ u32 ddr2;
+
+ syscfg0 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG0);
+ clksel = ((syscfg0 >> RT3883_SYSCFG0_CPUCLK_SHIFT) &
+ RT3883_SYSCFG0_CPUCLK_MASK);
+ ddr2 = syscfg0 & RT3883_SYSCFG0_DRAM_TYPE_DDR2;
+
+ switch (clksel) {
+ case RT3883_SYSCFG0_CPUCLK_250:
+ cpu_rate = 250000000;
+ sys_rate = (ddr2) ? 125000000 : 83000000;
+ break;
+ case RT3883_SYSCFG0_CPUCLK_384:
+ cpu_rate = 384000000;
+ sys_rate = (ddr2) ? 128000000 : 96000000;
+ break;
+ case RT3883_SYSCFG0_CPUCLK_480:
+ cpu_rate = 480000000;
+ sys_rate = (ddr2) ? 160000000 : 120000000;
+ break;
+ case RT3883_SYSCFG0_CPUCLK_500:
+ cpu_rate = 500000000;
+ sys_rate = (ddr2) ? 166000000 : 125000000;
+ break;
+ }
+
+ ralink_clk_add("cpu", cpu_rate);
+ ralink_clk_add("10000100.timer", sys_rate);
+ ralink_clk_add("10000120.watchdog", sys_rate);
+ ralink_clk_add("10000500.uart", 40000000);
+ ralink_clk_add("10000b00.spi", sys_rate);
+ ralink_clk_add("10000c00.uartlite", 40000000);
+ ralink_clk_add("10100000.ethernet", sys_rate);
+}
+
+void __init ralink_of_remap(void)
+{
+ rt_sysc_membase = plat_of_remap_node("ralink,rt3883-sysc");
+ rt_memc_membase = plat_of_remap_node("ralink,rt3883-memc");
+
+ if (!rt_sysc_membase || !rt_memc_membase)
+ panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE);
+ const char *name;
+ u32 n0;
+ u32 n1;
+ u32 id;
+
+ n0 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID0_3);
+ n1 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID4_7);
+ id = __raw_readl(sysc + RT3883_SYSC_REG_REVID);
+
+ if (n0 == RT3883_CHIP_NAME0 && n1 == RT3883_CHIP_NAME1) {
+ soc_info->compatible = "ralink,rt3883-soc";
+ name = "RT3883";
+ } else {
+ panic("rt3883: unknown SoC, n0:%08x n1:%08x", n0, n1);
+ }
+
+ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+ "Ralink %s ver:%u eco:%u",
+ name,
+ (id >> RT3883_REVID_VER_ID_SHIFT) & RT3883_REVID_VER_ID_MASK,
+ (id & RT3883_REVID_ECO_ID_MASK));
+
+ soc_info->mem_base = RT3883_SDRAM_BASE;
+ soc_info->mem_size_min = RT3883_MEM_SIZE_MIN;
+ soc_info->mem_size_max = RT3883_MEM_SIZE_MAX;
+}
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index 1d1919a44e88..7a53b1e28a93 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -114,7 +114,7 @@ void __init replicate_kernel_text()
* data structures on the first couple of pages of the first slot of each
* node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
*/
-pfn_t node_getfirstfree(cnodeid_t cnode)
+unsigned long node_getfirstfree(cnodeid_t cnode)
{
unsigned long loadbase = REP_BASE;
nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 5f2bddb1860e..1230f56429d7 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -255,14 +255,14 @@ static void __init dump_topology(void)
}
}
-static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
+static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot)
{
nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
- return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
+ return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
}
-static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
+static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
{
nasid_t nasid;
lboard_t *brd;
@@ -353,7 +353,7 @@ static void __init mlreset(void)
static void __init szmem(void)
{
- pfn_t slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
+ unsigned long slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
int slot;
cnodeid_t node;
@@ -390,10 +390,10 @@ static void __init szmem(void)
static void __init node_mem_init(cnodeid_t node)
{
- pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
- pfn_t slot_freepfn = node_getfirstfree(node);
+ unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
+ unsigned long slot_freepfn = node_getfirstfree(node);
unsigned long bootmap_size;
- pfn_t start_pfn, end_pfn;
+ unsigned long start_pfn, end_pfn;
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
@@ -467,7 +467,7 @@ void __init paging_init(void)
pagetable_init();
for_each_online_node(node) {
- pfn_t start_pfn, end_pfn;
+ unsigned long start_pfn, end_pfn;
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index fff58ac176f3..2e21b761cb9c 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -69,7 +69,7 @@ static void rt_set_mode(enum clock_event_mode mode,
/* Nothing to do ... */
}
-int rt_timer_irq;
+unsigned int rt_timer_irq;
static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
static DEFINE_PER_CPU(char [11], hub_rt_name);
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 52a3ecd40421..6fa2ae77fffd 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -30,7 +30,6 @@
#include <linux/serial.h>
#include <linux/serial_core.h>
-#include <bcm63xx_clk.h>
#include <bcm63xx_irq.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index ddabaa867b0d..700cac067b46 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -111,30 +111,16 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
switch (blank_mode) {
case VESA_NO_BLANKING:
- /* Turn on panel */
- fbdev->regs->lcd_control |= LCD_CONTROL_GO;
-#ifdef CONFIG_MIPS_PB1100
- if (fbdev->panel_idx == 1) {
- au_writew(au_readw(PB1100_G_CONTROL)
- | (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
- PB1100_G_CONTROL);
- }
-#endif
+ /* Turn on panel */
+ fbdev->regs->lcd_control |= LCD_CONTROL_GO;
au_sync();
break;
case VESA_VSYNC_SUSPEND:
case VESA_HSYNC_SUSPEND:
case VESA_POWERDOWN:
- /* Turn off panel */
- fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
-#ifdef CONFIG_MIPS_PB1100
- if (fbdev->panel_idx == 1) {
- au_writew(au_readw(PB1100_G_CONTROL)
- & ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
- PB1100_G_CONTROL);
- }
-#endif
+ /* Turn off panel */
+ fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
au_sync();
break;
default:
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 45f09362ee7b..ae88b719bd2e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1978,7 +1978,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
if (vcpu->kvm->mm != current->mm)
return -EIO;
-#if defined(CONFIG_S390) || defined(CONFIG_PPC)
+#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
/*
* Special cases: vcpu ioctls that are asynchronous to vcpu execution,
* so vcpu_load() would break it.