summaryrefslogtreecommitdiff
path: root/arch/arm/mach-imx/gpc.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm/mach-imx/gpc.c')
-rw-r--r--  arch/arm/mach-imx/gpc.c  547
1 files changed, 508 insertions, 39 deletions
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index b54db47f6f32..551ecd0f2a4f 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -1,6 +1,7 @@
/*
- * Copyright 2011-2013 Freescale Semiconductor, Inc.
+ * Copyright 2011-2016 Freescale Semiconductor, Inc.
* Copyright 2011 Linaro Ltd.
+ * Copyright 2017 NXP.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
@@ -26,7 +27,13 @@
#include "hardware.h"
#define GPC_CNTR 0x000
+#define GPC_CNTR_PCIE_PHY_PDU_SHIFT 0x7
+#define GPC_CNTR_PCIE_PHY_PDN_SHIFT 0x6
+#define GPC_CNTR_L2_PGE 22
+#define PGC_PCIE_PHY_CTRL 0x200
+#define PGC_PCIE_PHY_PDN_EN 0x1
#define GPC_IMR1 0x008
+#define GPC_PGC_MF_PDN 0x220
#define GPC_PGC_GPU_PDN 0x260
#define GPC_PGC_GPU_PUPSCR 0x264
#define GPC_PGC_GPU_PDNSCR 0x268
@@ -35,6 +42,22 @@
#define GPC_PGC_CPU_PDNSCR 0x2a8
#define GPC_PGC_SW2ISO_SHIFT 0x8
#define GPC_PGC_SW_SHIFT 0x0
+#define GPC_PGC_DISP_PGCR_OFFSET 0x240
+#define GPC_PGC_DISP_PUPSCR_OFFSET 0x244
+#define GPC_PGC_DISP_PDNSCR_OFFSET 0x248
+#define GPC_PGC_DISP_SR_OFFSET 0x24c
+#define GPC_M4_LPSR 0x2c
+#define GPC_M4_LPSR_M4_SLEEPING_SHIFT 4
+#define GPC_M4_LPSR_M4_SLEEPING_MASK 0x1
+#define GPC_M4_LPSR_M4_SLEEP_HOLD_REQ_MASK 0x1
+#define GPC_M4_LPSR_M4_SLEEP_HOLD_REQ_SHIFT 0
+#define GPC_M4_LPSR_M4_SLEEP_HOLD_ACK_MASK 0x1
+#define GPC_M4_LPSR_M4_SLEEP_HOLD_ACK_SHIFT 1
+
+#define GPC_PGC_CPU_SW_SHIFT 0
+#define GPC_PGC_CPU_SW_MASK 0x3f
+#define GPC_PGC_CPU_SW2ISO_SHIFT 8
+#define GPC_PGC_CPU_SW2ISO_MASK 0x3f
#define IMR_NUM 4
#define GPC_MAX_IRQS (IMR_NUM * 32)
@@ -42,7 +65,15 @@
#define GPU_VPU_PUP_REQ BIT(1)
#define GPU_VPU_PDN_REQ BIT(0)
-#define GPC_CLK_MAX 6
+#define GPC_CLK_MAX 10
+#define DEFAULT_IPG_RATE 66000000
+#define GPC_PU_UP_DELAY_MARGIN 2
+
+/* for irq #74 and #75 */
+#define GPC_USB_VBUS_WAKEUP_IRQ_MASK 0xc00
+
+/* for irq #150 and #151 */
+#define GPC_ENET_WAKEUP_IRQ_MASK 0xC00000
struct pu_domain {
struct generic_pm_domain base;
@@ -51,9 +82,133 @@ struct pu_domain {
int num_clks;
};
+struct disp_domain {
+ struct generic_pm_domain base;
+ struct clk *clk[GPC_CLK_MAX];
+ int num_clks;
+};
+
static void __iomem *gpc_base;
static u32 gpc_wake_irqs[IMR_NUM];
static u32 gpc_saved_imrs[IMR_NUM];
+static u32 gpc_mf_irqs[IMR_NUM];
+static u32 gpc_mf_request_on[IMR_NUM];
+static DEFINE_SPINLOCK(gpc_lock);
+static struct notifier_block nb_pcie;
+static struct pu_domain imx6q_pu_domain;
+static bool pu_on; /* keep always on i.mx6qp */
+static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd);
+static void _imx6q_pm_pu_power_on(struct generic_pm_domain *genpd);
+static struct clk *ipg;
+
+void imx_gpc_add_m4_wake_up_irq(u32 hwirq, bool enable)
+{
+ unsigned int idx = hwirq / 32;
+ unsigned long flags;
+ u32 mask;
+
+ /* Sanity check for SPI irq */
+ if (hwirq < 32)
+ return;
+
+ mask = 1 << hwirq % 32;
+ spin_lock_irqsave(&gpc_lock, flags);
+ gpc_wake_irqs[idx] = enable ? gpc_wake_irqs[idx] | mask :
+ gpc_wake_irqs[idx] & ~mask;
+ spin_unlock_irqrestore(&gpc_lock, flags);
+}
+
+void imx_gpc_hold_m4_in_sleep(void)
+{
+ int val;
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+
+ /* wait M4 in wfi before asserting hold request */
+ while (!imx_gpc_is_m4_sleeping())
+ if (time_after(jiffies, timeout))
+ pr_err("M4 is NOT in expected sleep!\n");
+
+ val = readl_relaxed(gpc_base + GPC_M4_LPSR);
+ val &= ~(GPC_M4_LPSR_M4_SLEEP_HOLD_REQ_MASK <<
+ GPC_M4_LPSR_M4_SLEEP_HOLD_REQ_SHIFT);
+ writel_relaxed(val, gpc_base + GPC_M4_LPSR);
+
+ timeout = jiffies + msecs_to_jiffies(500);
+ while (readl_relaxed(gpc_base + GPC_M4_LPSR)
+ & (GPC_M4_LPSR_M4_SLEEP_HOLD_ACK_MASK <<
+ GPC_M4_LPSR_M4_SLEEP_HOLD_ACK_SHIFT))
+ if (time_after(jiffies, timeout))
+ pr_err("Wait M4 hold ack timeout!\n");
+}
+
+void imx_gpc_release_m4_in_sleep(void)
+{
+ int val;
+
+ val = readl_relaxed(gpc_base + GPC_M4_LPSR);
+ val |= GPC_M4_LPSR_M4_SLEEP_HOLD_REQ_MASK <<
+ GPC_M4_LPSR_M4_SLEEP_HOLD_REQ_SHIFT;
+ writel_relaxed(val, gpc_base + GPC_M4_LPSR);
+}
+
+unsigned int imx_gpc_is_m4_sleeping(void)
+{
+ if (readl_relaxed(gpc_base + GPC_M4_LPSR) &
+ (GPC_M4_LPSR_M4_SLEEPING_MASK <<
+ GPC_M4_LPSR_M4_SLEEPING_SHIFT))
+ return 1;
+
+ return 0;
+}
+
+bool imx_gpc_usb_wakeup_enabled(void)
+{
+ if (!(cpu_is_imx6sx() || cpu_is_imx6ul() || cpu_is_imx6ull()
+ || cpu_is_imx6sll()))
+ return false;
+
+ /*
+ * for SoC later than i.MX6SX, USB vbus wakeup
+ * only needs weak 2P5 on, stop_mode_config is
+ * NOT needed, so we check whether USB vbus wakeup
+ * is enabled(assume irq #74 and #75) to decide
+ * if to keep weak 2P5 on.
+ */
+ if (gpc_wake_irqs[1] & GPC_USB_VBUS_WAKEUP_IRQ_MASK)
+ return true;
+
+ return false;
+}
+
+bool imx_gpc_enet_wakeup_enabled(void)
+{
+ if (!cpu_is_imx6q())
+ return false;
+
+ if (gpc_wake_irqs[3] & GPC_ENET_WAKEUP_IRQ_MASK)
+ return true;
+
+ return false;
+}
+
+unsigned int imx_gpc_is_mf_mix_off(void)
+{
+ return readl_relaxed(gpc_base + GPC_PGC_MF_PDN);
+}
+
+static void imx_gpc_mf_mix_off(void)
+{
+ int i;
+
+ for (i = 0; i < IMR_NUM; i++)
+ if (((gpc_wake_irqs[i] | gpc_mf_request_on[i]) &
+ gpc_mf_irqs[i]) != 0)
+ return;
+
+ pr_info("Turn off M/F mix!\n");
+ /* turn off mega/fast mix */
+ writel_relaxed(0x1, gpc_base + GPC_PGC_MF_PDN);
+}
void imx_gpc_set_arm_power_up_timing(u32 sw2iso, u32 sw)
{
@@ -77,6 +232,14 @@ void imx_gpc_pre_suspend(bool arm_power_off)
void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
int i;
+ if (cpu_is_imx6q() && imx_get_soc_revision() >= IMX_CHIP_REVISION_2_0)
+ _imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
+
+ /* power down the mega-fast power domain */
+ if ((cpu_is_imx6sx() || cpu_is_imx6ul() || cpu_is_imx6ull()
+ || cpu_is_imx6sll()) && arm_power_off)
+ imx_gpc_mf_mix_off();
+
/* Tell GPC to power off ARM core when suspend */
if (arm_power_off)
imx_gpc_set_arm_power_in_lpm(arm_power_off);
@@ -92,8 +255,15 @@ void imx_gpc_post_resume(void)
void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
int i;
+ if (cpu_is_imx6q() && imx_get_soc_revision() >= IMX_CHIP_REVISION_2_0)
+ _imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
+
/* Keep ARM core powered on for other low-power modes */
imx_gpc_set_arm_power_in_lpm(false);
+ /* Keep M/F mix powered on for other low-power modes */
+ if (cpu_is_imx6sx() || cpu_is_imx6ul() || cpu_is_imx6ull()
+ || cpu_is_imx6sll())
+ writel_relaxed(0x0, gpc_base + GPC_PGC_MF_PDN);
for (i = 0; i < IMR_NUM; i++)
writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
@@ -102,11 +272,14 @@ void imx_gpc_post_resume(void)
static int imx_gpc_irq_set_wake(struct irq_data *d, unsigned int on)
{
unsigned int idx = d->hwirq / 32;
+ unsigned long flags;
u32 mask;
mask = 1 << d->hwirq % 32;
+ spin_lock_irqsave(&gpc_lock, flags);
gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
gpc_wake_irqs[idx] & ~mask;
+ spin_unlock_irqrestore(&gpc_lock, flags);
/*
* Do *not* call into the parent, as the GIC doesn't have any
@@ -238,11 +411,102 @@ static const struct irq_domain_ops imx_gpc_domain_ops = {
.free = irq_domain_free_irqs_common,
};
+int imx_gpc_mf_power_on(unsigned int irq, unsigned int on)
+{
+ struct irq_desc *d = irq_to_desc(irq);
+ unsigned int idx = d->irq_data.hwirq / 32;
+ unsigned long flags;
+ u32 mask;
+
+ mask = 1 << (d->irq_data.hwirq % 32);
+ spin_lock_irqsave(&gpc_lock, flags);
+ gpc_mf_request_on[idx] = on ? gpc_mf_request_on[idx] | mask :
+ gpc_mf_request_on[idx] & ~mask;
+ spin_unlock_irqrestore(&gpc_lock, flags);
+
+ return 0;
+}
+
+int imx_gpc_mf_request_on(unsigned int irq, unsigned int on)
+{
+ if (cpu_is_imx6sx() || cpu_is_imx6ul() || cpu_is_imx6ull()
+ || cpu_is_imx6sll())
+ return imx_gpc_mf_power_on(irq, on);
+ else if (cpu_is_imx7d())
+ return imx_gpcv2_mf_power_on(irq, on);
+ else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_gpc_mf_request_on);
+
+void imx_gpc_switch_pupscr_clk(bool flag)
+{
+ static u32 pupscr_sw2iso, pupscr_sw;
+ u32 ratio, pupscr = readl_relaxed(gpc_base + GPC_PGC_CPU_PUPSCR);
+
+ if (flag) {
+ /* save the init clock setting IPG/2048 for IPG@66Mhz */
+ pupscr_sw2iso = (pupscr >> GPC_PGC_CPU_SW2ISO_SHIFT) &
+ GPC_PGC_CPU_SW2ISO_MASK;
+ pupscr_sw = (pupscr >> GPC_PGC_CPU_SW_SHIFT) &
+ GPC_PGC_CPU_SW_MASK;
+ /*
+ * i.MX6UL TO1.0 ARM power up uses IPG/2048 as clock source,
+ * from TO1.1, PGC_CPU_PUPSCR bit [5] is re-defined to switch
+ * clock to IPG/32, enable this bit to speed up the ARM power
+ * up process in low power idle case(IPG@1.5Mhz). So the sw and
+ * sw2iso need to be adjusted as below:
+ * sw_new(sw2iso_new) = (2048 * 1.5 / 66 * 32) * sw(sw2iso)
+ */
+ ratio = 3072 / (66 * 32);
+ pupscr &= ~(GPC_PGC_CPU_SW_MASK << GPC_PGC_CPU_SW_SHIFT |
+ GPC_PGC_CPU_SW2ISO_MASK << GPC_PGC_CPU_SW2ISO_SHIFT);
+ pupscr |= (ratio * pupscr_sw + 1) << GPC_PGC_CPU_SW_SHIFT |
+ 1 << 5 | (ratio * pupscr_sw2iso + 1) <<
+ GPC_PGC_CPU_SW2ISO_SHIFT;
+ writel_relaxed(pupscr, gpc_base + GPC_PGC_CPU_PUPSCR);
+ } else {
+ /* restore back after exit from low power idle */
+ pupscr &= ~(GPC_PGC_CPU_SW_MASK << GPC_PGC_CPU_SW_SHIFT |
+ GPC_PGC_CPU_SW2ISO_MASK << GPC_PGC_CPU_SW2ISO_SHIFT);
+ pupscr |= pupscr_sw << GPC_PGC_CPU_SW_SHIFT |
+ pupscr_sw2iso << GPC_PGC_CPU_SW2ISO_SHIFT;
+ writel_relaxed(pupscr, gpc_base + GPC_PGC_CPU_PUPSCR);
+ }
+}
+
+static int imx_pcie_regulator_notify(struct notifier_block *nb,
+ unsigned long event,
+ void *ignored)
+{
+ u32 value = readl_relaxed(gpc_base + GPC_CNTR);
+
+ switch (event) {
+ case REGULATOR_EVENT_PRE_DO_ENABLE:
+ value |= 1 << GPC_CNTR_PCIE_PHY_PDU_SHIFT;
+ writel_relaxed(value, gpc_base + GPC_CNTR);
+ break;
+ case REGULATOR_EVENT_PRE_DO_DISABLE:
+ value |= 1 << GPC_CNTR_PCIE_PHY_PDN_SHIFT;
+ writel_relaxed(value, gpc_base + GPC_CNTR);
+ writel_relaxed(PGC_PCIE_PHY_PDN_EN,
+ gpc_base + PGC_PCIE_PHY_CTRL);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
static int __init imx_gpc_init(struct device_node *node,
struct device_node *parent)
{
struct irq_domain *parent_domain, *domain;
int i;
+ u32 val;
+ u32 cpu_pupscr_sw2iso, cpu_pupscr_sw;
+ u32 cpu_pdnscr_iso2sw, cpu_pdnscr_iso;
if (!parent) {
pr_err("%s: no parent, giving up\n", node->full_name);
@@ -271,12 +535,70 @@ static int __init imx_gpc_init(struct device_node *node,
for (i = 0; i < IMR_NUM; i++)
writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);
+ /* Read supported wakeup source in M/F domain */
+ if (cpu_is_imx6sx() || cpu_is_imx6ul() || cpu_is_imx6ull()
+ || cpu_is_imx6sll()) {
+ of_property_read_u32_index(node, "fsl,mf-mix-wakeup-irq", 0,
+ &gpc_mf_irqs[0]);
+ of_property_read_u32_index(node, "fsl,mf-mix-wakeup-irq", 1,
+ &gpc_mf_irqs[1]);
+ of_property_read_u32_index(node, "fsl,mf-mix-wakeup-irq", 2,
+ &gpc_mf_irqs[2]);
+ of_property_read_u32_index(node, "fsl,mf-mix-wakeup-irq", 3,
+ &gpc_mf_irqs[3]);
+ if (!(gpc_mf_irqs[0] | gpc_mf_irqs[1] |
+ gpc_mf_irqs[2] | gpc_mf_irqs[3]))
+ pr_info("No wakeup source in Mega/Fast domain found!\n");
+ }
+
+ /* clear the L2_PGE bit on i.MX6SLL */
+ if (cpu_is_imx6sll()) {
+ val = readl_relaxed(gpc_base + GPC_CNTR);
+ val &= ~(1 << GPC_CNTR_L2_PGE);
+ writel_relaxed(val, gpc_base + GPC_CNTR);
+ }
+
/*
* Clear the OF_POPULATED flag set in of_irq_init so that
* later the GPC power domain driver will not be skipped.
*/
of_node_clear_flag(node, OF_POPULATED);
+ /*
+ * If there are CPU isolation timing settings in dts,
+ * update them according to dts, otherwise, keep them
+ * with default value in registers.
+ */
+ cpu_pupscr_sw2iso = cpu_pupscr_sw =
+ cpu_pdnscr_iso2sw = cpu_pdnscr_iso = 0;
+
+ /* Read CPU isolation setting for GPC */
+ of_property_read_u32(node, "fsl,cpu_pupscr_sw2iso", &cpu_pupscr_sw2iso);
+ of_property_read_u32(node, "fsl,cpu_pupscr_sw", &cpu_pupscr_sw);
+ of_property_read_u32(node, "fsl,cpu_pdnscr_iso2sw", &cpu_pdnscr_iso2sw);
+ of_property_read_u32(node, "fsl,cpu_pdnscr_iso", &cpu_pdnscr_iso);
+
+ /* Return if no property found in dtb */
+ if ((cpu_pupscr_sw2iso | cpu_pupscr_sw
+ | cpu_pdnscr_iso2sw | cpu_pdnscr_iso) == 0)
+ return 0;
+
+ /* Update CPU PUPSCR timing if it is defined in dts */
+ val = readl_relaxed(gpc_base + GPC_PGC_CPU_PUPSCR);
+ val &= ~(GPC_PGC_CPU_SW2ISO_MASK << GPC_PGC_CPU_SW2ISO_SHIFT);
+ val &= ~(GPC_PGC_CPU_SW_MASK << GPC_PGC_CPU_SW_SHIFT);
+ val |= cpu_pupscr_sw2iso << GPC_PGC_CPU_SW2ISO_SHIFT;
+ val |= cpu_pupscr_sw << GPC_PGC_CPU_SW_SHIFT;
+ writel_relaxed(val, gpc_base + GPC_PGC_CPU_PUPSCR);
+
+ /* Update CPU PDNSCR timing if it is defined in dts */
+ val = readl_relaxed(gpc_base + GPC_PGC_CPU_PDNSCR);
+ val &= ~(GPC_PGC_CPU_SW2ISO_MASK << GPC_PGC_CPU_SW2ISO_SHIFT);
+ val &= ~(GPC_PGC_CPU_SW_MASK << GPC_PGC_CPU_SW_SHIFT);
+ val |= cpu_pdnscr_iso2sw << GPC_PGC_CPU_SW2ISO_SHIFT;
+ val |= cpu_pdnscr_iso << GPC_PGC_CPU_SW_SHIFT;
+ writel_relaxed(val, gpc_base + GPC_PGC_CPU_PDNSCR);
+
return 0;
}
IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);
@@ -317,12 +639,19 @@ static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
/* Wait ISO + ISO2SW IPG clock cycles */
ndelay((iso + iso2sw) * 1000 / 66);
+
+ while (readl_relaxed(gpc_base + GPC_CNTR) & GPU_VPU_PDN_REQ)
+ ;
}
static int imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
{
struct pu_domain *pu = container_of(genpd, struct pu_domain, base);
+ if (&imx6q_pu_domain == pu && pu_on && cpu_is_imx6q() &&
+ imx_get_soc_revision() >= IMX_CHIP_REVISION_2_0)
+ return 0;
+
_imx6q_pm_pu_power_off(genpd);
if (pu->reg)
@@ -331,18 +660,11 @@ static int imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
return 0;
}
-static int imx6q_pm_pu_power_on(struct generic_pm_domain *genpd)
+static void _imx6q_pm_pu_power_on(struct generic_pm_domain *genpd)
{
struct pu_domain *pu = container_of(genpd, struct pu_domain, base);
- int i, ret, sw, sw2iso;
- u32 val;
-
- if (pu->reg)
- ret = regulator_enable(pu->reg);
- if (pu->reg && ret) {
- pr_err("%s: failed to enable regulator: %d\n", __func__, ret);
- return ret;
- }
+ int i;
+ u32 val, ipg_rate = clk_get_rate(ipg);
/* Enable reset clocks for all devices in the PU domain */
for (i = 0; i < pu->num_clks; i++)
@@ -351,26 +673,108 @@ static int imx6q_pm_pu_power_on(struct generic_pm_domain *genpd)
+ /* Gate off PU domain when GPU/VPU is powered down */
writel_relaxed(0x1, gpc_base + GPC_PGC_GPU_PDN);
- /* Read ISO and ISO2SW power down delays */
- val = readl_relaxed(gpc_base + GPC_PGC_GPU_PUPSCR);
- sw = val & 0x3f;
- sw2iso = (val >> 8) & 0x3f;
-
/* Request GPC to power up GPU/VPU */
val = readl_relaxed(gpc_base + GPC_CNTR);
val |= GPU_VPU_PUP_REQ;
writel_relaxed(val, gpc_base + GPC_CNTR);
- /* Wait ISO + ISO2SW IPG clock cycles */
- ndelay((sw + sw2iso) * 1000 / 66);
+ while (readl_relaxed(gpc_base + GPC_CNTR) & GPU_VPU_PUP_REQ)
+ ;
+ /* Wait power switch done */
+ udelay(2 * DEFAULT_IPG_RATE / ipg_rate +
+ GPC_PU_UP_DELAY_MARGIN);
/* Disable reset clocks for all devices in the PU domain */
for (i = 0; i < pu->num_clks; i++)
clk_disable_unprepare(pu->clk[i]);
+}
+
+static int imx6q_pm_pu_power_on(struct generic_pm_domain *genpd)
+{
+ struct pu_domain *pu = container_of(genpd, struct pu_domain, base);
+ int ret;
+
+ if (cpu_is_imx6q() && imx_get_soc_revision() >= IMX_CHIP_REVISION_2_0
+ && &imx6q_pu_domain == pu) {
+ if (!pu_on)
+ pu_on = true;
+ else
+ return 0;
+ }
+
+ if (pu->reg)
+ ret = regulator_enable(pu->reg);
+ if (pu->reg && ret) {
+ pr_err("%s: failed to enable regulator: %d\n", __func__, ret);
+ return ret;
+ }
+
+ _imx6q_pm_pu_power_on(genpd);
return 0;
}
+static int imx_pm_dispmix_on(struct generic_pm_domain *genpd)
+{
+ struct disp_domain *disp = container_of(genpd, struct disp_domain, base);
+ u32 val = readl_relaxed(gpc_base + GPC_CNTR);
+ int i;
+ u32 ipg_rate = clk_get_rate(ipg);
+
+ if ((cpu_is_imx6sl() &&
+ imx_get_soc_revision() >= IMX_CHIP_REVISION_1_2) || cpu_is_imx6sx()) {
+
+ /* Enable reset clocks for all devices in the disp domain */
+ for (i = 0; i < disp->num_clks; i++)
+ clk_prepare_enable(disp->clk[i]);
+
+ writel_relaxed(0x0, gpc_base + GPC_PGC_DISP_PGCR_OFFSET);
+ writel_relaxed(0x20 | val, gpc_base + GPC_CNTR);
+ while (readl_relaxed(gpc_base + GPC_CNTR) & 0x20)
+ ;
+
+ writel_relaxed(0x1, gpc_base + GPC_PGC_DISP_SR_OFFSET);
+
+ /* Wait power switch done */
+ udelay(2 * DEFAULT_IPG_RATE / ipg_rate +
+ GPC_PU_UP_DELAY_MARGIN);
+
+ /* Disable reset clocks for all devices in the disp domain */
+ for (i = 0; i < disp->num_clks; i++)
+ clk_disable_unprepare(disp->clk[i]);
+ }
+ return 0;
+}
+
+static int imx_pm_dispmix_off(struct generic_pm_domain *genpd)
+{
+ struct disp_domain *disp = container_of(genpd, struct disp_domain, base);
+ u32 val = readl_relaxed(gpc_base + GPC_CNTR);
+ int i;
+
+ if ((cpu_is_imx6sl() &&
+ imx_get_soc_revision() >= IMX_CHIP_REVISION_1_2) || cpu_is_imx6sx()) {
+
+ /* Enable reset clocks for all devices in the disp domain */
+ for (i = 0; i < disp->num_clks; i++)
+ clk_prepare_enable(disp->clk[i]);
+
+ writel_relaxed(0xFFFFFFFF,
+ gpc_base + GPC_PGC_DISP_PUPSCR_OFFSET);
+ writel_relaxed(0xFFFFFFFF,
+ gpc_base + GPC_PGC_DISP_PDNSCR_OFFSET);
+ writel_relaxed(0x1, gpc_base + GPC_PGC_DISP_PGCR_OFFSET);
+ writel_relaxed(0x10 | val, gpc_base + GPC_CNTR);
+ while (readl_relaxed(gpc_base + GPC_CNTR) & 0x10)
+ ;
+
+ /* Disable reset clocks for all devices in the disp domain */
+ for (i = 0; i < disp->num_clks; i++)
+ clk_disable_unprepare(disp->clk[i]);
+ }
+ return 0;
+}
+
static struct generic_pm_domain imx6q_arm_domain = {
.name = "ARM",
};
@@ -390,14 +794,18 @@ static struct pu_domain imx6q_pu_domain = {
},
};
-static struct generic_pm_domain imx6sl_display_domain = {
- .name = "DISPLAY",
+static struct disp_domain imx6s_display_domain = {
+ .base = {
+ .name = "DISPLAY",
+ .power_off = imx_pm_dispmix_off,
+ .power_on = imx_pm_dispmix_on,
+ },
};
static struct generic_pm_domain *imx_gpc_domains[] = {
&imx6q_arm_domain,
&imx6q_pu_domain.base,
- &imx6sl_display_domain,
+ &imx6s_display_domain.base,
};
static struct genpd_onecell_data imx_gpc_onecell_data = {
@@ -408,30 +816,59 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
{
struct clk *clk;
- int i, ret;
+ bool is_off;
+ int pu_clks, disp_clks, ipg_clks = 1;
+ int i = 0, k = 0, ret;
imx6q_pu_domain.reg = pu_reg;
- for (i = 0; ; i++) {
+ if ((cpu_is_imx6sl() &&
+ imx_get_soc_revision() >= IMX_CHIP_REVISION_1_2)) {
+ pu_clks = 2 ;
+ disp_clks = 5;
+ } else if (cpu_is_imx6sx()) {
+ pu_clks = 1;
+ disp_clks = 7;
+ } else {
+ pu_clks = 6;
+ disp_clks = 0;
+ }
+
+ /* Get pu domain clks */
+ for (i = 0; i < pu_clks ; i++) {
clk = of_clk_get(dev->of_node, i);
if (IS_ERR(clk))
break;
- if (i >= GPC_CLK_MAX) {
- dev_err(dev, "more than %d clocks\n", GPC_CLK_MAX);
- goto clk_err;
- }
imx6q_pu_domain.clk[i] = clk;
}
imx6q_pu_domain.num_clks = i;
- /* Enable power always in case bootloader disabled it. */
- imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
+ ipg = of_clk_get(dev->of_node, pu_clks);
- if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
- return 0;
+ /* Get disp domain clks */
+ for (i = pu_clks + ipg_clks; i < pu_clks + ipg_clks + disp_clks;
+ i++) {
+ clk = of_clk_get(dev->of_node, i);
+ if (IS_ERR(clk))
+ break;
+ imx6s_display_domain.clk[k++] = clk;
+ }
+ imx6s_display_domain.num_clks = k;
+
+ is_off = IS_ENABLED(CONFIG_PM);
+ if (is_off && !(cpu_is_imx6q() &&
+ imx_get_soc_revision() >= IMX_CHIP_REVISION_2_0)) {
+ _imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
+ } else {
+ /*
+ * Enable power if compiled without CONFIG_PM in case the
+ * bootloader disabled it.
+ */
+ imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
+ }
for (i = 0; i < ARRAY_SIZE(imx_gpc_domains); i++)
- pm_genpd_init(imx_gpc_domains[i], NULL, false);
+ pm_genpd_init(imx_gpc_domains[i], NULL, is_off);
ret = of_genpd_add_provider_onecell(dev->of_node,
&imx_gpc_onecell_data);
@@ -442,10 +879,6 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
power_off:
imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
-clk_err:
- while (i--)
- clk_put(imx6q_pu_domain.clk[i]);
- imx6q_pu_domain.reg = NULL;
return -EINVAL;
}
@@ -453,6 +886,7 @@ static int imx_gpc_probe(struct platform_device *pdev)
{
struct regulator *pu_reg;
int ret;
+ u32 bypass = 0;
/* bail out if DT too old and doesn't provide the necessary info */
if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells"))
@@ -463,11 +897,46 @@ static int imx_gpc_probe(struct platform_device *pdev)
pu_reg = NULL;
if (IS_ERR(pu_reg)) {
ret = PTR_ERR(pu_reg);
- dev_err(&pdev->dev, "failed to get pu regulator: %d\n", ret);
+ if (ret == -EPROBE_DEFER)
+ dev_warn(&pdev->dev, "pu regulator not ready, retry\n");
+ else
+ dev_err(&pdev->dev, "failed to get pu regulator: %d\n",
+ ret);
return ret;
}
- return imx_gpc_genpd_init(&pdev->dev, pu_reg);
+ if (of_property_read_u32(pdev->dev.of_node, "fsl,ldo-bypass", &bypass))
+ dev_warn(&pdev->dev,
+ "no fsl,ldo-bypass found!\n");
+ /* We only bypass pu since arm and soc have been set in u-boot */
+ if (pu_reg && bypass)
+ regulator_allow_bypass(pu_reg, true);
+
+ if (cpu_is_imx6sx()) {
+ struct regulator *pcie_reg;
+
+ pcie_reg = devm_regulator_get(&pdev->dev, "pcie-phy");
+ if (IS_ERR(pcie_reg)) {
+ ret = PTR_ERR(pcie_reg);
+ dev_info(&pdev->dev, "pcie regulator not ready.\n");
+ return ret;
+ }
+ nb_pcie.notifier_call = &imx_pcie_regulator_notify;
+
+ ret = regulator_register_notifier(pcie_reg, &nb_pcie);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "pcie regulator notifier request failed\n");
+ return ret;
+ }
+ }
+
+ ret = imx_gpc_genpd_init(&pdev->dev, pu_reg);
+ if (ret)
+ return ret;
+ dev_info(&pdev->dev, "Registered imx-gpc\n");
+
+ return 0;
}
static const struct of_device_id imx_gpc_dt_ids[] = {