Diffstat (limited to 'drivers/clk/clk.c')
-rw-r--r--  drivers/clk/clk.c   606
1 file changed, 468 insertions(+), 138 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 251e45d6024d..934cfd18f72d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -18,14 +18,78 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/sched.h>
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
+static struct task_struct *prepare_owner;
+static struct task_struct *enable_owner;
+
+static int prepare_refcnt;
+static int enable_refcnt;
+
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+/*** locking ***/
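+/*
+ * prepare_lock and enable_lock are reentrant for the task that owns
+ * them: a recursive acquisition by the current task only bumps the
+ * matching refcount instead of deadlocking, so clk ops may call back
+ * into the framework while holding the lock.
+ */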
+static void clk_prepare_lock(void)
+{
+ if (!mutex_trylock(&prepare_lock)) {
+ if (prepare_owner == current) {
+ prepare_refcnt++;
+ return;
+ }
+ mutex_lock(&prepare_lock);
+ }
+ WARN_ON_ONCE(prepare_owner != NULL);
+ WARN_ON_ONCE(prepare_refcnt != 0);
+ prepare_owner = current;
+ prepare_refcnt = 1;
+}
+
+static void clk_prepare_unlock(void)
+{
+ WARN_ON_ONCE(prepare_owner != current);
+ WARN_ON_ONCE(prepare_refcnt == 0);
+
+ if (--prepare_refcnt)
+ return;
+ prepare_owner = NULL;
+ mutex_unlock(&prepare_lock);
+}
+
+static unsigned long clk_enable_lock(void)
+{
+ unsigned long flags;
+
+ if (!spin_trylock_irqsave(&enable_lock, flags)) {
+ if (enable_owner == current) {
+ enable_refcnt++;
+ return flags;
+ }
+ spin_lock_irqsave(&enable_lock, flags);
+ }
+ WARN_ON_ONCE(enable_owner != NULL);
+ WARN_ON_ONCE(enable_refcnt != 0);
+ enable_owner = current;
+ enable_refcnt = 1;
+ return flags;
+}
+
+static void clk_enable_unlock(unsigned long flags)
+{
+ WARN_ON_ONCE(enable_owner != current);
+ WARN_ON_ONCE(enable_refcnt == 0);
+
+ if (--enable_refcnt)
+ return;
+ enable_owner = NULL;
+ spin_unlock_irqrestore(&enable_lock, flags);
+}
+
/*** debugfs support ***/
#ifdef CONFIG_COMMON_CLK_DEBUG
@@ -35,6 +99,133 @@ static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;
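+
+/* print one clk as a single row of the debugfs clk_summary table */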
+static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
+{
+ if (!c)
+ return;
+
+ seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
+ level * 3 + 1, "",
+ 30 - level * 3, c->name,
+ c->enable_count, c->prepare_count, c->rate);
+ seq_printf(s, "\n");
+}
+
+static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
+ int level)
+{
+ struct clk *child;
+
+ if (!c)
+ return;
+
+ clk_summary_show_one(s, c, level);
+
+ hlist_for_each_entry(child, &c->children, child_node)
+ clk_summary_show_subtree(s, child, level + 1);
+}
+
+static int clk_summary_show(struct seq_file *s, void *data)
+{
+ struct clk *c;
+
+ seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
+ seq_printf(s, "---------------------------------------------------------------------\n");
+
+ clk_prepare_lock();
+
+ hlist_for_each_entry(c, &clk_root_list, child_node)
+ clk_summary_show_subtree(s, c, 0);
+
+ hlist_for_each_entry(c, &clk_orphan_list, child_node)
+ clk_summary_show_subtree(s, c, 0);
+
+ clk_prepare_unlock();
+
+ return 0;
+}
+
+static int clk_summary_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_summary_show, inode->i_private);
+}
+
+static const struct file_operations clk_summary_fops = {
+ .open = clk_summary_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
+{
+ if (!c)
+ return;
+
+ seq_printf(s, "\"%s\": { ", c->name);
+ seq_printf(s, "\"enable_count\": %d,", c->enable_count);
+ seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
+ seq_printf(s, "\"rate\": %lu", c->rate);
+}
+
+static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
+{
+ struct clk *child;
+
+ if (!c)
+ return;
+
+ clk_dump_one(s, c, level);
+
+ hlist_for_each_entry(child, &c->children, child_node) {
+ seq_printf(s, ",");
+ clk_dump_subtree(s, child, level + 1);
+ }
+
+ seq_printf(s, "}");
+}
+
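+/*
+ * Emit the whole clk tree as one JSON object: each clk becomes an object
+ * keyed by its name, and child clks are nested as additional members.
+ */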
+static int clk_dump(struct seq_file *s, void *data)
+{
+ struct clk *c;
+ bool first_node = true;
+
+ seq_printf(s, "{");
+
+ clk_prepare_lock();
+
+ hlist_for_each_entry(c, &clk_root_list, child_node) {
+ if (!first_node)
+ seq_printf(s, ",");
+ first_node = false;
+ clk_dump_subtree(s, c, 0);
+ }
+
+ hlist_for_each_entry(c, &clk_orphan_list, child_node) {
+ seq_printf(s, ",");
+ clk_dump_subtree(s, c, 0);
+ }
+
+ clk_prepare_unlock();
+
+ seq_printf(s, "}");
+ return 0;
+}
+
+static int clk_dump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_dump, inode->i_private);
+}
+
+static const struct file_operations clk_dump_fops = {
+ .open = clk_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
@@ -90,7 +281,6 @@ out:
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
struct clk *child;
- struct hlist_node *tmp;
int ret = -EINVAL;
if (!clk || !pdentry)
@@ -101,7 +291,7 @@ static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
if (ret)
goto out;
- hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node)
clk_debug_create_subtree(child, clk->dentry);
ret = 0;
@@ -153,6 +343,39 @@ out:
}
/**
+ * clk_debug_reparent - reparent clk node in the debugfs clk tree
+ * @clk: the clk being reparented
+ * @new_parent: the new clk parent, may be NULL
+ *
+ * Rename clk entry in the debugfs clk tree if debugfs has been
+ * initialized. Otherwise it bails out early since the debugfs clk tree
+ * will be created lazily by clk_debug_init as part of a late_initcall.
+ *
+ * Caller must hold prepare_lock.
+ */
+static void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
+{
+ struct dentry *d;
+ struct dentry *new_parent_d;
+
+ if (!inited)
+ return;
+
+ if (new_parent)
+ new_parent_d = new_parent->dentry;
+ else
+ new_parent_d = orphandir;
+
+ d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
+ new_parent_d, clk->name);
+ if (d)
+ clk->dentry = d;
+ else
+ pr_debug("%s: failed to rename debugfs entry for %s\n",
+ __func__, clk->name);
+}
+
+/**
* clk_debug_init - lazily create the debugfs clk tree visualization
*
* clks are often initialized very early during boot before memory can
@@ -167,51 +390,89 @@ out:
static int __init clk_debug_init(void)
{
struct clk *clk;
- struct hlist_node *tmp;
+ struct dentry *d;
rootdir = debugfs_create_dir("clk", NULL);
if (!rootdir)
return -ENOMEM;
+ d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
+ &clk_summary_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
+ &clk_dump_fops);
+ if (!d)
+ return -ENOMEM;
+
orphandir = debugfs_create_dir("orphans", rootdir);
if (!orphandir)
return -ENOMEM;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
- hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+ hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_debug_create_subtree(clk, rootdir);
- hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+ hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_debug_create_subtree(clk, orphandir);
inited = 1;
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
+static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
+{
+}
#endif
/* caller must hold prepare_lock */
+static void clk_unprepare_unused_subtree(struct clk *clk)
+{
+ struct clk *child;
+
+ if (!clk)
+ return;
+
+ hlist_for_each_entry(child, &clk->children, child_node)
+ clk_unprepare_unused_subtree(child);
+
+ if (clk->prepare_count)
+ return;
+
+ if (clk->flags & CLK_IGNORE_UNUSED)
+ return;
+
+ if (__clk_is_prepared(clk)) {
+ if (clk->ops->unprepare_unused)
+ clk->ops->unprepare_unused(clk->hw);
+ else if (clk->ops->unprepare)
+ clk->ops->unprepare(clk->hw);
+ }
+}
+
+/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
struct clk *child;
- struct hlist_node *tmp;
unsigned long flags;
if (!clk)
goto out;
- hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node)
clk_disable_unused_subtree(child);
- spin_lock_irqsave(&enable_lock, flags);
+ flags = clk_enable_lock();
if (clk->enable_count)
goto unlock_out;
@@ -232,26 +493,44 @@ static void clk_disable_unused_subtree(struct clk *clk)
}
unlock_out:
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(flags);
out:
return;
}
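+/*
+ * Booting with "clk_ignore_unused" on the kernel command line leaves
+ * unused clks ungated at late init, which can help when bringing up a
+ * new platform.
+ */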
+static bool clk_ignore_unused;
+static int __init clk_ignore_unused_setup(char *__unused)
+{
+ clk_ignore_unused = true;
+ return 1;
+}
+__setup("clk_ignore_unused", clk_ignore_unused_setup);
+
static int clk_disable_unused(void)
{
struct clk *clk;
- struct hlist_node *tmp;
- mutex_lock(&prepare_lock);
+ if (clk_ignore_unused) {
+ pr_warn("clk: Not disabling unused clocks\n");
+ return 0;
+ }
- hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+ clk_prepare_lock();
+
+ hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_disable_unused_subtree(clk);
- hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+ hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_disable_unused_subtree(clk);
- mutex_unlock(&prepare_lock);
+ hlist_for_each_entry(clk, &clk_root_list, child_node)
+ clk_unprepare_unused_subtree(clk);
+
+ hlist_for_each_entry(clk, &clk_orphan_list, child_node)
+ clk_unprepare_unused_subtree(clk);
+
+ clk_prepare_unlock();
return 0;
}
@@ -259,32 +538,33 @@ late_initcall(clk_disable_unused);
/*** helper functions ***/
-inline const char *__clk_get_name(struct clk *clk)
+const char *__clk_get_name(struct clk *clk)
{
return !clk ? NULL : clk->name;
}
+EXPORT_SYMBOL_GPL(__clk_get_name);
-inline struct clk_hw *__clk_get_hw(struct clk *clk)
+struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->hw;
}
-inline u8 __clk_get_num_parents(struct clk *clk)
+u8 __clk_get_num_parents(struct clk *clk)
{
return !clk ? 0 : clk->num_parents;
}
-inline struct clk *__clk_get_parent(struct clk *clk)
+struct clk *__clk_get_parent(struct clk *clk)
{
return !clk ? NULL : clk->parent;
}
-inline unsigned int __clk_get_enable_count(struct clk *clk)
+unsigned int __clk_get_enable_count(struct clk *clk)
{
return !clk ? 0 : clk->enable_count;
}
-inline unsigned int __clk_get_prepare_count(struct clk *clk)
+unsigned int __clk_get_prepare_count(struct clk *clk)
{
return !clk ? 0 : clk->prepare_count;
}
@@ -310,11 +590,32 @@ out:
return ret;
}
-inline unsigned long __clk_get_flags(struct clk *clk)
+unsigned long __clk_get_flags(struct clk *clk)
{
return !clk ? 0 : clk->flags;
}
+EXPORT_SYMBOL_GPL(__clk_get_flags);
+
+bool __clk_is_prepared(struct clk *clk)
+{
+ int ret;
+
+ if (!clk)
+ return false;
+
+ /*
+ * .is_prepared is optional for clocks that can prepare;
+ * fall back to the software usage counter if it is missing
+ */
+ if (!clk->ops->is_prepared) {
+ ret = clk->prepare_count ? 1 : 0;
+ goto out;
+ }
+
+ ret = clk->ops->is_prepared(clk->hw);
+out:
+ return !!ret;
+}
+
bool __clk_is_enabled(struct clk *clk)
{
int ret;
@@ -340,12 +641,11 @@ static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
struct clk *child;
struct clk *ret;
- struct hlist_node *tmp;
if (!strcmp(clk->name, name))
return clk;
- hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ hlist_for_each_entry(child, &clk->children, child_node) {
ret = __clk_lookup_subtree(name, child);
if (ret)
return ret;
@@ -358,20 +658,19 @@ struct clk *__clk_lookup(const char *name)
{
struct clk *root_clk;
struct clk *ret;
- struct hlist_node *tmp;
if (!name)
return NULL;
/* search the 'proper' clk tree first */
- hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
+ hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
ret = __clk_lookup_subtree(name, root_clk);
if (ret)
return ret;
}
/* if not found, then search the orphan tree */
- hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
+ hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
ret = __clk_lookup_subtree(name, root_clk);
if (ret)
return ret;
@@ -414,9 +713,9 @@ void __clk_unprepare(struct clk *clk)
*/
void clk_unprepare(struct clk *clk)
{
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
__clk_unprepare(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);
@@ -462,9 +761,9 @@ int clk_prepare(struct clk *clk)
{
int ret;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
ret = __clk_prepare(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -506,9 +805,9 @@ void clk_disable(struct clk *clk)
{
unsigned long flags;
- spin_lock_irqsave(&enable_lock, flags);
+ flags = clk_enable_lock();
__clk_disable(clk);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
@@ -559,9 +858,9 @@ int clk_enable(struct clk *clk)
unsigned long flags;
int ret;
- spin_lock_irqsave(&enable_lock, flags);
+ flags = clk_enable_lock();
ret = __clk_enable(clk);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(flags);
return ret;
}
@@ -606,9 +905,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long ret;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
ret = __clk_round_rate(clk, rate);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -668,7 +967,6 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
unsigned long old_rate;
unsigned long parent_rate = 0;
- struct hlist_node *tmp;
struct clk *child;
old_rate = clk->rate;
@@ -688,7 +986,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
if (clk->notifier_count && msg)
__clk_notify(clk, msg, old_rate, clk->rate);
- hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node)
__clk_recalc_rates(child, msg);
}
@@ -704,13 +1002,13 @@ unsigned long clk_get_rate(struct clk *clk)
{
unsigned long rate;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
__clk_recalc_rates(clk, 0);
rate = __clk_get_rate(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return rate;
}
@@ -734,7 +1032,6 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
*/
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
- struct hlist_node *tmp;
struct clk *child;
unsigned long new_rate;
int ret = NOTIFY_DONE;
@@ -744,16 +1041,16 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
else
new_rate = parent_rate;
- /* abort the rate change if a driver returns NOTIFY_BAD */
+ /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
if (clk->notifier_count)
ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
- if (ret == NOTIFY_BAD)
+ if (ret & NOTIFY_STOP_MASK)
goto out;
- hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ hlist_for_each_entry(child, &clk->children, child_node) {
ret = __clk_speculate_rates(child, new_rate);
- if (ret == NOTIFY_BAD)
+ if (ret & NOTIFY_STOP_MASK)
break;
}
@@ -764,11 +1061,10 @@ out:
static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
struct clk *child;
- struct hlist_node *tmp;
clk->new_rate = new_rate;
- hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ hlist_for_each_entry(child, &clk->children, child_node) {
if (child->ops->recalc_rate)
child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
else
@@ -839,20 +1135,19 @@ out:
*/
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
- struct hlist_node *tmp;
struct clk *child, *fail_clk = NULL;
int ret = NOTIFY_DONE;
if (clk->rate == clk->new_rate)
- return 0;
+ return NULL;
if (clk->notifier_count) {
ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
- if (ret == NOTIFY_BAD)
+ if (ret & NOTIFY_STOP_MASK)
fail_clk = clk;
}
- hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ hlist_for_each_entry(child, &clk->children, child_node) {
clk = clk_propagate_rate_change(child, event);
if (clk)
fail_clk = clk;
@@ -870,7 +1165,6 @@ static void clk_change_rate(struct clk *clk)
struct clk *child;
unsigned long old_rate;
unsigned long best_parent_rate = 0;
- struct hlist_node *tmp;
old_rate = clk->rate;
@@ -888,7 +1182,7 @@ static void clk_change_rate(struct clk *clk)
if (clk->notifier_count && old_rate != clk->rate)
__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
- hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node)
clk_change_rate(child);
}
@@ -919,7 +1213,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
int ret = 0;
/* prevent racing with updates to the clock topology */
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
/* bail early if nothing to do */
if (rate == clk->rate)
@@ -950,11 +1244,8 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
/* change the rates */
clk_change_rate(top);
- mutex_unlock(&prepare_lock);
-
- return 0;
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -970,9 +1261,9 @@ struct clk *clk_get_parent(struct clk *clk)
{
struct clk *parent;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
parent = __clk_get_parent(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return parent;
}
@@ -1036,16 +1327,8 @@ out:
return ret;
}
-void __clk_reparent(struct clk *clk, struct clk *new_parent)
+static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
-#ifdef CONFIG_COMMON_CLK_DEBUG
- struct dentry *d;
- struct dentry *new_parent_d;
-#endif
-
- if (!clk || !new_parent)
- return;
-
hlist_del(&clk->child_node);
if (new_parent)
@@ -1053,39 +1336,20 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
else
hlist_add_head(&clk->child_node, &clk_orphan_list);
-#ifdef CONFIG_COMMON_CLK_DEBUG
- if (!inited)
- goto out;
-
- if (new_parent)
- new_parent_d = new_parent->dentry;
- else
- new_parent_d = orphandir;
-
- d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
- new_parent_d, clk->name);
- if (d)
- clk->dentry = d;
- else
- pr_debug("%s: failed to rename debugfs entry for %s\n",
- __func__, clk->name);
-out:
-#endif
-
clk->parent = new_parent;
+}
+void __clk_reparent(struct clk *clk, struct clk *new_parent)
+{
+ clk_reparent(clk, new_parent);
+ clk_debug_reparent(clk, new_parent);
__clk_recalc_rates(clk, POST_RATE_CHANGE);
}
-static int __clk_set_parent(struct clk *clk, struct clk *parent)
+static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
- struct clk *old_parent;
- unsigned long flags;
- int ret = -EINVAL;
u8 i;
- old_parent = clk->parent;
-
if (!clk->parents)
clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
GFP_KERNEL);
@@ -1105,36 +1369,79 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
}
}
- if (i == clk->num_parents) {
- pr_debug("%s: clock %s is not a possible parent of clock %s\n",
- __func__, parent->name, clk->name);
- goto out;
- }
+ return i;
+}
- /* migrate prepare and enable */
+static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct clk *old_parent = clk->parent;
+ bool migrated_enable = false;
+
+ /* migrate prepare */
if (clk->prepare_count)
__clk_prepare(parent);
- /* FIXME replace with clk_is_enabled(clk) someday */
- spin_lock_irqsave(&enable_lock, flags);
- if (clk->enable_count)
+ flags = clk_enable_lock();
+
+ /* migrate enable */
+ if (clk->enable_count) {
__clk_enable(parent);
- spin_unlock_irqrestore(&enable_lock, flags);
+ migrated_enable = true;
+ }
+
+ /* update the clk tree topology */
+ clk_reparent(clk, parent);
+
+ clk_enable_unlock(flags);
/* change clock input source */
- ret = clk->ops->set_parent(clk->hw, i);
+ if (parent && clk->ops->set_parent)
+ ret = clk->ops->set_parent(clk->hw, p_index);
- /* clean up old prepare and enable */
- spin_lock_irqsave(&enable_lock, flags);
- if (clk->enable_count)
+ if (ret) {
+ /*
+ * The error handling is tricky due to that we need to release
+ * the spinlock while issuing the .set_parent callback. This
+ * means the new parent might have been enabled/disabled in
+ * between, which must be considered when doing rollback.
+ */
+ flags = clk_enable_lock();
+
+ clk_reparent(clk, old_parent);
+
+ if (migrated_enable && clk->enable_count) {
+ __clk_disable(parent);
+ } else if (migrated_enable && (clk->enable_count == 0)) {
+ __clk_disable(old_parent);
+ } else if (!migrated_enable && clk->enable_count) {
+ __clk_disable(parent);
+ __clk_enable(old_parent);
+ }
+
+ clk_enable_unlock(flags);
+
+ if (clk->prepare_count)
+ __clk_unprepare(parent);
+
+ return ret;
+ }
+
+ /* clean up enable for old parent if migration was done */
+ if (migrated_enable) {
+ flags = clk_enable_lock();
__clk_disable(old_parent);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(flags);
+ }
+ /* clean up prepare for old parent if migration was done */
if (clk->prepare_count)
__clk_unprepare(old_parent);
-out:
- return ret;
+ /* update debugfs with new clk tree topology */
+ clk_debug_reparent(clk, parent);
+ return 0;
}
/**
@@ -1152,44 +1459,59 @@ out:
int clk_set_parent(struct clk *clk, struct clk *parent)
{
int ret = 0;
+ u8 p_index = 0;
+ unsigned long p_rate = 0;
if (!clk || !clk->ops)
return -EINVAL;
- if (!clk->ops->set_parent)
+ /* verify ops for multi-parent clks */
+ if ((clk->num_parents > 1) && (!clk->ops->set_parent))
return -ENOSYS;
/* prevent racing with updates to the clock topology */
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
if (clk->parent == parent)
goto out;
+ /* check that we are allowed to re-parent if the clock is in use */
+ if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* try finding the new parent index */
+ if (parent) {
+ p_index = clk_fetch_parent_index(clk, parent);
+ p_rate = parent->rate;
+ if (p_index == clk->num_parents) {
+ pr_debug("%s: clk %s cannot be parent of clk %s\n",
+ __func__, parent->name, clk->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
/* propagate PRE_RATE_CHANGE notifications */
if (clk->notifier_count)
- ret = __clk_speculate_rates(clk, parent->rate);
+ ret = __clk_speculate_rates(clk, p_rate);
/* abort if a driver objects */
- if (ret == NOTIFY_STOP)
+ if (ret & NOTIFY_STOP_MASK)
goto out;
- /* only re-parent if the clock is not in use */
- if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
- ret = -EBUSY;
- else
- ret = __clk_set_parent(clk, parent);
+ /* do the re-parent */
+ ret = __clk_set_parent(clk, parent, p_index);
- /* propagate ABORT_RATE_CHANGE if .set_parent failed */
- if (ret) {
+ /* propagate rate recalculation accordingly */
+ if (ret)
__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
- goto out;
- }
-
- /* propagate rate recalculation downstream */
- __clk_reparent(clk, parent);
+ else
+ __clk_recalc_rates(clk, POST_RATE_CHANGE);
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1207,12 +1529,12 @@ int __clk_init(struct device *dev, struct clk *clk)
{
int i, ret = 0;
struct clk *orphan;
- struct hlist_node *tmp, *tmp2;
+ struct hlist_node *tmp2;
if (!clk)
return -EINVAL;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
/* check to see if a clock with this name is already registered */
if (__clk_lookup(clk->name)) {
@@ -1307,7 +1629,7 @@ int __clk_init(struct device *dev, struct clk *clk)
* walk the list of orphan clocks and reparent any that are children of
* this clock
*/
- hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) {
+ hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
if (orphan->ops->get_parent) {
i = orphan->ops->get_parent(orphan->hw);
if (!strcmp(clk->name, orphan->parent_names[i]))
@@ -1336,7 +1658,7 @@ int __clk_init(struct device *dev, struct clk *clk)
clk_debug_register(clk);
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1570,7 +1892,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
if (!clk || !nb)
return -EINVAL;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
/* search the list of notifiers for this clk */
list_for_each_entry(cn, &clk_notifier_list, node)
@@ -1594,7 +1916,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
clk->notifier_count++;
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1619,7 +1941,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
if (!clk || !nb)
return -EINVAL;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
@@ -1640,7 +1962,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
ret = -ENOENT;
}
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1663,6 +1985,11 @@ struct of_clk_provider {
void *data;
};
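+/*
+ * __clk_of_table is assembled by the linker from CLK_OF_DECLARE()
+ * entries; the zero-filled sentinel below terminates that table.
+ */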
+extern struct of_device_id __clk_of_table[];
+
+static const struct of_device_id __clk_of_table_sentinel
+ __used __section(__clk_of_table_end);
+
static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_lock);
@@ -1791,6 +2118,9 @@ void __init of_clk_init(const struct of_device_id *matches)
{
struct device_node *np;
+ if (!matches)
+ matches = __clk_of_table;
+
for_each_matching_node(np, matches) {
const struct of_device_id *match = of_match_node(matches, np);
of_clk_init_cb_t clk_init_cb = match->data;