Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/class.c            |  2
-rw-r--r--  drivers/base/devres.c           | 19
-rw-r--r--  drivers/base/power/clock_ops.c  |  6
-rw-r--r--  drivers/base/power/domain.c     | 21
-rw-r--r--  drivers/base/power/opp/core.c   | 48
-rw-r--r--  drivers/base/power/opp/cpu.c    | 13
-rw-r--r--  drivers/base/power/opp/opp.h    |  3
-rw-r--r--  drivers/base/power/wakeirq.c    |  6
-rw-r--r--  drivers/base/property.c         | 32
9 files changed, 98 insertions, 52 deletions
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 6e810881e48b..71059e32bebc 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -406,7 +406,7 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
*
* Note, you will need to drop the reference with put_device() after use.
*
- * @fn is allowed to do anything including calling back into class
+ * @match is allowed to do anything including calling back into class
* code. There's no locking restriction.
*/
struct device *class_find_device(struct class *class, struct device *start,
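A minimal caller sketch for the @match callback documented above, using a hypothetical example_class and a simple name comparison; whatever the callback does, the reference returned by class_find_device() still has to be dropped with put_device():

#include <linux/device.h>
#include <linux/string.h>

static int match_by_name(struct device *dev, const void *data)
{
	return sysfs_streq(dev_name(dev), data);
}

static bool example_device_exists(struct class *example_class, const char *name)
{
	struct device *dev;

	dev = class_find_device(example_class, NULL, name, match_by_name);
	if (!dev)
		return false;

	put_device(dev);	/* drop the reference class_find_device() took */
	return true;
}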
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 875464690117..8fc654f0807b 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -82,12 +82,12 @@ static struct devres_group * node_to_group(struct devres_node *node)
}
static __always_inline struct devres * alloc_dr(dr_release_t release,
- size_t size, gfp_t gfp)
+ size_t size, gfp_t gfp, int nid)
{
size_t tot_size = sizeof(struct devres) + size;
struct devres *dr;
- dr = kmalloc_track_caller(tot_size, gfp);
+ dr = kmalloc_node_track_caller(tot_size, gfp, nid);
if (unlikely(!dr))
return NULL;
@@ -106,24 +106,25 @@ static void add_dr(struct device *dev, struct devres_node *node)
}
#ifdef CONFIG_DEBUG_DEVRES
-void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
+void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
const char *name)
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp | __GFP_ZERO);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
if (unlikely(!dr))
return NULL;
set_node_dbginfo(&dr->node, name, size);
return dr->data;
}
-EXPORT_SYMBOL_GPL(__devres_alloc);
+EXPORT_SYMBOL_GPL(__devres_alloc_node);
#else
/**
* devres_alloc - Allocate device resource data
* @release: Release function devres will be associated with
* @size: Allocation size
* @gfp: Allocation flags
+ * @nid: NUMA node
*
* Allocate devres of @size bytes. The allocated area is zeroed, then
* associated with @release. The returned pointer can be passed to
@@ -132,16 +133,16 @@ EXPORT_SYMBOL_GPL(__devres_alloc);
* RETURNS:
* Pointer to allocated devres on success, NULL on failure.
*/
-void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
+void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp | __GFP_ZERO);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
if (unlikely(!dr))
return NULL;
return dr->data;
}
-EXPORT_SYMBOL_GPL(devres_alloc);
+EXPORT_SYMBOL_GPL(devres_alloc_node);
#endif
/**
@@ -776,7 +777,7 @@ void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
struct devres *dr;
/* use raw alloc_dr for kmalloc caller tracing */
- dr = alloc_dr(devm_kmalloc_release, size, gfp);
+ dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
if (unlikely(!dr))
return NULL;
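A minimal sketch of how a driver might use the NUMA-aware allocator introduced above, with a hypothetical example_release() and payload struct; the node hint only steers where the devres block is allocated:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_state {
	int value;
};

static void example_release(struct device *dev, void *res)
{
	/* undo whatever example_state represents (hypothetical) */
}

static int example_setup(struct device *dev)
{
	struct example_state *st;

	st = devres_alloc_node(example_release, sizeof(*st), GFP_KERNEL,
			       dev_to_node(dev));
	if (!st)
		return -ENOMEM;

	st->value = 42;
	devres_add(dev, st);	/* freed (and example_release() called) on detach */
	return 0;
}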
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index fd0973b922a7..60ee5591ee8f 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -93,7 +93,7 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
return -ENOMEM;
}
} else {
- if (IS_ERR(clk) || !__clk_get(clk)) {
+ if (IS_ERR(clk)) {
kfree(ce);
return -ENOENT;
}
@@ -127,7 +127,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
* @clk: Clock pointer
*
* Add the clock to the list of clocks used for the power management of @dev.
- * It will increment refcount on clock pointer, use clk_put() on it when done.
+ * The power-management code will take control of the clock reference, so
+ * callers should not call clk_put() on @clk after this function successfully
+ * returned.
*/
int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
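A plausible calling pattern for the ownership rule spelled out in the comment above, assuming the caller keeps its clock reference when the call fails:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_clock.h>

static int example_attach_clk(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get(dev->of_node, 0);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = pm_clk_add_clk(dev, clk);
	if (ret) {
		clk_put(clk);	/* the reference was not taken over on failure */
		return ret;
	}

	/* success: the PM core owns the reference now, so no clk_put() here */
	return 0;
}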
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 80e298870388..e03b1ad25a90 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -321,8 +321,7 @@ static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
if (stat > PM_QOS_FLAGS_NONE)
return -EBUSY;
- if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
- || pdd->dev->power.irq_safe))
+ if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
not_suspended++;
}
@@ -1312,13 +1311,17 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *subdomain)
{
- struct gpd_link *link;
+ struct gpd_link *link, *itr;
int ret = 0;
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
|| genpd == subdomain)
return -EINVAL;
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
mutex_lock(&genpd->lock);
mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
@@ -1328,18 +1331,13 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
goto out;
}
- list_for_each_entry(link, &genpd->master_links, master_node) {
- if (link->slave == subdomain && link->master == genpd) {
+ list_for_each_entry(itr, &genpd->master_links, master_node) {
+ if (itr->slave == subdomain && itr->master == genpd) {
ret = -EINVAL;
goto out;
}
}
- link = kzalloc(sizeof(*link), GFP_KERNEL);
- if (!link) {
- ret = -ENOMEM;
- goto out;
- }
link->master = genpd;
list_add_tail(&link->master_node, &genpd->master_links);
link->slave = subdomain;
@@ -1350,7 +1348,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
out:
mutex_unlock(&subdomain->lock);
mutex_unlock(&genpd->lock);
-
+ if (ret)
+ kfree(link);
return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index d5c1149ff123..b8e76f75073b 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -27,7 +29,7 @@
*/
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
-static DEFINE_MUTEX(dev_opp_list_lock);
+DEFINE_MUTEX(dev_opp_list_lock);
#define opp_rcu_lockdep_assert() \
do { \
@@ -79,14 +81,18 @@ static struct device_opp *_managed_opp(const struct device_node *np)
* Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
* -EINVAL based on type of error.
*
- * Locking: This function must be called under rcu_read_lock(). device_opp
- * is a RCU protected pointer. This means that device_opp is valid as long
- * as we are under RCU lock.
+ * Locking: For readers, this function must be called under rcu_read_lock().
+ * device_opp is a RCU protected pointer, which means that device_opp is valid
+ * as long as we are under RCU lock.
+ *
+ * For Writers, this function must be called with dev_opp_list_lock held.
*/
struct device_opp *_find_device_opp(struct device *dev)
{
struct device_opp *dev_opp;
+ opp_rcu_lockdep_assert();
+
if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
@@ -100,7 +106,7 @@ struct device_opp *_find_device_opp(struct device *dev)
}
/**
- * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
* @opp: opp for which voltage has to be returned for
*
* Return: voltage in micro volt corresponding to the opp, else
@@ -122,7 +128,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
opp_rcu_lockdep_assert();
tmp_opp = rcu_dereference(opp);
- if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
+ if (IS_ERR_OR_NULL(tmp_opp))
pr_err("%s: Invalid parameters\n", __func__);
else
v = tmp_opp->u_volt;
@@ -701,7 +707,7 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
}
/**
- * _opp_add_dynamic() - Allocate a dynamic OPP.
+ * _opp_add_v1() - Allocate a OPP based on v1 bindings.
* @dev: device for which we do this operation
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
@@ -727,8 +733,8 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* Duplicate OPPs (both freq and volt are same) and !opp->available
* -ENOMEM Memory allocation failure
*/
-static int _opp_add_dynamic(struct device *dev, unsigned long freq,
- long u_volt, bool dynamic)
+static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
+ bool dynamic)
{
struct device_opp *dev_opp;
struct dev_pm_opp *new_opp;
@@ -770,9 +776,10 @@ unlock:
}
/* TODO: Support multiple regulators */
-static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
+static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
{
u32 microvolt[3] = {0};
+ u32 val;
int count, ret;
/* Missing property isn't a problem, but an invalid entry is */
@@ -805,6 +812,9 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
opp->u_volt_min = microvolt[1];
opp->u_volt_max = microvolt[2];
+ if (!of_property_read_u32(opp->np, "opp-microamp", &val))
+ opp->u_amp = val;
+
return 0;
}
@@ -869,13 +879,10 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
if (!of_property_read_u32(np, "clock-latency-ns", &val))
new_opp->clock_latency_ns = val;
- ret = opp_get_microvolt(new_opp, dev);
+ ret = opp_parse_supplies(new_opp, dev);
if (ret)
goto free_opp;
- if (!of_property_read_u32(new_opp->np, "opp-microamp", &val))
- new_opp->u_amp = val;
-
ret = _opp_add(dev, new_opp, dev_opp);
if (ret)
goto free_opp;
@@ -939,7 +946,7 @@ unlock:
*/
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
- return _opp_add_dynamic(dev, freq, u_volt, true);
+ return _opp_add_v1(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
@@ -1172,13 +1179,17 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
struct device_opp *dev_opp;
int ret = 0, count = 0;
+ mutex_lock(&dev_opp_list_lock);
+
dev_opp = _managed_opp(opp_np);
if (dev_opp) {
/* OPPs are already managed */
if (!_add_list_dev(dev, dev_opp))
ret = -ENOMEM;
+ mutex_unlock(&dev_opp_list_lock);
return ret;
}
+ mutex_unlock(&dev_opp_list_lock);
/* We have opp-list node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_np, np) {
@@ -1196,15 +1207,20 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
if (WARN_ON(!count))
return -ENOENT;
+ mutex_lock(&dev_opp_list_lock);
+
dev_opp = _find_device_opp(dev);
if (WARN_ON(IS_ERR(dev_opp))) {
ret = PTR_ERR(dev_opp);
+ mutex_unlock(&dev_opp_list_lock);
goto free_table;
}
dev_opp->np = opp_np;
dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+ mutex_unlock(&dev_opp_list_lock);
+
return 0;
free_table:
@@ -1241,7 +1257,7 @@ static int _of_add_opp_table_v1(struct device *dev)
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
- if (_opp_add_dynamic(dev, freq, volt, false))
+ if (_opp_add_v1(dev, freq, volt, false))
dev_warn(dev, "%s: Failed to add OPP %ld\n",
__func__, freq);
nr -= 2;
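A reader-side sketch of the locking rule documented for _find_device_opp() above: lookups and dev_pm_opp_get_voltage() run under rcu_read_lock(), while the modification paths touched by this patch take dev_opp_list_lock internally:

#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static unsigned long example_volt_for_freq(struct device *dev,
					   unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned long volt = 0;

	rcu_read_lock();	/* keeps the OPP entries we dereference alive */

	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (!IS_ERR(opp))
		volt = dev_pm_opp_get_voltage(opp);

	rcu_read_unlock();

	return volt;
}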
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index 7654c5606307..7b445e88a0d5 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -10,6 +10,9 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
@@ -124,12 +127,12 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
struct device *dev;
int cpu, ret = 0;
- rcu_read_lock();
+ mutex_lock(&dev_opp_list_lock);
dev_opp = _find_device_opp(cpu_dev);
if (IS_ERR(dev_opp)) {
ret = -EINVAL;
- goto out_rcu_read_unlock;
+ goto unlock;
}
for_each_cpu(cpu, cpumask) {
@@ -150,10 +153,10 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
continue;
}
}
-out_rcu_read_unlock:
- rcu_read_unlock();
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index dcb38f78dae4..7366b2aa8997 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -21,6 +21,9 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+/* Lock to allow exclusive modification to the device and opp lists */
+extern struct mutex dev_opp_list_lock;
+
/*
* Internal data structure organization with the OPP layer library is as
* follows:
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index eb6e67451dec..0d77cd6fd8d1 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
struct wake_irq *wirq;
int err;
+ if (irq < 0)
+ return -EINVAL;
+
wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
if (!wirq)
return -ENOMEM;
@@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
struct wake_irq *wirq;
int err;
+ if (irq < 0)
+ return -EINVAL;
+
wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
if (!wirq)
return -ENOMEM;
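The new check matters because callers routinely pass through the result of helpers such as platform_get_irq(), which may be a negative errno; a sketch of that pattern (hypothetical probe function):

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>

static int example_probe_wakeirq(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 1);	/* may be a negative errno */

	/* a negative value is now rejected here with -EINVAL */
	return dev_pm_set_wake_irq(&pdev->dev, irq);
}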
diff --git a/drivers/base/property.c b/drivers/base/property.c
index de40623bbd8a..1325ff225cc4 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -598,18 +598,34 @@ unsigned int device_get_child_node_count(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_get_child_node_count);
-bool device_dma_is_coherent(struct device *dev)
+bool device_dma_supported(struct device *dev)
{
- bool coherent = false;
-
+ /* For DT, this is always supported.
+ * For ACPI, this depends on CCA, which
+ * is determined by acpi_dma_supported().
+ */
if (IS_ENABLED(CONFIG_OF) && dev->of_node)
- coherent = of_dma_is_coherent(dev->of_node);
- else
- acpi_check_dma(ACPI_COMPANION(dev), &coherent);
+ return true;
+
+ return acpi_dma_supported(ACPI_COMPANION(dev));
+}
+EXPORT_SYMBOL_GPL(device_dma_supported);
- return coherent;
+enum dev_dma_attr device_get_dma_attr(struct device *dev)
+{
+ enum dev_dma_attr attr = DEV_DMA_NOT_SUPPORTED;
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ if (of_dma_is_coherent(dev->of_node))
+ attr = DEV_DMA_COHERENT;
+ else
+ attr = DEV_DMA_NON_COHERENT;
+ } else
+ attr = acpi_get_dma_attr(ACPI_COMPANION(dev));
+
+ return attr;
}
-EXPORT_SYMBOL_GPL(device_dma_is_coherent);
+EXPORT_SYMBOL_GPL(device_get_dma_attr);
/**
* device_get_phy_mode - Get phy mode for given device
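A consumer sketch for the two DMA helpers added above, assuming a hypothetical setup path that only needs a supported/coherent decision:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/property.h>

static int example_configure_dma(struct device *dev)
{
	enum dev_dma_attr attr;

	if (!device_dma_supported(dev))
		return -ENODEV;

	attr = device_get_dma_attr(dev);
	if (attr == DEV_DMA_NOT_SUPPORTED)
		return -ENODEV;

	dev_info(dev, "DMA is %scoherent\n",
		 attr == DEV_DMA_COHERENT ? "" : "non-");
	return 0;
}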