@@ -48,9 +48,14 @@ static struct opp_device *_find_opp_dev(const struct device *dev,
 static struct opp_table *_find_opp_table_unlocked(struct device *dev)
 {
 	struct opp_table *opp_table;
+	bool found;
 
 	list_for_each_entry(opp_table, &opp_tables, node) {
-		if (_find_opp_dev(dev, opp_table)) {
+			mutex_lock(&opp_table->lock);
+		found = !!_find_opp_dev(dev, opp_table);
+		mutex_unlock(&opp_table->lock);
+
+		if (found) {
 			_get_opp_table_kref(opp_table);
 
 			return opp_table;
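
For context, _find_opp_table_unlocked() is called with the global opp_table_lock held, so the walk over opp_tables is already serialized; the new per-table mutex only guards the dev_list traversal inside _find_opp_dev(). A minimal sketch of the unchanged caller, reconstructed from the OPP core and possibly differing in detail from the exact tree this patch targets:

struct opp_table *_find_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&opp_table_lock);	/* global lock: serializes the opp_tables walk */
	opp_table = _find_opp_table_unlocked(dev);
	mutex_unlock(&opp_table_lock);

	return opp_table;
}
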
@@ -766,6 +771,8 @@ struct opp_device *_add_opp_dev(const struct device *dev,
 
 	/* Initialize opp-dev */
 	opp_dev->dev = dev;
+
+	mutex_lock(&opp_table->lock);
 	list_add(&opp_dev->node, &opp_table->dev_list);
 
 	/* Create debugfs entries for the opp_table */
@@ -773,6 +780,7 @@ struct opp_device *_add_opp_dev(const struct device *dev,
 	if (ret)
 		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
 			__func__, ret);
+	mutex_unlock(&opp_table->lock);
 
 	return opp_dev;
 }
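
Taken together, the two hunks above leave _add_opp_dev() holding the new per-table mutex across both the list_add() onto dev_list and the debugfs registration. Reassembled from the hunks, with the surrounding lines reconstructed (so minor details may differ), the function ends up roughly as:

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;
	int ret;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

	/* Initialize opp-dev */
	opp_dev->dev = dev;

	mutex_lock(&opp_table->lock);
	list_add(&opp_dev->node, &opp_table->dev_list);

	/* Create debugfs entries for the opp_table */
	ret = opp_debug_register(opp_dev, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
			__func__, ret);
	mutex_unlock(&opp_table->lock);

	return opp_dev;
}
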
@@ -791,6 +799,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev)
 	if (!opp_table)
 		return NULL;
 
+	mutex_init(&opp_table->lock);
 	INIT_LIST_HEAD(&opp_table->dev_list);
 
 	opp_dev = _add_opp_dev(dev, opp_table);
@@ -812,7 +821,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev)
 
 	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
 	INIT_LIST_HEAD(&opp_table->opp_list);
-	mutex_init(&opp_table->lock);
 	kref_init(&opp_table->kref);
 
 	/* Secure the device table modification */
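
The mutex_init() is not moved for style: _add_opp_dev() now takes opp_table->lock, and _allocate_opp_table() calls it before reaching the block above, so the mutex must be initialized ahead of that call rather than after it. A rough sketch of the resulting ordering (the error handling is reconstructed from memory and may be inexact):

	mutex_init(&opp_table->lock);		/* must be ready before _add_opp_dev() */
	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);	/* takes opp_table->lock internally */
	if (!opp_dev) {
		kfree(opp_table);
		return NULL;
	}
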
@@ -854,6 +862,10 @@ static void _opp_table_kref_release(struct kref *kref)
 	if (!IS_ERR(opp_table->clk))
 		clk_put(opp_table->clk);
 
+	/*
+	 * No need to take opp_table->lock here as we are guaranteed that no
+	 * references to the OPP table are taken at this point.
+	 */
 	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
 				   node);
 
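
The new comment relies on the table being torn down only from the kref release path, which runs once the last reference is gone, so no other CPU can still reach dev_list at that point. For reference, the put path looks roughly like the following sketch, based on the kref-based OPP table management rather than copied from this patch:

void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	/*
	 * Drop the reference; if it was the last one,
	 * _opp_table_kref_release() runs with opp_table_lock held.
	 */
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
}
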
@@ -1716,6 +1728,9 @@ void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
 {
 	struct dev_pm_opp *opp, *tmp;
 
+	/* Protect dev_list */
+	mutex_lock(&opp_table->lock);
+
 	/* Find if opp_table manages a single device */
 	if (list_is_singular(&opp_table->dev_list)) {
 		/* Free static OPPs */
@@ -1733,6 +1748,8 @@ void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
 	} else {
 		_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
 	}
+
+	mutex_unlock(&opp_table->lock);
 }
 
 void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
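
With the lock held across the whole body, the list_is_singular() check and the later _find_opp_dev()/_remove_opp_dev() path can no longer race with a concurrent _add_opp_dev() on the same table. For reference, the removal helper that now runs under the lock is roughly the following (reconstructed, may differ slightly from the targeted tree):

static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);	/* unlinks from opp_table->dev_list */
	kfree(opp_dev);
}
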
@@ -226,8 +226,10 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 	cpumask_clear(cpumask);
 
 	if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
+		mutex_lock(&opp_table->lock);
 		list_for_each_entry(opp_dev, &opp_table->dev_list, node)
 			cpumask_set_cpu(opp_dev->dev->id, cpumask);
+		mutex_unlock(&opp_table->lock);
 	} else {
 		cpumask_set_cpu(cpu_dev->id, cpumask);
 	}
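
This hunk is on the CPU-helper side of the OPP code (cpu.c): dev_pm_opp_get_sharing_cpus() walks dev_list to report which CPUs share the table, so it needs the same protection as the core paths. A hypothetical caller, purely illustrative; the variable names and the cpufreq ->init() context are assumptions, not taken from this patch:

	/* fragment of a hypothetical cpufreq driver ->init() */
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int ret;

	ret = dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret)
		dev_warn(cpu_dev, "No OPP sharing info, assuming per-CPU OPPs: %d\n", ret);
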
@@ -126,7 +126,7 @@ enum opp_table_access {
  * @dev_list: list of devices that share these OPPs
  * @opp_list: table of opps
  * @kref: for reference count of the table.
- * @lock: mutex protecting the opp_list.
+ * @lock: mutex protecting the opp_list and dev_list.
  * @np: struct device_node pointer for opp's DT node.
  * @clock_latency_ns_max: Max clock latency in nanoseconds.
  * @shared_opp: OPP is shared between multiple devices.
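
For orientation, the kernel-doc above describes struct opp_table in opp.h. An abridged sketch of the fields touched by this patch; the field order and the omitted members are approximate:

struct opp_table {
	struct list_head node;		/* entry in the global opp_tables list */

	struct list_head dev_list;	/* opp_device entries; now protected by @lock */
	struct list_head opp_list;	/* dev_pm_opp entries; protected by @lock */
	struct kref kref;
	struct mutex lock;		/* protects opp_list and dev_list */

	/* np, clock_latency_ns_max, shared_opp, ... (unchanged, omitted here) */
};
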