@@ -33,8 +33,6 @@ LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);

-static void dev_pm_opp_get(struct dev_pm_opp *opp);
-
static struct opp_device *_find_opp_dev(const struct device *dev,
struct opp_table *opp_table)
{
@@ -901,7 +899,7 @@ static void _opp_kref_release(struct kref *kref)
dev_pm_opp_put_opp_table(opp_table);
}

-static void dev_pm_opp_get(struct dev_pm_opp *opp)
+void dev_pm_opp_get(struct dev_pm_opp *opp)
{
kref_get(&opp->kref);
}
@@ -673,3 +673,57 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
+
+/**
+ * of_dev_pm_opp_find_required_opp() - Search for required OPP.
+ * @dev: The device whose OPP node is referenced by the 'np' DT node.
+ * @np: Node that contains the "required-opps" property.
+ *
+ * Search the OPP table of device @dev for the OPP whose device tree node is
+ * referenced by the first phandle in the "required-opps" property of @np.
+ * Although "required-opps" may contain multiple phandles, this helper only
+ * parses the very first one in the list.
+ *
+ * The caller must call dev_pm_opp_put() on the returned OPP after use.
+ *
+ * Return: The matching OPP on success, otherwise an ERR_PTR()-encoded error
+ * that must be checked with IS_ERR().
+ */
+struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev,
+						    struct device_node *np)
+{
+	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+	struct device_node *required_np;
+	struct opp_table *opp_table;
+
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table))
+		return ERR_CAST(opp_table);
+
+	required_np = of_parse_phandle(np, "required-opps", 0);
+	if (unlikely(!required_np)) {
+		dev_err(dev, "Unable to parse required-opps\n");
+		goto put_opp_table;
+	}
+
+	mutex_lock(&opp_table->lock);
+
+	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+		if (temp_opp->available && temp_opp->np == required_np) {
+			opp = temp_opp;
+
+			/* Increment the reference count of OPP */
+			dev_pm_opp_get(opp);
+			break;
+		}
+	}
+
+	mutex_unlock(&opp_table->lock);
+
+	of_node_put(required_np);
+put_opp_table:
+	dev_pm_opp_put_opp_table(opp_table);
+
+	return opp;
+}
+EXPORT_SYMBOL_GPL(of_dev_pm_opp_find_required_opp);
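
For reference, a hypothetical caller of the new helper could look like the
sketch below. It is only an illustration and not part of this patch: the
wrapper name example_get_required_freq() is invented, while
of_dev_pm_opp_find_required_opp(), dev_pm_opp_get_freq() and dev_pm_opp_put()
are the OPP API calls involved.

	#include <linux/err.h>
	#include <linux/of.h>
	#include <linux/pm_opp.h>

	/*
	 * Hypothetical consumer sketch: resolve the OPP referenced by the
	 * first "required-opps" phandle of @np and read back its frequency.
	 */
	static int example_get_required_freq(struct device *dev,
					     struct device_node *np,
					     unsigned long *freq)
	{
		struct dev_pm_opp *opp;

		opp = of_dev_pm_opp_find_required_opp(dev, np);
		if (IS_ERR(opp))
			return PTR_ERR(opp);

		*freq = dev_pm_opp_get_freq(opp);

		/* Drop the reference taken by of_dev_pm_opp_find_required_opp() */
		dev_pm_opp_put(opp);

		return 0;
	}

The dev_pm_opp_put() call balances the reference taken inside the helper, as
required by its kerneldoc.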
@@ -187,6 +187,7 @@ struct opp_table {
};

/* Routines internal to opp core */
+void dev_pm_opp_get(struct dev_pm_opp *opp);
void _get_opp_table_kref(struct opp_table *opp_table);
int _get_opp_count(struct opp_table *opp_table);
struct opp_table *_find_opp_table(struct device *dev);
@@ -309,6 +309,7 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
+struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
@@ -342,6 +343,11 @@ static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device
{
return NULL;
}
+
+static inline struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np)
+{
+	return NULL;
+}
#endif

#endif /* __LINUX_OPP_H__ */