[v4,2/5] sched/topology: Add function partition_sched_domains_locked()

Message ID 20180613121711.5018-3-juri.lelli@redhat.com
State New
Headers show
Series
  • [v4,1/5] sched/topology: Add check to backup comment about hotplug lock
Related show

Commit Message

Juri Lelli June 13, 2018, 12:17 p.m.
From: Mathieu Poirier <mathieu.poirier@linaro.org>


Introduce function partition_sched_domains_locked() by moving
the mutex locking code out of the original function.  That way
the work done by partition_sched_domains_locked() can be reused
without dropping the mutex lock.

No functional change is introduced by this patch.

Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>

---
 include/linux/sched/topology.h | 10 ++++++++++
 kernel/sched/topology.c        | 18 ++++++++++++++----
 2 files changed, 24 insertions(+), 4 deletions(-)

-- 
2.14.3

Comments

Steven Rostedt June 14, 2018, 1:35 p.m. | #1
On Wed, 13 Jun 2018 14:17:08 +0200
Juri Lelli <juri.lelli@redhat.com> wrote:

> From: Mathieu Poirier <mathieu.poirier@linaro.org>

> 

> Introducing function partition_sched_domains_locked() by taking

> the mutex locking code out of the original function.  That way

> the work done by partition_sched_domains_locked() can be reused

> without dropping the mutex lock.

> 

> No change of functionality is introduced by this patch.

> 

> Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>

> ---

>  include/linux/sched/topology.h | 10 ++++++++++

>  kernel/sched/topology.c        | 18 ++++++++++++++----

>  2 files changed, 24 insertions(+), 4 deletions(-)

> 

> diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h

> index 26347741ba50..57997caf61b6 100644

> --- a/include/linux/sched/topology.h

> +++ b/include/linux/sched/topology.h

> @@ -162,6 +162,10 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd)

>  	return to_cpumask(sd->span);

>  }

>  

> +extern void partition_sched_domains_locked(int ndoms_new,

> +					   cpumask_var_t doms_new[],

> +					   struct sched_domain_attr *dattr_new);

> +

>  extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],

>  				    struct sched_domain_attr *dattr_new);

>  

> @@ -206,6 +210,12 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl);

>  

>  struct sched_domain_attr;

>  

> +static inline void

> +partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],

> +			       struct sched_domain_attr *dattr_new)

> +{

> +}

> +

>  static inline void

>  partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],

>  			struct sched_domain_attr *dattr_new)

> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c

> index 96eee22fafe8..25a5727d3b48 100644

> --- a/kernel/sched/topology.c

> +++ b/kernel/sched/topology.c

> @@ -1850,16 +1850,16 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,

>   * ndoms_new == 0 is a special case for destroying existing domains,

>   * and it will not create the default domain.

>   *

> - * Call with hotplug lock held

> + * Call with hotplug lock and sched_domains_mutex held

>   */

> -void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],

> -			     struct sched_domain_attr *dattr_new)

> +void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],

> +				    struct sched_domain_attr *dattr_new)

>  {

>  	int i, j, n;

>  	int new_topology;

>  

>  	lockdep_assert_cpus_held();

> -	mutex_lock(&sched_domains_mutex);

> +	lockdep_assert_held(&sched_domains_mutex);

>  

>  	/* Always unregister in case we don't destroy any domains: */

>  	unregister_sched_domain_sysctl();

> @@ -1924,6 +1924,16 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],

>  	ndoms_cur = ndoms_new;

>  

>  	register_sched_domain_sysctl();

> +}

>  

> +/*

> + * Call with hotplug lock held

> + */

> +void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],

> +			     struct sched_domain_attr *dattr_new)

> +{

> +	lockdep_assert_cpus_held();


Is the above assert really necessary? The assert will happen in
partition_sched_domains_locked() anyway.

-- Steve

> +	mutex_lock(&sched_domains_mutex);

> +	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);

>  	mutex_unlock(&sched_domains_mutex);

>  }
Juri Lelli June 14, 2018, 1:47 p.m. | #2
On 14/06/18 09:35, Steven Rostedt wrote:
> On Wed, 13 Jun 2018 14:17:08 +0200

> Juri Lelli <juri.lelli@redhat.com> wrote:


[...]

> > +/*

> > + * Call with hotplug lock held

> > + */

> > +void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],

> > +			     struct sched_domain_attr *dattr_new)

> > +{

> > +	lockdep_assert_cpus_held();

> 

> Is the above assert really necessary? The assert will happen in

> partition_sched_domains_locked() anyway.


Indeed, it seems of little use.

Thanks,

- Juri

Patch

diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 26347741ba50..57997caf61b6 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -162,6 +162,10 @@  static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
 	return to_cpumask(sd->span);
 }
 
+extern void partition_sched_domains_locked(int ndoms_new,
+					   cpumask_var_t doms_new[],
+					   struct sched_domain_attr *dattr_new);
+
 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 				    struct sched_domain_attr *dattr_new);
 
@@ -206,6 +210,12 @@  extern void set_sched_topology(struct sched_domain_topology_level *tl);
 
 struct sched_domain_attr;
 
+static inline void
+partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
+			       struct sched_domain_attr *dattr_new)
+{
+}
+
 static inline void
 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			struct sched_domain_attr *dattr_new)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 96eee22fafe8..25a5727d3b48 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1850,16 +1850,16 @@  static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * ndoms_new == 0 is a special case for destroying existing domains,
  * and it will not create the default domain.
  *
- * Call with hotplug lock held
+ * Call with hotplug lock and sched_domains_mutex held
  */
-void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
-			     struct sched_domain_attr *dattr_new)
+void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
+				    struct sched_domain_attr *dattr_new)
 {
 	int i, j, n;
 	int new_topology;
 
 	lockdep_assert_cpus_held();
-	mutex_lock(&sched_domains_mutex);
+	lockdep_assert_held(&sched_domains_mutex);
 
 	/* Always unregister in case we don't destroy any domains: */
 	unregister_sched_domain_sysctl();
@@ -1924,6 +1924,16 @@  void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 	ndoms_cur = ndoms_new;
 
 	register_sched_domain_sysctl();
+}
 
+/*
+ * Call with hotplug lock held
+ */
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+			     struct sched_domain_attr *dattr_new)
+{
+	lockdep_assert_cpus_held();
+	mutex_lock(&sched_domains_mutex);
+	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
 	mutex_unlock(&sched_domains_mutex);
 }