
[RFC,V2,1/2] sched: Start tracking SCHED_IDLE tasks count in cfs_rq

Message ID ec46d6d30116742c48bfc0eb301aa72df266d6ce.1556182965.git.viresh.kumar@linaro.org
State New
Series sched/fair: Fallback to sched-idle CPU for better performance

Commit Message

Viresh Kumar April 25, 2019, 9:37 a.m. UTC
Start tracking how many tasks with SCHED_IDLE policy are present in each
cfs_rq. This will be used by later commits.
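
In other words, idle_h_nr_running will track the subset of h_nr_running
tasks that run under the SCHED_IDLE policy, so idle_h_nr_running <=
h_nr_running must hold on every cfs_rq. A minimal sketch of that
invariant (a hypothetical helper, not added by this patch), using the
kernel's SCHED_WARN_ON():

	static inline void check_idle_h_nr_running(struct cfs_rq *cfs_rq)
	{
		/* The SCHED_IDLE count is a subset of the total count. */
		SCHED_WARN_ON(cfs_rq->idle_h_nr_running > cfs_rq->h_nr_running);
	}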

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>

---
 kernel/sched/fair.c  | 14 ++++++++++++--
 kernel/sched/sched.h |  2 ++
 2 files changed, 14 insertions(+), 2 deletions(-)

-- 
2.21.0.rc0.269.g1a574e7a288b

Comments

Peter Zijlstra May 10, 2019, 6:55 a.m. UTC | #1
On Thu, Apr 25, 2019 at 03:07:39PM +0530, Viresh Kumar wrote:
> @@ -5166,6 +5170,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
>  {
>  	struct cfs_rq *cfs_rq;
>  	struct sched_entity *se = &p->se;
> +	int idle_h_nr_running = unlikely(task_has_idle_policy(p)) ? 1 : 0;
>  
>  	/*
>  	 * The code below (indirectly) updates schedutil which looks at

> @@ -5266,6 +5273,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
>  	struct cfs_rq *cfs_rq;
>  	struct sched_entity *se = &p->se;
>  	int task_sleep = flags & DEQUEUE_SLEEP;
> +	int idle_h_nr_running = unlikely(task_has_idle_policy(p)) ? 1 : 0;
>  
>  	for_each_sched_entity(se) {
>  		cfs_rq = cfs_rq_of(se);

That's a completely pointless branch there (and I suspect the compiler
will see that too), just write:

	int idle_h_nr_running = task_has_idle_policy(p);
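
This works because task_has_idle_policy() already evaluates to 0 or 1;
its definition in kernel/sched/sched.h is essentially:

	static inline int idle_policy(int policy)
	{
		return policy == SCHED_IDLE;
	}

	static inline int task_has_idle_policy(struct task_struct *p)
	{
		return idle_policy(p->policy);
	}

so both the ternary and the unlikely() hint on it are redundant.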

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4275eb07c0b2..6511cb57acdd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4475,7 +4475,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, dequeue = 1;
+	long task_delta, idle_task_delta, dequeue = 1;
 	bool empty;
 
 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
@@ -4486,6 +4486,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	rcu_read_unlock();
 
 	task_delta = cfs_rq->h_nr_running;
+	idle_task_delta = cfs_rq->idle_h_nr_running;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		/* throttled entity or throttle-on-deactivate */
@@ -4495,6 +4496,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		if (dequeue)
 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
 		qcfs_rq->h_nr_running -= task_delta;
+		qcfs_rq->idle_h_nr_running -= idle_task_delta;
 
 		if (qcfs_rq->load.weight)
 			dequeue = 0;
@@ -4534,7 +4536,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
 	int enqueue = 1;
-	long task_delta;
+	long task_delta, idle_task_delta;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
 
@@ -4554,6 +4556,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		return;
 
 	task_delta = cfs_rq->h_nr_running;
+	idle_task_delta = cfs_rq->idle_h_nr_running;
 	for_each_sched_entity(se) {
 		if (se->on_rq)
 			enqueue = 0;
@@ -4562,6 +4565,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		if (enqueue)
 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
 		cfs_rq->h_nr_running += task_delta;
+		cfs_rq->idle_h_nr_running += idle_task_delta;
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -5166,6 +5170,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
+	int idle_h_nr_running = unlikely(task_has_idle_policy(p)) ? 1 : 0;
 
 	/*
 	 * The code below (indirectly) updates schedutil which looks at
@@ -5198,6 +5203,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 		cfs_rq->h_nr_running++;
+		cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
 		flags = ENQUEUE_WAKEUP;
 	}
@@ -5205,6 +5211,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running++;
+		cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -5266,6 +5273,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int task_sleep = flags & DEQUEUE_SLEEP;
+	int idle_h_nr_running = unlikely(task_has_idle_policy(p)) ? 1 : 0;
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
@@ -5280,6 +5288,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 		cfs_rq->h_nr_running--;
+		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
@@ -5299,6 +5308,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running--;
+		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b52ed1ada0be..0f07d9b55a79 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -490,6 +490,8 @@ struct cfs_rq {
 	unsigned long		runnable_weight;
 	unsigned int		nr_running;
 	unsigned int		h_nr_running;
+	/* h_nr_running for SCHED_IDLE tasks */
+	unsigned int		idle_h_nr_running;
 
 	u64			exec_clock;
 	u64			min_vruntime;
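
As an illustration of how the new counter can be consumed (a
hypothetical sketch; the actual user is the second patch in this
series), a CPU whose runnable tasks are all SCHED_IDLE can be detected
by comparing the counter against the total:

	/* Hypothetical: true if @cpu is busy but only with SCHED_IDLE tasks. */
	static int sched_idle_cpu(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);

		return unlikely(rq->nr_running &&
				rq->nr_running == rq->cfs.idle_h_nr_running);
	}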