Message ID | 1426685070-4419-1-git-send-email-xlpang@126.com
---|---
State | New
Ping Juri

> From: Xunlei Pang <pang.xunlei@linaro.org>
>
> In check_preempt_equal_dl(), cpudl_find() is called with a NULL
> later_mask, thus cpudl_find() here doesn't check cpudl::free_cpus
> at all.
>
> This patch tackles the issue by always passing a non-NULL later_mask
> to cpudl_find().
>
> Signed-off-by: Xunlei Pang <pang.xunlei@linaro.org>
> ---
>  kernel/sched/cpudeadline.c |  8 +++-----
>  kernel/sched/deadline.c    | 15 +++++++++++----
>  2 files changed, 14 insertions(+), 9 deletions(-)
>
> diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
> index c6acb07..f331fcf 100644
> --- a/kernel/sched/cpudeadline.c
> +++ b/kernel/sched/cpudeadline.c
> @@ -97,7 +97,7 @@ static inline int cpudl_maximum(struct cpudl *cp)
>   * cpudl_find - find the best (later-dl) CPU in the system
>   * @cp: the cpudl max-heap context
>   * @p: the task
> - * @later_mask: a mask to fill in with the selected CPUs (or NULL)
> + * @later_mask: a mask to fill in with the selected CPUs (not NULL)
>   *
>   * Returns: int - best CPU (heap maximum if suitable)
>   */
> @@ -107,15 +107,13 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
>  	int best_cpu = -1;
>  	const struct sched_dl_entity *dl_se = &p->dl;
>
> -	if (later_mask &&
> -	    cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
> +	if (cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
>  		best_cpu = cpumask_any(later_mask);
>  		goto out;
>  	} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
>  			dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
>  		best_cpu = cpudl_maximum(cp);
> -		if (later_mask)
> -			cpumask_set_cpu(best_cpu, later_mask);
> +		cpumask_set_cpu(best_cpu, later_mask);
>  	}
>
>  out:
> diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
> index 0a81a95..256099c 100644
> --- a/kernel/sched/deadline.c
> +++ b/kernel/sched/deadline.c
> @@ -964,14 +964,23 @@ out:
>  	return cpu;
>  }
>
> +static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
> +
>  static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
>  {
> +	struct cpumask *later_mask =
> +		this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
> +
> +	/* Make sure the mask is initialized first */
> +	if (unlikely(!later_mask))
> +		return;
> +
>  	/*
>  	 * Current can't be migrated, useless to reschedule,
>  	 * let's hope p can move out.
>  	 */
>  	if (rq->curr->nr_cpus_allowed == 1 ||
> -	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
> +	    cpudl_find(&rq->rd->cpudl, rq->curr, later_mask) == -1)
>  		return;
>
>  	/*
> @@ -979,7 +988,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
>  	 * see if it is pushed or pulled somewhere else.
>  	 */
>  	if (p->nr_cpus_allowed != 1 &&
> -	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
> +	    cpudl_find(&rq->rd->cpudl, p, later_mask) != -1)
>  		return;
>
>  	resched_curr(rq);
> @@ -1173,8 +1182,6 @@ next_node:
>  	return NULL;
>  }
>
> -static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
> -
>  static int find_later_rq(struct task_struct *task)
>  {
>  	struct sched_domain *sd;
> --
> 1.9.1
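For reference, the `unlikely(!later_mask)` check in the hunk above guards the window before the per-CPU masks have been allocated: with CONFIG_CPUMASK_OFFSTACK, `cpumask_var_t` is a pointer that stays NULL until `zalloc_cpumask_var_node()` has run for that CPU. A minimal sketch of that allocation is below, assuming it lives in the DL class's init hook (`init_sched_dl_class()`); the exact call site is an assumption for illustration, not part of this patch.

```c
/*
 * Sketch only (assumed init path, not part of the patch): allocate the
 * per-CPU later_mask once at scheduler init.  Before this has run, and
 * only with CONFIG_CPUMASK_OFFSTACK, this_cpu_cpumask_var_ptr() yields
 * NULL, which is what the unlikely(!later_mask) check above catches.
 */
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

void __init init_sched_dl_class(void)
{
	unsigned int i;

	/* One mask per possible CPU, allocated on that CPU's node. */
	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}
```

With !CONFIG_CPUMASK_OFFSTACK the mask is embedded directly in the per-CPU area and the pointer can never be NULL, so the early return only matters on off-stack configurations.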