@@ -5644,8 +5644,9 @@ static int init_rootdomain(struct root_domain *rd)
if (cpudl_init(&rd->cpudl) != 0)
goto free_dlo_mask;
- if (cpupri_init(&rd->cpupri) != 0)
+ if (cpupri_init(&rd->cpupri, &rd->cpudl) != 0)
goto free_rto_mask;
+
return 0;
free_rto_mask:
@@ -31,6 +31,7 @@
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
+#include "cpudeadline.h"
#include "cpupri.h"
/* Convert between a 140 based task->prio, and our 102 based cpupri */
@@ -54,7 +55,7 @@ static int convert_prio(int prio)
* cpupri_find - find the best (lowest-pri) CPU in the system
* @cp: The cpupri context
* @p: The task
- * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
+ * @lowest_mask: A mask to fill in with selected CPUs (must not be NULL)
*
* Note: This function returns the recommended CPUs as calculated during the
* current invocation. By the time the call returns, the CPUs may have in
@@ -75,6 +76,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
for (idx = 0; idx < task_pri; idx++) {
struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
+ struct cpudl *dl = cp->cpudl;
int skip = 0;
if (!atomic_read(&(vec)->count))
@@ -103,24 +105,12 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
if (skip)
continue;
- if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+ cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
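+ /* If any CPUs have deadline tasks, restrict the pick to CPUs free of them */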
+ if (dl->size)
+ cpumask_and(lowest_mask, lowest_mask, dl->free_cpus);
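+ /*
+ * Nothing may be left: the vector could have been emptied
+ * concurrently since the count check, or the deadline filter
+ * may have cleared every candidate. If so, act as though we
+ * never hit this priority level and continue on.
+ */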
+ if (cpumask_any(lowest_mask) >= nr_cpu_ids)
continue;
- if (lowest_mask) {
- cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
-
- /*
- * We have to ensure that we have at least one bit
- * still set in the array, since the map could have
- * been concurrently emptied between the first and
- * second reads of vec->mask. If we hit this
- * condition, simply act as though we never hit this
- * priority level and continue on.
- */
- if (cpumask_any(lowest_mask) >= nr_cpu_ids)
- continue;
- }
-
return 1;
}
@@ -202,10 +192,11 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
/**
* cpupri_init - initialize the cpupri structure
* @cp: The cpupri context
+ * @cpudl: The cpudl context of the same root domain
*
* Return: -ENOMEM on memory allocation failure.
*/
-int cpupri_init(struct cpupri *cp)
+int cpupri_init(struct cpupri *cp, struct cpudl *cpudl)
{
int i;
@@ -226,6 +217,8 @@ int cpupri_init(struct cpupri *cp)
for_each_possible_cpu(i)
cp->cpu_to_pri[i] = CPUPRI_INVALID;
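+ /* Remember the root domain's cpudl so cpupri_find() can consult it */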
+ cp->cpudl = cpudl;
+
return 0;
cleanup:
@@ -18,13 +18,14 @@ struct cpupri_vec {
struct cpupri {
struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
int *cpu_to_pri;
+ struct cpudl *cpudl;
};
#ifdef CONFIG_SMP
int cpupri_find(struct cpupri *cp,
struct task_struct *p, struct cpumask *lowest_mask);
void cpupri_set(struct cpupri *cp, int cpu, int pri);
-int cpupri_init(struct cpupri *cp);
+int cpupri_init(struct cpupri *cp, struct cpudl *cpudl);
void cpupri_cleanup(struct cpupri *cp);
#endif
@@ -1354,14 +1354,22 @@ out:
return cpu;
}
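+/* Per-CPU scratch mask shared by check_preempt_equal_prio() and find_lowest_rq() */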
+static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
+
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
+ struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
+
+ /* Make sure the mask is initialized first */
+ if (unlikely(!lowest_mask))
+ return;
+
/*
* Current can't be migrated, useless to reschedule,
* let's hope p can move out.
*/
if (rq->curr->nr_cpus_allowed == 1 ||
- !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
+ !cpupri_find(&rq->rd->cpupri, rq->curr, lowest_mask))
return;
/*
@@ -1369,7 +1377,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
* see if it is pushed or pulled somewhere else.
*/
if (p->nr_cpus_allowed != 1
- && cpupri_find(&rq->rd->cpupri, p, NULL))
+ && cpupri_find(&rq->rd->cpupri, p, lowest_mask))
return;
/*
@@ -1531,8 +1539,6 @@ static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
return NULL;
}
-static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
-
static int find_lowest_rq(struct task_struct *task)
{
struct sched_domain *sd;