@@ -4694,15 +4694,15 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
cpumask_and(new_mask, in_mask, cpus_allowed);
/*
- * Since bandwidth control happens on root_domain basis,
- * if admission test is enabled, we only admit -deadline
- * tasks allowed to run on all the CPUs in the task's
- * root_domain.
+ * Since bandwidth control happens on root_domain basis, if admission
+ * test is enabled we don't allow the task's CPUs to change. If
+ * @new_mask is smaller, we risk running out of cycles. If it is
+ * bigger, we may be using DL bandwidth allocated to other CPUs.
*/
#ifdef CONFIG_SMP
if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
rcu_read_lock();
- if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
+ if (!cpumask_equal(task_rq(p)->rd->span, new_mask)) {
retval = -EBUSY;
rcu_read_unlock();
goto out_free_new_mask;
Now that we mandate that, on creation, the ->cpus_allowed mask of a future
DL task has to be equal to the rd->span of the root domain it will be
associated with, changing the affinity of a DL task no longer makes sense.

Indeed, if we set the task to a smaller affinity set, we may run out of
CPU cycles. If we try to increase the size of the affinity set, the new
CPUs are necessarily part of another root domain, where the task's DL
utilisation won't be accounted for. As such, simply refuse to change the
affinity set of a DL task.

Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
---
 kernel/sched/core.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

--
2.7.4
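
Not part of the patch itself: below is a minimal user-space sketch of the
behaviour this change enforces. It assumes a multi-CPU machine with a single
root domain spanning all online CPUs, sufficient privileges (CAP_SYS_NICE)
to become a -deadline task, and a libc that exposes SYS_sched_setattr; the
struct sched_attr layout and the SCHED_DEADLINE constant are supplied
locally because glibc provides no sched_setattr() wrapper. The runtime,
deadline and period values are arbitrary illustration values.

/* dl_affinity_sketch.c - expect sched_setaffinity() to fail with EBUSY */
#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

/* Local definition; glibc does not ship one (see sched_setattr(2)). */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr;
	cpu_set_t mask;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  = 10ULL * 1000 * 1000;	/* 10 ms */
	attr.sched_deadline = 30ULL * 1000 * 1000;	/* 30 ms */
	attr.sched_period   = 30ULL * 1000 * 1000;	/* 30 ms */

	/* Become a -deadline task; raw syscall, no glibc wrapper exists. */
	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}

	/*
	 * Try to pin ourselves to CPU 0 only. That is a strict subset of
	 * the root_domain span, so with this patch the kernel refuses the
	 * affinity change with -EBUSY.
	 */
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	if (sched_setaffinity(0, sizeof(mask), &mask) == -1 && errno == EBUSY)
		printf("affinity change refused with EBUSY, as expected\n");

	return 0;
}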