diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -522,7 +522,9 @@ struct task_struct {
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
+#ifdef CONFIG_SCHED_DL
struct sched_dl_entity dl;
+#endif
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* List of struct preempt_notifier: */
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -13,7 +13,7 @@
static inline int dl_prio(int prio)
{
- if (unlikely(prio < MAX_DL_PRIO))
+ if (IS_ENABLED(CONFIG_SCHED_DL) && unlikely(prio < MAX_DL_PRIO))
return 1;
return 0;
}
diff --git a/init/Kconfig b/init/Kconfig
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1303,6 +1303,14 @@ config SCHED_AUTOGROUP
desktop applications. Task group autogeneration is currently based
upon task session.
+config SCHED_DL
+ bool "Deadline Task Scheduling" if EXPERT
+ default y
+ help
+ This adds the sched_dl scheduling class to the kernel, providing
+ support for the SCHED_DEADLINE policy. You might want to disable
+ this to reduce the kernel size. If unsure, say Y.
+
config SYSFS_DEPRECATED
bool "Enable deprecated sysfs features to support old userspace tools"
depends on SYSFS
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -227,8 +227,13 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
/*
* Only use with rt_mutex_waiter_{less,equal}()
*/
+#ifdef CONFIG_SCHED_DL
#define task_to_waiter(p) \
&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
+#else
+#define task_to_waiter(p) \
+ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = 0 }
+#endif
static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
@@ -692,7 +697,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* the values of the node being removed.
*/
waiter->prio = task->prio;
+#ifdef CONFIG_SCHED_DL
waiter->deadline = task->dl.deadline;
+#endif
rt_mutex_enqueue(lock, waiter);
@@ -967,7 +974,9 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
waiter->task = task;
waiter->lock = lock;
waiter->prio = task->prio;
+#ifdef CONFIG_SCHED_DL
waiter->deadline = task->dl.deadline;
+#endif
/* Get the top priority waiter on the lock */
if (rt_mutex_has_waiters(lock))
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -16,9 +16,10 @@ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
endif
obj-y += core.o loadavg.o clock.o cputime.o
-obj-y += idle_task.o fair.o rt.o deadline.o
obj-y += wait.o swait.o completion.o idle.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
+obj-y += idle_task.o fair.o rt.o
+obj-$(CONFIG_SCHED_DL) += deadline.o $(if $(CONFIG_SMP),cpudeadline.o)
+obj-$(CONFIG_SMP) += cpupri.o topology.o stop_task.o
obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -634,9 +634,11 @@ bool sched_can_stop_tick(struct rq *rq)
{
int fifo_nr_running;
+#ifdef CONFIG_SCHED_DL
/* Deadline tasks, even if single, need the tick */
if (rq->dl.dl_nr_running)
return false;
+#endif
/*
* If there are more than one RR tasks, we need the tick to effect the
@@ -2174,9 +2176,11 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
+#ifdef CONFIG_SCHED_DL
RB_CLEAR_NODE(&p->dl.rb_node);
init_dl_task_timer(&p->dl);
__dl_clear_params(p);
+#endif
INIT_LIST_HEAD(&p->rt.run_list);
p->rt.timeout = 0;
@@ -3699,6 +3703,9 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
* --> -dl task blocks on mutex A and could preempt the
* running task
*/
+#ifdef CONFIG_SCHED_DL
+ if (dl_prio(oldprio))
+ p->dl.dl_boosted = 0;
if (dl_prio(prio)) {
if (!dl_prio(p->normal_prio) ||
(pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
@@ -3707,15 +3714,13 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
} else
p->dl.dl_boosted = 0;
p->sched_class = &dl_sched_class;
- } else if (rt_prio(prio)) {
- if (dl_prio(oldprio))
- p->dl.dl_boosted = 0;
+ } else
+#endif
+ if (rt_prio(prio)) {
if (oldprio < prio)
queue_flag |= ENQUEUE_HEAD;
p->sched_class = &rt_sched_class;
} else {
- if (dl_prio(oldprio))
- p->dl.dl_boosted = 0;
if (rt_prio(oldprio))
p->rt.timeout = 0;
p->sched_class = &fair_sched_class;
@@ -5266,7 +5271,8 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
if (!cpumask_weight(cur))
return ret;
- ret = dl_cpuset_cpumask_can_shrink(cur, trial);
+ if (IS_ENABLED(CONFIG_SCHED_DL))
+ ret = dl_cpuset_cpumask_can_shrink(cur, trial);
return ret;
}
@@ -5561,7 +5567,7 @@ static void cpuset_cpu_active(void)
static int cpuset_cpu_inactive(unsigned int cpu)
{
if (!cpuhp_tasks_frozen) {
- if (dl_cpu_busy(cpu))
+ if (IS_ENABLED(CONFIG_SCHED_DL) && dl_cpu_busy(cpu))
return -EBUSY;
cpuset_update_active_cpus();
} else {
@@ -5721,7 +5727,9 @@ void __init sched_init_smp(void)
free_cpumask_var(non_isolated_cpus);
init_sched_rt_class();
+#ifdef CONFIG_SCHED_DL
init_sched_dl_class();
+#endif
sched_init_smt();
sched_clock_init_late();
@@ -5825,7 +5833,9 @@ void __init sched_init(void)
#endif /* CONFIG_CPUMASK_OFFSTACK */
init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
+#ifdef CONFIG_SCHED_DL
init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime());
+#endif
#ifdef CONFIG_SMP
init_defrootdomain();
@@ -5855,7 +5865,9 @@ void __init sched_init(void)
rq->calc_load_update = jiffies + LOAD_FREQ;
init_cfs_rq(&rq->cfs);
init_rt_rq(&rq->rt);
+#ifdef CONFIG_SCHED_DL
init_dl_rq(&rq->dl);
+#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
root_task_group.shares = ROOT_TASK_GROUP_LOAD;
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
@@ -6518,16 +6530,19 @@ int sched_rt_handler(struct ctl_table *table, int write,
if (ret)
goto undo;
- ret = sched_dl_global_validate();
- if (ret)
- goto undo;
+ if (IS_ENABLED(CONFIG_SCHED_DL)) {
+ ret = sched_dl_global_validate();
+ if (ret)
+ goto undo;
+ }
ret = sched_rt_global_constraints();
if (ret)
goto undo;
sched_rt_do_global();
- sched_dl_do_global();
+ if (IS_ENABLED(CONFIG_SCHED_DL))
+ sched_dl_do_global();
}
if (0) {
undo:
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -646,7 +646,9 @@ do { \
spin_lock_irqsave(&sched_debug_lock, flags);
print_cfs_stats(m, cpu);
print_rt_stats(m, cpu);
+#ifdef CONFIG_SCHED_DL
print_dl_stats(m, cpu);
+#endif
print_rq(m, rq, cpu);
spin_unlock_irqrestore(&sched_debug_lock, flags);
@@ -954,10 +956,12 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
#endif
P(policy);
P(prio);
+#ifdef CONFIG_SCHED_DL
if (p->policy == SCHED_DEADLINE) {
P(dl.runtime);
P(dl.deadline);
}
+#endif
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1544,9 +1544,12 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
* means a dl or stop task can slip in, in which case we need
* to re-start task selection.
*/
- if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
- rq->dl.dl_nr_running))
+ if (unlikely((rq->stop && task_on_rq_queued(rq->stop))))
return RETRY_TASK;
+#ifdef CONFIG_SCHED_DL
+ if (unlikely(rq->dl.dl_nr_running))
+ return RETRY_TASK;
+#endif
}
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -137,7 +137,7 @@ static inline int rt_policy(int policy)
static inline int dl_policy(int policy)
{
- return policy == SCHED_DEADLINE;
+ return IS_ENABLED(CONFIG_SCHED_DL) && policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
@@ -667,7 +667,9 @@ struct rq {
struct cfs_rq cfs;
struct rt_rq rt;
+#ifdef CONFIG_SCHED_DL
struct dl_rq dl;
+#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
@@ -1438,9 +1440,12 @@ static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
#ifdef CONFIG_SMP
#define sched_class_highest (&stop_sched_class)
-#else
+#elif defined(CONFIG_SCHED_DL)
#define sched_class_highest (&dl_sched_class)
+#else
+#define sched_class_highest (&rt_sched_class)
#endif
+
#define for_each_class(class) \
for (class = sched_class_highest; class; class = class->next)
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -110,7 +110,11 @@ static void update_curr_stop(struct rq *rq)
* Simple, special scheduling class for the per-CPU stop tasks:
*/
const struct sched_class stop_sched_class = {
+#ifdef CONFIG_SCHED_DL
.next = &dl_sched_class,
+#else
+ .next = &rt_sched_class,
+#endif
.enqueue_task = enqueue_task_stop,
.dequeue_task = dequeue_task_stop,
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -195,7 +195,9 @@ static void free_rootdomain(struct rcu_head *rcu)
struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
cpupri_cleanup(&rd->cpupri);
+#ifdef CONFIG_SCHED_DL
cpudl_cleanup(&rd->cpudl);
+#endif
free_cpumask_var(rd->dlo_mask);
free_cpumask_var(rd->rto_mask);
free_cpumask_var(rd->online);
@@ -253,16 +255,20 @@ static int init_rootdomain(struct root_domain *rd)
if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
goto free_dlo_mask;
+#ifdef CONFIG_SCHED_DL
init_dl_bw(&rd->dl_bw);
if (cpudl_init(&rd->cpudl) != 0)
goto free_rto_mask;
+#endif
if (cpupri_init(&rd->cpupri) != 0)
goto free_cpudl;
return 0;
free_cpudl:
+#ifdef CONFIG_SCHED_DL
cpudl_cleanup(&rd->cpudl);
+#endif
free_rto_mask:
free_cpumask_var(rd->rto_mask);
free_dlo_mask:
On most small systems, the deadline scheduler class is a luxury that is
rarely used, if at all. In that case it is preferable to have the ability
to configure it out and reduce the kernel size.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
---
 include/linux/sched.h          |  2 ++
 include/linux/sched/deadline.h |  2 +-
 init/Kconfig                   |  8 ++++++++
 kernel/locking/rtmutex.c       |  9 +++++++++
 kernel/sched/Makefile          |  5 +++--
 kernel/sched/core.c            | 37 ++++++++++++++++++++++++++-----------
 kernel/sched/debug.c           |  4 ++++
 kernel/sched/rt.c              |  7 +++++--
 kernel/sched/sched.h           |  9 +++++++--
 kernel/sched/stop_task.c       |  4 ++++
 kernel/sched/topology.c        |  6 ++++++
 11 files changed, 75 insertions(+), 18 deletions(-)

-- 
2.9.4
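As a quick sanity check of either configuration, here is a user-space
sketch (not part of the patch) that tries to switch itself to
SCHED_DEADLINE through the raw sched_setattr() syscall; the policy
number, sched_attr layout and syscall usage follow the sched_setattr(2)
man page, and the runtime/deadline/period values are illustrative only.
Run as root, it should succeed on a SCHED_DL=y kernel; on a SCHED_DL=n
kernel it is expected to fail with EINVAL, since dl_policy() now rejects
SCHED_DEADLINE outright. A SCHED_DL=n kernel can be built by enabling
CONFIG_EXPERT and leaving "# CONFIG_SCHED_DL is not set" in the .config.

/*
 * Minimal SCHED_DEADLINE probe (illustrative sketch, assumes the libc
 * exposes SYS_sched_setattr; there is no glibc wrapper for it).
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SCHED_DEADLINE	6	/* uapi policy value */

/* Local copy of the uapi sched_attr layout, as in sched_setattr(2). */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * 1000 * 1000,	/* 10 ms */
		.sched_deadline	= 30 * 1000 * 1000,	/* 30 ms */
		.sched_period	= 30 * 1000 * 1000,	/* 30 ms */
	};

	/* pid 0 == current task, flags == 0 */
	if (syscall(SYS_sched_setattr, 0, &attr, 0) == 0)
		printf("SCHED_DEADLINE accepted (SCHED_DL=y)\n");
	else
		printf("SCHED_DEADLINE rejected: %s\n", strerror(errno));
	return 0;
}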