
[Query] : tick-sched: why don't we stop tick when we are running idle task?

Message ID CAKohpomgv-LbUcPxazfJkf0Jq0sSULxT0WjrbFovn0k0O-ORwg@mail.gmail.com
State New

Commit Message

Viresh Kumar April 11, 2014, 4:38 p.m. UTC
On 11 April 2014 20:48, Peter Zijlstra <peterz@infradead.org> wrote:
> On Fri, Apr 11, 2014 at 04:53:35PM +0200, Frederic Weisbecker wrote:

> I think there are assumptions that the tick runs on the local cpu;

Yes, many functions behave that way, i.e. they use smp_processor_id() as
the CPU.

> also what
> are you going to do when running it on all remote cpus takes longer than
> the tick?
>
>> Otherwise (and ideally) we need to make the scheduler code able to handle long periods without
>> calling scheduler_tick(). But this is a lot more plumbing. And the scheduler has a gazillion
>> accounting things to handle. Sounds like a big nightmare to take that direction.
>
> So I'm not at all sure what you guys are talking about, but it seems to
> me you should all put down the bong and have a detox round instead.
>
> This all sounds like a cure worse than the problem.

So, what I was working on isn't ready yet, but I would like to show the
lines along which I have been trying, in case it is completely incorrect
and I should stop working on it :)

Please share your feedback on this (yes, several parts are currently
broken, especially the assumption that the tick runs on the local CPU):

NOTE: It's rebased on top of some cleanups I did which haven't been sent to LKML yet.

---------x-------------------------x---------------
    tick-sched: offload NO_HZ_FULL computation to timekeeping CPUs

    Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 include/linux/tick.h     |  2 ++
 kernel/time/tick-sched.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------
 2 files changed, 82 insertions(+), 10 deletions(-)

                                goto out;
@@ -687,9 +734,7 @@ out:
 static void tick_nohz_full_stop_tick(struct tick_sched *ts)
 {
 #ifdef CONFIG_NO_HZ_FULL
-       int cpu = smp_processor_id();
-
-       if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
+       if (!tick_nohz_full_cpu(ts->cpu) || is_idle_task(current))
                return;

        if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
@@ -698,7 +743,7 @@ static void tick_nohz_full_stop_tick(struct tick_sched *ts)
        if (!can_stop_full_tick())
                return;

-       tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+       tick_nohz_stop_sched_tick(ts, ktime_get(), ts->cpu);
 #endif
 }

@@ -824,11 +869,21 @@ EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
 void tick_nohz_irq_exit(void)
 {
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+       unsigned int cpu;

-       if (ts->inidle)
+       if (ts->inidle) {
                __tick_nohz_idle_enter(ts);
-       else
+       } else {
                tick_nohz_full_stop_tick(ts);
+
+               while (!cpumask_empty(&ts->timekeeping_pending)) {
+                       cpu = cpumask_first(&ts->timekeeping_pending);
+                       cpumask_clear_cpu(cpu, &ts->timekeeping_pending);
+
+                       /* Try to stop tick of NO_HZ_FULL cpu */
+                       tick_nohz_full_stop_tick(tick_get_tick_sched(cpu));
+               }
+       }
 }

 /**
@@ -1090,13 +1145,23 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 {
        struct tick_sched *ts =
                container_of(timer, struct tick_sched, sched_timer);
+       struct tick_sched *this_ts = &__get_cpu_var(tick_cpu_sched);
        ktime_t now = ktime_get();

-       tick_sched_do_timer(now);
+       /* Running as timekeeper? */
+       if (likely(ts == this_ts))
+               tick_sched_do_timer(now);
+       else
+               cpumask_set_cpu(ts->cpu, &this_ts->timekeeping_pending);
+
        tick_sched_handle(ts);

        hrtimer_forward(timer, now, tick_period);

+       /*
+        * Yes, we also schedule the next tick while timekeeping on this_cpu.
+        * We will handle that from irq_exit().
+        */
        return HRTIMER_RESTART;
 }

@@ -1149,6 +1214,11 @@ void tick_setup_sched_timer(void)
                tick_nohz_active = 1;
        }
 #endif
+
+#ifdef CONFIG_NO_HZ_FULL
+       ts->cpu = smp_processor_id();
+       cpumask_clear(&ts->timekeeping_pending);
+#endif
 }
 #endif /* HIGH_RES_TIMERS */

Comments

Preeti Murthy April 14, 2014, 9:48 a.m. UTC | #1
Hi Viresh,

I am not too sure about the complexity or the worthiness of this patch,
but I just wanted to add that care must be taken to migrate the
tick_sched_timer of all the remote CPUs off a hotplugged-out CPU if the
latter was keeping their time thus far. In the normal scenario, I am
guessing, the tick_sched_timer dies along with the hotplugged-out CPU,
since there is no need for it any more.
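
A minimal sketch of the kind of migration this would need, assuming
tick_nohz_full_offload_tick() from the patch below and a 3.x-style hotplug
notifier; the function name tick_offload_cpu_down() is hypothetical, and
re-arming every active offloaded timer (rather than only the ones actually
queued on the dying CPU) is an illustrative simplification:

/*
 * Hypothetical sketch only, not part of the patch: before a cpu goes
 * offline, re-home any offloaded tick_sched_timers it may be running.
 */
static int tick_offload_cpu_down(struct notifier_block *nb,
				 unsigned long action, void *hcpu)
{
	unsigned int cpu;

	if (action != CPU_DOWN_PREPARE)
		return NOTIFY_OK;

	for_each_cpu(cpu, tick_nohz_full_mask) {
		struct tick_sched *ts = tick_get_tick_sched(cpu);

		if (!hrtimer_active(&ts->sched_timer))
			continue;

		/* Cancel wherever it is queued, then pick a new timekeeper */
		hrtimer_cancel(&ts->sched_timer);
		tick_nohz_full_offload_tick(ts,
				hrtimer_get_expires(&ts->sched_timer), cpu);
	}

	return NOTIFY_OK;
}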

Regards
Preeti U Murthy
Viresh Kumar April 14, 2014, 9:51 a.m. UTC | #2
On 14 April 2014 15:18, Preeti Murthy <preeti.lkml@gmail.com> wrote:
> I am not too sure about the complexity or the worthiness of this patch,
> but I just wanted to add that care must be taken to migrate the
> tick_sched_timer of all the remote CPUs off a hotplugged-out CPU if the
> latter was keeping their time thus far. In the normal scenario, I am
> guessing, the tick_sched_timer dies along with the hotplugged-out CPU,
> since there is no need for it any more.

Agreed. Let's see if there is anybody in favor of this work, as it is
very important for some real-time use cases we have, like running
data-plane threads on isolated CPUs.
Peter Zijlstra April 14, 2014, 11:02 a.m. UTC | #3
On Fri, Apr 11, 2014 at 10:08:30PM +0530, Viresh Kumar wrote:
> On 11 April 2014 20:48, Peter Zijlstra <peterz@infradead.org> wrote:
> > On Fri, Apr 11, 2014 at 04:53:35PM +0200, Frederic Weisbecker wrote:
> 
> > I think there are assumptions that the tick runs on the local cpu;
> 
> Yes, many functions behave that way, i.e. they use smp_processor_id() as
> the CPU.
> 
> > also what
> > are you going to do when running it on all remote cpus takes longer than
> > the tick?
> >
> >> Otherwise (and ideally) we need to make the scheduler code able to handle long periods without
> >> calling scheduler_tick(). But this is a lot more plumbing. And the scheduler has a gazillion
> >> accounting things to handle. Sounds like a big nightmare to take that direction.
> >
> > So I'm not at all sure what you guys are talking about, but it seems to
> > me you should all put down the bong and have a detox round instead.
> >
> > This all sounds like a cure worse than the problem.
> 
> So, what I was working on isn't ready yet, but I would like to show the
> lines along which I have been trying, in case it is completely incorrect
> and I should stop working on it :)
> 
> Please share your feedback on this (yes, several parts are currently
> broken, especially the assumption that the tick runs on the local CPU):

I'm still not sure _what_ you're trying to solve here. What are you
doing and why?
Viresh Kumar April 14, 2014, 11:42 a.m. UTC | #4
On 14 April 2014 16:32, Peter Zijlstra <peterz@infradead.org> wrote:
> I'm still not sure _what_ you're trying to solve here. What are you
> doing and why?

Hi Peter,

We are working on building ARM networking machines, where the networking
data plane is handled completely in user space. At run time we may
dedicate any number of CPUs to data-plane activities, with a single
user-space thread per CPU doing the packet processing. Due to timing
constraints, these cores can't tolerate any interruption from the
kernel. That includes interruptions from:

- other tasks: fixed with cpusets
- timers/hrtimers: implemented cpuset.quiesce as you suggested; waiting
for reviews
- workqueues: probably will be fixed by Frederic's work
- the tick: even with NO_HZ_FULL we get a tick every second. This is
what I am trying to address here. Frederic earlier suggested offloading
this accounting to other CPUs, hence my initial proposal.

Please let me know what's the right way to get this fixed and I will
try it that way.

Thanks for your inputs.
Peter Zijlstra April 14, 2014, 11:47 a.m. UTC | #5
On Mon, Apr 14, 2014 at 05:12:08PM +0530, Viresh Kumar wrote:
> On 14 April 2014 16:32, Peter Zijlstra <peterz@infradead.org> wrote:
> > I'm still not sure _what_ you're trying to solve here. What are you
> > doing and why?
> 
> Hi Peter,
> 
> We are working on building ARM networking machines, where the networking
> data plane is handled completely in user space. At run time we may
> dedicate any number of CPUs to data-plane activities, with a single
> user-space thread per CPU doing the packet processing. Due to timing
> constraints, these cores can't tolerate any interruption from the
> kernel. That includes interruptions from:
> 
> - other tasks: fixed with cpusets
> - timers/hrtimers: implemented cpuset.quiesce as you suggested; waiting
> for reviews
> - workqueues: probably will be fixed by Frederic's work

Ok.

> - the tick: even with NO_HZ_FULL we get a tick every second. This is
> what I am trying to address here. Frederic earlier suggested offloading
> this accounting to other CPUs, hence my initial proposal.

What causes this tick? I was under the impression that once there's a
single task (not doing any syscalls) and the above issues are sorted, no
more tick would happen.



Viresh Kumar April 14, 2014, 11:52 a.m. UTC | #6
On 14 April 2014 17:17, Peter Zijlstra <peterz@infradead.org> wrote:
> What causes this tick? I was under the impression that once there's a
> single task (not doing any syscalls) and the above issues are sorted, no
> more tick would happen.

This is what Frederic told me earlier:

https://lkml.org/lkml/2014/2/13/238
Peter Zijlstra April 14, 2014, 12:06 p.m. UTC | #7
On Mon, Apr 14, 2014 at 05:22:30PM +0530, Viresh Kumar wrote:
> On 14 April 2014 17:17, Peter Zijlstra <peterz@infradead.org> wrote:
> > What causes this tick? I was under the impression that once there's a
> > single task (not doing any syscalls) and the above issues are sorted, no
> > more tick would happen.
> 
> This is what Frederic told me earlier:
> 
> https://lkml.org/lkml/2014/2/13/238

That's a bit of a non-answer. I'm fairly sure it's not a gazillion
issues, since the actual scheduler tick doesn't actually do that much.

So start by enumerating what is actually required.

The 2), which I suppose you're now trying to implement, is I think
entirely the wrong way. The tick really assumes it runs locally; moving
it to another CPU is insane.
Viresh Kumar April 15, 2014, 6:04 a.m. UTC | #8
On 14 April 2014 17:36, Peter Zijlstra <peterz@infradead.org> wrote:
> That's a bit of a non-answer. I'm fairly sure it's not a gazillion
> issues, since the actual scheduler tick doesn't actually do that much.
>
> So start by enumerating what is actually required.
>
> The 2), which I suppose you're now trying to implement, is I think
> entirely the wrong way. The tick really assumes it runs locally; moving
> it to another CPU is insane.

Yeah, I was trying this one :(

I still don't have enough knowledge of the scheduler, so I can't tell
exactly what requires the tick to fire every second.

@Frederic: Can you please help? :)

--
viresh
Frederic Weisbecker April 15, 2014, 9:30 a.m. UTC | #9
On Mon, Apr 14, 2014 at 02:06:00PM +0200, Peter Zijlstra wrote:
> On Mon, Apr 14, 2014 at 05:22:30PM +0530, Viresh Kumar wrote:
> > On 14 April 2014 17:17, Peter Zijlstra <peterz@infradead.org> wrote:
> > > What causes this tick? I was under the impression that once there's a
> > > single task (not doing any syscalls) and the above issues are sorted, no
> > > more tick would happen.
> > 
> > This is what Frederic told me earlier:
> > 
> > https://lkml.org/lkml/2014/2/13/238
> 
> That's a bit of a non-answer. I'm fairly sure it's not a gazillion
> issues, since the actual scheduler tick doesn't actually do that much.
> 
> So start by enumerating what is actually required.

Ok, I'm a bit busy with a conference right now, but I'm going to summarize that
soonish.

> 
> The 2), which I suppose you're now trying to implement, is I think
> entirely the wrong way. The tick really assumes it runs locally; moving
> it to another CPU is insane.

There are probably a few things that assume local calls, but last time
I checked I had the impression that it was fairly possible to call
sched_class::task_tick() remotely: rq is locked, no reference to
"current", use rq accessors...

OTOH scheduler_tick() itself definitely requires local calls.
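
For concreteness, the remote call being discussed might look roughly like
this minimal sketch, assuming the 3.x-era task_tick() signature; the
helper remote_task_tick() is hypothetical, not existing kernel API:

/* Hypothetical sketch: tick a remote cpu's current task under rq->lock */
static void remote_task_tick(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	update_rq_clock(rq);
	curr = rq->curr;		/* rq accessor, not "current" */
	if (!is_idle_task(curr))
		curr->sched_class->task_tick(rq, curr, 0);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}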
Peter Zijlstra April 15, 2014, 10:52 a.m. UTC | #10
On Tue, Apr 15, 2014 at 11:30:04AM +0200, Frederic Weisbecker wrote:
> There are probably a few things that assume local calls, but last time
> I checked I had the impression that it was fairly possible to call
> sched_class::task_tick() remotely: rq is locked, no reference to
> "current", use rq accessors...
> 
> OTOH scheduler_tick() itself definitely requires local calls.

Possible isn't the problem; it's completely insane to do that.
Peter Zijlstra April 15, 2014, 10:53 a.m. UTC | #11
On Tue, Apr 15, 2014 at 12:52:26PM +0200, Peter Zijlstra wrote:
> On Tue, Apr 15, 2014 at 11:30:04AM +0200, Frederic Weisbecker wrote:
> > There are probably a few things that assume local calls, but last time
> > I checked I had the impression that it was fairly possible to call
> > sched_class::task_tick() remotely: rq is locked, no reference to
> > "current", use rq accessors...
> > 
> > OTOH scheduler_tick() itself definitely requires local calls.
> 
> Possible isn't the problem; it's completely insane to do that.

What's more, I'm still waiting to hear why we want to do any of
this.
Viresh Kumar April 23, 2014, 11:12 a.m. UTC | #12
On 15 April 2014 15:00, Frederic Weisbecker <fweisbec@gmail.com> wrote:
> Ok, I'm a bit busy with a conference right now, but I'm going to summarize that
> soonish.

Are you back now?
Viresh Kumar May 9, 2014, 8:44 a.m. UTC | #13
On 23 April 2014 16:42, Viresh Kumar <viresh.kumar@linaro.org> wrote:
> On 15 April 2014 15:00, Frederic Weisbecker <fweisbec@gmail.com> wrote:
>> Ok, I'm a bit busy with a conference right now, but I'm going to summarize that
>> soonish.

Hi Frederic,

Please see if you can find some time to close this; that would be very
helpful :)

Thanks

--
viresh
Frederic Weisbecker May 13, 2014, 11:30 p.m. UTC | #14
On Fri, May 09, 2014 at 02:14:10PM +0530, Viresh Kumar wrote:
> On 23 April 2014 16:42, Viresh Kumar <viresh.kumar@linaro.org> wrote:
> > On 15 April 2014 15:00, Frederic Weisbecker <fweisbec@gmail.com> wrote:
> >> Ok, I'm a bit busy with a conference right now, but I'm going to summarize that
> >> soonish.
> 
> Hi Frederic,
> 
> Please see if you can find some time to close this; that would be very
> helpful :)
> 
> Thanks

I'm generally worried about the accounting in update_curr() that
periodically updates stats. I have no idea which of these stats could be
read by other CPUs: vruntime, load, bandwidth, etc...

Also without tick:

* we don't poll anymore on trigger_load_balance()

* __update_cpu_load() requires fixed-rate periodic polling. Alex Shi had
patches for that, but I'm not sure whether they're going to be merged

* rq->rt_avg accounting?
Peter Zijlstra May 22, 2014, 8:44 a.m. UTC | #15
On Wed, May 14, 2014 at 01:30:39AM +0200, Frederic Weisbecker wrote:
> On Fri, May 09, 2014 at 02:14:10PM +0530, Viresh Kumar wrote:
> > On 23 April 2014 16:42, Viresh Kumar <viresh.kumar@linaro.org> wrote:
> > > On 15 April 2014 15:00, Frederic Weisbecker <fweisbec@gmail.com> wrote:
> > >> Ok, I'm a bit busy with a conference right now, but I'm going to summarize that
> > >> soonish.
> > 
> > Hi Frederic,
> > 
> > Please see if you can find some time to close this; that would be very
> > helpful :)
> > 
> > Thanks
> 
> I'm generally worried about the accounting in update_curr() that
> periodically updates stats. I have no idea which of these stats could be
> read by other CPUs: vruntime, load, bandwidth, etc...

update_curr() principally updates sum_exec_runtime and vruntime. Now
vruntime is only interesting to other CPUs when moving tasks across
CPUs, so see below on load-balancing.

sum_exec_runtime is used for a number of task stats, but when nobody
looks at those it shouldn't matter.

So rather than constantly force-updating them for no purpose, update them
on demand: when someone reads those cputime stats, prod the task/cpu.
I think you can do a remote update_curr() just fine.
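
As a rough illustration of the on-demand prodding, a sketch modeled on
the task_sched_runtime() pattern already in kernel/sched/core.c; the
helper name nohz_full_task_runtime() is an assumption:

/*
 * Sketch: fold in the time p has been running since its last update,
 * instead of relying on the tick having accounted it.
 */
static u64 nohz_full_task_runtime(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns;

	rq = task_rq_lock(p, &flags);
	ns = p->se.sum_exec_runtime;
	if (task_current(rq, p) && p->on_rq) {
		s64 delta;

		update_rq_clock(rq);
		delta = rq_clock_task(rq) - p->se.exec_start;
		if (delta > 0)
			ns += delta;	/* un-ticked runtime */
	}
	task_rq_unlock(rq, p, &flags);

	return ns;
}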

And I suppose you also need to do something with task_tick_numa(), which
relies on the tick regardless of nr_running. And that's very much not
something you can do remotely. As it stands, I think NUMA balancing
and nohz_full are incompatible.

> Also without tick:
> 
> * we don't poll anymore on trigger_load_balance()
> 
> * __update_cpu_load() requires fixed-rate periodic polling. Alex Shi had
> patches for that, but I'm not sure whether they're going to be merged
> 
> * rq->rt_avg accounting?

So I think typically we don't want load-balancing to happen when we're
on a nohz_full cpu and there's only the one task running.

So what you can do is extend the existing nohz balancing (which
currently only deals with CPU_IDLE) to also remote balance CPU_NOT_IDLE
when nr_running == 1.
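
A sketch of that direction, with caveats: the helper below is
hypothetical, it would have to be driven from the existing nohz balancer
on a housekeeping cpu, the rebalance_domains() signature is assumed to be
the then-current (struct rq *, enum cpu_idle_type) one, and a real
version would also check that the remote tick is actually stopped:

/*
 * Hypothetical sketch: let a housekeeping cpu balance on behalf of busy
 * nohz_full cpus, extending the CPU_IDLE-only nohz balancing.
 */
static void nohz_full_remote_balance(void)
{
	int cpu;

	for_each_cpu(cpu, tick_nohz_full_mask) {
		struct rq *rq = cpu_rq(cpu);

		/* Only a single runnable task? Then balance it remotely. */
		if (rq->nr_running != 1)
			continue;

		rebalance_domains(rq, CPU_NOT_IDLE);
	}
}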

Patch

diff --git a/include/linux/tick.h b/include/linux/tick.h
index 97bbb64..f8efa9f 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -62,6 +62,7 @@  enum tick_nohz_mode {
  */
 struct tick_sched {
        struct hrtimer                  sched_timer;
+       struct cpumask                  timekeeping_pending;
        enum tick_nohz_mode             nohz_mode;
        unsigned long                   check_clocks;
        unsigned long                   idle_jiffies;
@@ -77,6 +78,7 @@  struct tick_sched {
        ktime_t                         iowait_sleeptime;
        ktime_t                         sleep_length;
        ktime_t                         idle_expires;
+       unsigned int                    cpu;
        int                             inidle;
        int                             idle_active;
        int                             tick_stopped;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 2d0b154..7560bd0 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -532,13 +532,56 @@  u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

+static int tick_nohz_full_get_offload_cpu(unsigned int cpu)
+{
+       const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+       unsigned int offload_cpu;
+       cpumask_t cpumask;
+
+       /* Don't pick any NO_HZ_FULL cpu */
+       cpumask_andnot(&cpumask, cpu_online_mask, tick_nohz_full_mask);
+       cpumask_clear_cpu(cpu, &cpumask);
+
+       /* Try for same node. */
+       offload_cpu = cpumask_first_and(nodemask, &cpumask);
+       if (offload_cpu < nr_cpu_ids)
+               return offload_cpu;
+
+       /* Any online will do */
+       return cpumask_any(&cpumask);
+}
+
+static void tick_nohz_full_offload_timer(void *info)
+{
+       struct tick_sched *ts = info;
+       hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
+}
+
+static void tick_nohz_full_offload_tick(struct tick_sched *ts, ktime_t expires,
+                                       unsigned int cpu)
+{
+       unsigned int offload_cpu = tick_nohz_full_get_offload_cpu(cpu);
+
+       /* Offload accounting to timekeeper */
+       hrtimer_cancel(&ts->sched_timer);
+       hrtimer_set_expires(&ts->sched_timer, expires);
+
+       /*
+        * This triggers a WARN_ON(), as the routine below doesn't support
+        * being called while interrupts are disabled. Need to think of some
+        * other way to get this fixed.
+        */
+       smp_call_function_single(offload_cpu, tick_nohz_full_offload_timer, ts,
+                                true);
+}
+
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
                                         ktime_t now, int cpu)
 {
        unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
        ktime_t last_update, expires, ret = { .tv64 = 0 };
        unsigned long rcu_delta_jiffies;
-       struct clock_event_device *dev = tick_get_cpu_device()->evtdev;
+       struct clock_event_device *dev = tick_get_device(cpu)->evtdev;
        u64 time_delta;

        time_delta = timekeeping_max_deferment();
@@ -661,8 +704,12 @@  static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
                }

                if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-                       hrtimer_start(&ts->sched_timer, expires,
-                                     HRTIMER_MODE_ABS_PINNED);
+                       if (ts->inidle)
+                               hrtimer_start(&ts->sched_timer, expires,
+                                             HRTIMER_MODE_ABS_PINNED);
+                       else
+                               tick_nohz_full_offload_tick(ts, expires, cpu);
+
                        /* Check, if the timer was already in the past */
                        if (hrtimer_active(&ts->sched_timer))