@@ -43,6 +43,8 @@ static inline void kasan_disable_current(void)
 
 void kasan_unpoison_shadow(const void *address, size_t size);
 
+void kasan_unpoison_task_stack(struct task_struct *idle);
+
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
@@ -66,6 +68,8 @@ void kasan_free_shadow(const struct vm_struct *vm);
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 
+static inline void kasan_unpoison_task_stack(struct task_struct *idle) {}
+
 static inline void kasan_enable_current(void) {}
 static inline void kasan_disable_current(void) {}
 
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -26,6 +26,7 @@
  *              Thomas Gleixner, Mike Kravetz
  */
 
+#include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
@@ -5096,6 +5097,8 @@ void init_idle(struct task_struct *idle, int cpu)
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
+	kasan_unpoison_task_stack(idle);
+
 #ifdef CONFIG_SMP
 	/*
 	 * Its possible that init_idle() gets called multiple times on a task,
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -60,6 +60,21 @@ void kasan_unpoison_shadow(const void *address, size_t size)
 	}
 }
 
+/*
+ * KASAN-instrumented functions poison the shadow of their stack frames on
+ * entry and clear it again before returning.  A CPU being hot-unplugged
+ * exits the kernel several levels deep in C code and never returns, so any
+ * such poison is left behind.  Remove it before the idle task reuses this
+ * stack on the next hot-plug.
+ */
+void kasan_unpoison_task_stack(struct task_struct *idle)
+{
+	/* thread_info at the base of the stack is never poisoned; skip it. */
+	void *base = task_stack_page(idle) + sizeof(struct thread_info);
+	size_t size = THREAD_SIZE - sizeof(struct thread_info);
+
+	kasan_unpoison_shadow(base, size);
+}
+
 /*
  * All functions below always inlined so compiler could
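
As a rough illustration of the problem the new hook addresses, here is a minimal
user-space sketch of KASAN's stack shadow, assuming the usual one-shadow-byte-
per-eight-bytes encoding (KASAN_SHADOW_SCALE_SHIFT == 3).  The shadow[] array,
MODEL_THREAD_SIZE, poison_range()/unpoison_range() helpers and the 0xf2 value
below are illustrative stand-ins, not kernel code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model: a 16 KiB "stack" mirrored by a shadow array, one shadow byte per
 * eight stack bytes, as in KASAN.  0 means the 8-byte granule is fully
 * addressable; the model treats any nonzero value as poison. */
#define MODEL_THREAD_SIZE	(16 * 1024)
#define SHADOW_SCALE_SHIFT	3
#define SHADOW_SIZE		(MODEL_THREAD_SIZE >> SHADOW_SCALE_SHIFT)

static uint8_t shadow[SHADOW_SIZE];

/* What instrumented function prologues do to their frame's redzones... */
static void poison_range(size_t off, size_t len, uint8_t val)
{
	memset(&shadow[off >> SHADOW_SCALE_SHIFT], val, len >> SHADOW_SCALE_SHIFT);
}

/* ...and what kasan_unpoison_shadow() does: clear the shadow back to 0. */
static void unpoison_range(size_t off, size_t len)
{
	poison_range(off, len, 0);
}

int main(void)
{
	/* Hot-unplug: the CPU leaves the kernel deep in instrumented calls
	 * that never return, so their redzone poison stays in the shadow. */
	poison_range(MODEL_THREAD_SIZE - 512, 256, 0xf2 /* e.g. a redzone byte */);

	/* What init_idle() now does via kasan_unpoison_task_stack(): wipe the
	 * stack's shadow before the CPU is brought back online. */
	unpoison_range(0, MODEL_THREAD_SIZE);

	for (size_t i = 0; i < SHADOW_SIZE; i++)
		assert(shadow[i] == 0);
	printf("stale stack poison cleared\n");
	return 0;
}

The sketch only demonstrates that clearing the shadow is a plain store of
zeroes: it is idempotent and harmless on an already-clean stack, which is why
init_idle() can call the hook unconditionally.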