@@ -251,7 +251,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
childti->pcb.ksp = (unsigned long) childstack;
childti->pcb.flags = 1; /* set FEN, clear everything else */
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
/* kernel thread */
memset(childstack, 0,
sizeof(struct switch_stack) + sizeof(struct pt_regs));
@@ -200,7 +200,7 @@ int copy_thread(unsigned long clone_flags,
childksp[0] = 0; /* fp */
childksp[1] = (unsigned long)ret_from_fork; /* blink */
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
memset(c_regs, 0, sizeof(struct pt_regs));
c_callee->r13 = kthread_arg;
@@ -242,7 +242,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
thread->cpu_domain = get_domain();
#endif
- if (likely(!(p->flags & PF_KTHREAD))) {
+ if (likely(!is_kthread(p))) {
*childregs = *current_pt_regs();
childregs->ARM_r0 = 0;
if (stack_start)
@@ -625,7 +625,7 @@ static void update_sections_early(struct section_perm perms[], int n)
struct task_struct *t, *s;
for_each_process(t) {
- if (t->flags & PF_KTHREAD)
+ if (is_kthread(t))
continue;
for_each_thread(t, s)
set_section_perms(perms, n, true, s->mm);
@@ -367,7 +367,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
*/
fpsimd_flush_task_state(p);
- if (likely(!(p->flags & PF_KTHREAD))) {
+ if (likely(!is_kthread(p))) {
*childregs = *current_pt_regs();
childregs->regs[0] = 0;
@@ -454,7 +454,7 @@ static void ssbs_thread_switch(struct task_struct *next)
* Nothing to do for kernel threads, but 'regs' may be junk
* (e.g. idle task) so check the flags and bail early.
*/
- if (unlikely(next->flags & PF_KTHREAD))
+ if (unlikely(is_kthread(next)))
return;
/* If the mitigation is enabled, then we leave SSBS clear. */
@@ -112,7 +112,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs = task_pt_regs(p);
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
/* case of __kernel_thread: we return to supervisor space */
memset(childregs, 0, sizeof(struct pt_regs));
childregs->sp = (unsigned long)(childregs + 1);
@@ -52,7 +52,7 @@ int copy_thread(unsigned long clone_flags,
/* setup ksp for switch_to !!! */
p->thread.ksp = (unsigned long)childstack;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
memset(childregs, 0, sizeof(struct pt_regs));
childstack->r15 = (unsigned long) ret_from_kernel_thread;
childstack->r8 = kthread_arg;
@@ -114,7 +114,7 @@ int copy_thread(unsigned long clone_flags,
childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->retpc = (unsigned long) ret_from_kernel_thread;
childregs->er4 = topstk; /* arg */
@@ -73,7 +73,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
sizeof(*ss));
ss->lr = (unsigned long)ret_from_fork;
p->thread.switch_sp = ss;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
memset(childregs, 0, sizeof(struct pt_regs));
/* r24 <- fn, r25 <- arg */
ss->r24 = usp;
@@ -376,7 +376,7 @@ copy_thread(unsigned long clone_flags,
ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
if (unlikely(!user_stack_base)) {
/* fork_idle() called us */
return 0;
@@ -138,7 +138,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
*/
p->thread.fs = get_fs().seg;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
/* kernel thread */
memset(frame, 0, sizeof(struct fork_frame));
frame->regs.sr = PS_S;
@@ -60,7 +60,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
struct pt_regs *childregs = task_pt_regs(p);
struct thread_info *ti = task_thread_info(p);
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
/* if we're creating a new kernel thread then just zeroing all
* the registers. That's OK for a brand new thread.*/
memset(childregs, 0, sizeof(struct pt_regs));
@@ -87,7 +87,7 @@ void exit_thread(struct task_struct *tsk)
* User threads may have allocated a delay slot emulation frame.
* If so, clean up that allocation.
*/
- if (!(current->flags & PF_KTHREAD))
+ if (!is_kthread(current))
dsemul_thread_cleanup(tsk);
}
@@ -132,7 +132,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
/* Put the stack after the struct pt_regs. */
childksp = (unsigned long) childregs;
p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
/* kernel thread */
unsigned long status = p->thread.cp0_status;
memset(childregs, 0, sizeof(struct pt_regs));
@@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
memset(childregs, 0, sizeof(struct pt_regs));
/* kernel thread fn */
p->thread.cpu_context.r6 = stack_start;
@@ -207,7 +207,7 @@ struct task_struct *_switch_fpu(struct task_struct *prev, struct task_struct *ne
#if !IS_ENABLED(CONFIG_LAZY_FPU)
unlazy_fpu(prev);
#endif
- if (!(next->flags & PF_KTHREAD))
+ if (!is_kthread(next))
clear_fpu(task_pt_regs(next));
return prev;
}
@@ -109,7 +109,7 @@ int copy_thread(unsigned long clone_flags,
struct switch_stack *childstack =
((struct switch_stack *)childregs) - 1;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
memset(childstack, 0,
sizeof(struct switch_stack) + sizeof(struct pt_regs));
@@ -168,7 +168,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
sp -= sizeof(struct pt_regs);
kregs = (struct pt_regs *)sp;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
memset(kregs, 0, sizeof(struct pt_regs));
kregs->gpr[20] = usp; /* fn, kernel thread */
kregs->gpr[22] = arg;
@@ -220,7 +220,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
extern void * const ret_from_kernel_thread;
extern void * const child_return;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
/* kernel thread */
memset(cregs, 0, sizeof(struct pt_regs));
if (!usp) /* idle thread */
@@ -1615,7 +1615,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
/* Copy registers */
sp -= sizeof(struct pt_regs);
childregs = (struct pt_regs *) sp;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
/* kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gpr[1] = sp + sizeof(struct pt_regs);
@@ -96,7 +96,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
struct pt_regs *childregs = task_pt_regs(p);
/* p->thread holds context to be restored by __switch_to() */
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
/* Kernel thread */
const register unsigned long gp __asm__ ("gp");
memset(childregs, 0, sizeof(struct pt_regs));
@@ -113,7 +113,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
frame->sf.gprs[9] = (unsigned long) frame;
/* Store access registers to kernel stack of new process. */
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
/* kernel thread */
memset(&frame->childregs, 0, sizeof(struct pt_regs));
frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
@@ -137,7 +137,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs = task_pt_regs(p);
p->thread.sp = (unsigned long) childregs;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
memset(childregs, 0, sizeof(struct pt_regs));
p->thread.pc = (unsigned long) ret_from_kernel_thread;
childregs->regs[4] = arg;
@@ -389,7 +389,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
p->thread.sp = (unsigned long) childregs;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->regs[2] = (unsigned long)arg;
childregs->regs[3] = (unsigned long)usp;
@@ -338,7 +338,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
ti->ksp = (unsigned long) new_stack;
p->thread.kregs = childregs;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
extern int nwindows;
unsigned long psr;
memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
@@ -632,7 +632,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
sizeof(struct sparc_stackf));
t->fpsaved[0] = 0;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
memset(child_trap_frame, 0, child_stack_sz);
__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
(current_pt_regs()->tstate + 1) & TSTATE_CWP;
@@ -157,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct * p)
{
void (*handler)(void);
- int kthread = current->flags & PF_KTHREAD;
+ int kthread = is_kthread(current);
int ret = 0;
p->thread = (struct thread_struct) INIT_THREAD;
@@ -228,7 +228,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
thread->cpu_context.sp = (unsigned long)childregs;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread;
thread->cpu_context.r4 = stack_start;
thread->cpu_context.r5 = stk_sz;
@@ -91,7 +91,7 @@ void kernel_fpu_begin(void)
this_cpu_write(in_kernel_fpu, true);
- if (!(current->flags & PF_KTHREAD) &&
+ if (!is_kthread(current) &&
!test_thread_flag(TIF_NEED_FPU_LOAD)) {
set_thread_flag(TIF_NEED_FPU_LOAD);
/*
@@ -134,7 +134,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
p->thread.sp0 = (unsigned long) (childregs+1);
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
/* kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
frame->bx = sp; /* function */
@@ -397,7 +397,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
savesegment(ds, p->thread.ds);
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
/* kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
frame->bx = sp; /* function */
@@ -217,7 +217,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
p->thread.sp = (unsigned long)childregs;
- if (!(p->flags & PF_KTHREAD)) {
+ if (!is_kthread(p)) {
struct pt_regs *regs = current_pt_regs();
unsigned long usp = usp_thread_fn ?
usp_thread_fn : regs->areg[1];
@@ -1739,7 +1739,7 @@ void blkcg_maybe_throttle_current(void)
*/
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
{
- if (unlikely(current->flags & PF_KTHREAD))
+ if (unlikely(is_kthread(current)))
return;
if (!blk_get_queue(q))
@@ -336,7 +336,7 @@ static void send_sig_all(int sig)
read_lock(&tasklist_lock);
for_each_process(p) {
- if (p->flags & PF_KTHREAD)
+ if (is_kthread(p))
continue;
if (is_global_init(p))
continue;
@@ -412,7 +412,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
for_each_process(g) {
if (g == tsk->group_leader)
continue;
- if (g->flags & PF_KTHREAD)
+ if (is_kthread(g))
continue;
for_each_thread(g, p) {
@@ -335,7 +335,7 @@ void fput_many(struct file *file, unsigned int refs)
if (atomic_long_sub_and_test(refs, &file->f_count)) {
struct task_struct *task = current;
- if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
+ if (likely(!in_interrupt() && !is_kthread(task))) {
init_task_work(&file->f_u.fu_rcuhead, ____fput);
if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
return;
@@ -368,7 +368,7 @@ void __fput_sync(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
struct task_struct *task = current;
- BUG_ON(!(task->flags & PF_KTHREAD));
+ BUG_ON(!is_kthread(task));
__fput(file);
}
}
@@ -1173,7 +1173,7 @@ static void mntput_no_expire(struct mount *mnt)
if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
struct task_struct *task = current;
- if (likely(!(task->flags & PF_KTHREAD))) {
+ if (likely(!is_kthread(task))) {
init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
if (!task_work_add(task, &mnt->mnt_rcu, true))
return;
@@ -1098,7 +1098,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
continue;
/* do not touch kernel threads or the global init */
- if (p->flags & PF_KTHREAD || is_global_init(p))
+ if (is_kthread(p) || is_global_init(p))
continue;
task_lock(p);
@@ -1695,7 +1695,7 @@ void task_dump_owner(struct task_struct *task, umode_t mode,
kuid_t uid;
kgid_t gid;
- if (unlikely(task->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(task))) {
*ruid = GLOBAL_ROOT_UID;
*rgid = GLOBAL_ROOT_GID;
return;
@@ -907,7 +907,7 @@ static inline bool cgroup_task_freeze(struct task_struct *task)
{
bool ret;
- if (task->flags & PF_KTHREAD)
+ if (is_kthread(task))
return false;
rcu_read_lock();
@@ -199,7 +199,7 @@ static void cgroup_do_freeze(struct cgroup *cgrp, bool freeze)
* Ignore kernel threads here. Freezing cgroups containing
* kthreads isn't supported.
*/
- if (task->flags & PF_KTHREAD)
+ if (is_kthread(task))
continue;
cgroup_freeze_task(task, freeze);
}
@@ -227,7 +227,7 @@ void cgroup_freezer_migrate_task(struct task_struct *task,
/*
* Kernel threads are not supposed to be frozen at all.
*/
- if (task->flags & PF_KTHREAD)
+ if (is_kthread(task))
return;
/*
@@ -5951,7 +5951,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user,
if (user_mode(regs)) {
regs_user->abi = perf_reg_abi(current);
regs_user->regs = regs;
- } else if (!(current->flags & PF_KTHREAD)) {
+ } else if (!is_kthread(current)) {
perf_get_regs_user(regs_user, regs, regs_user_copy);
} else {
regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
@@ -448,7 +448,7 @@ void mm_update_next_owner(struct mm_struct *mm)
* Search through everything else, we should not get here often.
*/
for_each_process(g) {
- if (g->flags & PF_KTHREAD)
+ if (is_kthread(g))
continue;
for_each_thread(g, c) {
if (c->mm == mm)
@@ -459,7 +459,7 @@ void free_task(struct task_struct *tsk)
ftrace_graph_exit_task(tsk);
put_seccomp_filter(tsk);
arch_release_task_struct(tsk);
- if (tsk->flags & PF_KTHREAD)
+ if (is_kthread(tsk))
free_kthread_struct(tsk);
free_task_struct(tsk);
}
@@ -1167,7 +1167,7 @@ struct file *get_task_exe_file(struct task_struct *task)
task_lock(task);
mm = task->mm;
if (mm) {
- if (!(task->flags & PF_KTHREAD))
+ if (!is_kthread(task))
exe_file = get_mm_exe_file(mm);
}
task_unlock(task);
@@ -1191,7 +1191,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
task_lock(task);
mm = task->mm;
if (mm) {
- if (task->flags & PF_KTHREAD)
+ if (is_kthread(task))
mm = NULL;
else
mmget(mm);
@@ -51,7 +51,7 @@ bool freezing_slow_path(struct task_struct *p)
if (pm_nosig_freezing || cgroup_freezing(p))
return true;
- if (pm_freezing && !(p->flags & PF_KTHREAD))
+ if (pm_freezing && !is_kthread(p))
return true;
return false;
@@ -140,7 +140,7 @@ bool freeze_task(struct task_struct *p)
return false;
}
- if (!(p->flags & PF_KTHREAD))
+ if (!is_kthread(p))
fake_signal_wake_up(p);
else
wake_up_state(p, TASK_INTERRUPTIBLE);
@@ -1249,7 +1249,7 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
if (!p)
return handle_exit_race(uaddr, uval, NULL);
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(p))) {
put_task_struct(p);
return -EPERM;
}
@@ -72,7 +72,7 @@ static inline void set_kthread_struct(void *kthread)
static inline struct kthread *to_kthread(struct task_struct *k)
{
- WARN_ON(!(k->flags & PF_KTHREAD));
+ WARN_ON(!is_kthread(k));
return (__force void *)k->set_child_tid;
}
@@ -1205,7 +1205,7 @@ void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
struct kthread *kthread;
- if (!(current->flags & PF_KTHREAD))
+ if (!is_kthread(current))
return;
kthread = to_kthread(current);
if (!kthread)
@@ -1231,7 +1231,7 @@ struct cgroup_subsys_state *kthread_blkcg(void)
{
struct kthread *kthread;
- if (current->flags & PF_KTHREAD) {
+ if (is_kthread(current)) {
kthread = to_kthread(current);
if (kthread)
return kthread->blkcg_css;
@@ -358,7 +358,7 @@ static void klp_send_signals(void)
* Meanwhile the task could migrate itself and the action
* would be meaningless. It is not serious though.
*/
- if (task->flags & PF_KTHREAD) {
+ if (is_kthread(task)) {
/*
* Wake up a kthread which sleeps interruptedly and
* still has not been migrated.
@@ -376,7 +376,7 @@ static int ptrace_attach(struct task_struct *task, long request,
audit_ptrace(task);
retval = -EPERM;
- if (unlikely(task->flags & PF_KTHREAD))
+ if (unlikely(is_kthread(task)))
goto out;
if (same_thread_group(task, current))
goto out;
@@ -1323,7 +1323,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
static inline bool is_per_cpu_kthread(struct task_struct *p)
{
- if (!(p->flags & PF_KTHREAD))
+ if (!is_kthread(p))
return false;
if (p->nr_cpus_allowed != 1)
@@ -1518,7 +1518,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
rq = task_rq_lock(p, &rf);
update_rq_clock(rq);
- if (p->flags & PF_KTHREAD) {
+ if (is_kthread(p)) {
/*
* Kernel threads are allowed on online && !active CPUs
*/
@@ -1544,7 +1544,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
do_set_cpus_allowed(p, new_mask);
- if (p->flags & PF_KTHREAD) {
+ if (is_kthread(p)) {
/*
* For kernel threads that do indeed end up on online &&
* !active we want to ensure they are strict per-CPU threads.
@@ -6649,7 +6649,7 @@ void normalize_rt_tasks(void)
/*
* Only normalize user tasks:
*/
- if (p->flags & PF_KTHREAD)
+ if (is_kthread(p))
continue;
p->se.exec_start = 0;
@@ -321,7 +321,7 @@ void play_idle(unsigned long duration_ms)
*/
WARN_ON_ONCE(current->policy != SCHED_FIFO);
WARN_ON_ONCE(current->nr_cpus_allowed != 1);
- WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
+ WARN_ON_ONCE(!is_kthread(current));
WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
WARN_ON_ONCE(!duration_ms);
@@ -380,7 +380,7 @@ EXPORT_SYMBOL(autoremove_wake_function);
static inline bool is_kthread_should_stop(void)
{
- return (current->flags & PF_KTHREAD) && kthread_should_stop();
+ return is_kthread(current) && kthread_should_stop();
}
/*
@@ -1085,7 +1085,7 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
/*
* Skip useless siginfo allocation for SIGKILL and kernel threads.
*/
- if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
+ if ((sig == SIGKILL) || is_kthread(t))
goto out_set;
/*
@@ -229,7 +229,7 @@ unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
mm_segment_t fs;
/* Trace user stack if not a kernel thread */
- if (current->flags & PF_KTHREAD)
+ if (is_kthread(current))
return 0;
fs = get_fs();
@@ -28,7 +28,7 @@ bool current_is_single_threaded(void)
ret = false;
rcu_read_lock();
for_each_process(p) {
- if (unlikely(p->flags & PF_KTHREAD))
+ if (unlikely(is_kthread(p)))
continue;
if (unlikely(p == task->group_leader))
continue;
@@ -2668,7 +2668,7 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
static inline bool memcg_kmem_bypass(void)
{
- if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
+ if (in_interrupt() || !current->mm || is_kthread(current))
return true;
return false;
}
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p)
{
if (is_global_init(p))
return true;
- if (p->flags & PF_KTHREAD)
+ if (is_kthread(p))
return true;
return false;
}
@@ -919,7 +919,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
* No use_mm() user needs to read from the userspace so we are
* ok to reap it.
*/
- if (unlikely(p->flags & PF_KTHREAD))
+ if (unlikely(is_kthread(p)))
continue;
do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
}
@@ -831,7 +831,7 @@ static inline struct capture_control *task_capc(struct zone *zone)
struct capture_control *capc = current->capture_control;
return capc &&
- !(current->flags & PF_KTHREAD) &&
+ !is_kthread(current) &&
!capc->page &&
capc->cc->zone == zone &&
capc->cc->direct_compaction ? capc : NULL;
@@ -30,7 +30,7 @@
*/
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
- return current->mm == mm && !(current->flags & PF_KTHREAD);
+ return current->mm == mm && !is_kthread(current);
}
void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
@@ -3110,7 +3110,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
* committing a transaction where throttling it could forcing other
* processes to block on log_wait_commit().
*/
- if (current->flags & PF_KTHREAD)
+ if (is_kthread(current))
goto out;
/*
@@ -671,7 +671,7 @@ bool smack_privileged(int cap)
/*
* All kernel tasks are privileged
*/
- if (unlikely(current->flags & PF_KTHREAD))
+ if (unlikely(is_kthread(current)))
return true;
return smack_privileged_cred(cap, current_cred());
@@ -2257,7 +2257,7 @@ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
/*
* Sockets created by kernel threads receive web label.
*/
- if (unlikely(current->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(current))) {
ssp->smk_in = &smack_known_web;
ssp->smk_out = &smack_known_web;
} else {
@@ -2761,7 +2761,7 @@ static int smack_socket_post_create(struct socket *sock, int family,
/*
* Sockets created by kernel threads receive web label.
*/
- if (unlikely(current->flags & PF_KTHREAD)) {
+ if (unlikely(is_kthread(current))) {
ssp = sock->sk->sk_security;
ssp->smk_in = &smack_known_web;
ssp->smk_out = &smack_known_web;
@@ -79,7 +79,7 @@ static void report_access(const char *access, struct task_struct *target,
assert_spin_locked(&target->alloc_lock); /* for target->comm */
- if (current->flags & PF_KTHREAD) {
+ if (is_kthread(current)) {
/* I don't think kthreads call task_work_run() before exiting.
* Imagine angry ranting about procfs here.
*/
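
Note: the is_kthread() helper that the conversions above rely on is added by an
earlier patch in this series and is not shown here. It is presumably just a
trivial wrapper around the PF_KTHREAD check, roughly along these lines (a
sketch only; the exact form and location in that patch may differ):

----
/* Sketch: assumed definition of the helper, e.g. in <linux/sched.h> */
static inline bool is_kthread(struct task_struct *p)
{
	return p->flags & PF_KTHREAD;
}
----

A semantic patch like the rule quoted below is typically applied tree-wide
with something like:

	spatch --sp-file is_kthread.cocci --in-place --dir .

where is_kthread.cocci is an illustrative name for a file holding the rule.
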
Now that we have is_kthread(), let's convert existing open-coded checks of the form: task->flags & PF_KTHREAD ... over to the new helper, which makes things a little easier to read, and sets a consistent example for new code to follow. Generated with coccinelle: ---- virtual patch @ depends on patch @ expression E; @@ - (E->flags & PF_KTHREAD) + is_kthread(E) ---- ... though this didn't pick up the instance in <linux/cgroup.h>, which I fixed up manually. Instances checking multiple PF_* flags at ocne are left as-is for now. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> --- arch/alpha/kernel/process.c | 2 +- arch/arc/kernel/process.c | 2 +- arch/arm/kernel/process.c | 2 +- arch/arm/mm/init.c | 2 +- arch/arm64/kernel/process.c | 4 ++-- arch/c6x/kernel/process.c | 2 +- arch/csky/kernel/process.c | 2 +- arch/h8300/kernel/process.c | 2 +- arch/hexagon/kernel/process.c | 2 +- arch/ia64/kernel/process.c | 2 +- arch/m68k/kernel/process.c | 2 +- arch/microblaze/kernel/process.c | 2 +- arch/mips/kernel/process.c | 4 ++-- arch/nds32/kernel/process.c | 4 ++-- arch/nios2/kernel/process.c | 2 +- arch/openrisc/kernel/process.c | 2 +- arch/parisc/kernel/process.c | 2 +- arch/powerpc/kernel/process.c | 2 +- arch/riscv/kernel/process.c | 2 +- arch/s390/kernel/process.c | 2 +- arch/sh/kernel/process_32.c | 2 +- arch/sh/kernel/process_64.c | 2 +- arch/sparc/kernel/process_32.c | 2 +- arch/sparc/kernel/process_64.c | 2 +- arch/um/kernel/process.c | 2 +- arch/unicore32/kernel/process.c | 2 +- arch/x86/kernel/fpu/core.c | 2 +- arch/x86/kernel/process_32.c | 2 +- arch/x86/kernel/process_64.c | 2 +- arch/xtensa/kernel/process.c | 2 +- block/blk-cgroup.c | 2 +- drivers/tty/sysrq.c | 2 +- fs/coredump.c | 2 +- fs/file_table.c | 4 ++-- fs/namespace.c | 2 +- fs/proc/base.c | 4 ++-- include/linux/cgroup.h | 2 +- kernel/cgroup/freezer.c | 4 ++-- kernel/events/core.c | 2 +- kernel/exit.c | 2 +- kernel/fork.c | 6 +++--- kernel/freezer.c | 4 ++-- kernel/futex.c | 2 +- kernel/kthread.c | 6 +++--- kernel/livepatch/transition.c | 2 +- kernel/ptrace.c | 2 +- kernel/sched/core.c | 8 ++++---- kernel/sched/idle.c | 2 +- kernel/sched/wait.c | 2 +- kernel/signal.c | 2 +- kernel/stacktrace.c | 2 +- lib/is_single_threaded.c | 2 +- mm/memcontrol.c | 2 +- mm/oom_kill.c | 4 ++-- mm/page_alloc.c | 2 +- mm/vmacache.c | 2 +- mm/vmscan.c | 2 +- security/smack/smack_access.c | 2 +- security/smack/smack_lsm.c | 4 ++-- security/yama/yama_lsm.c | 2 +- 60 files changed, 76 insertions(+), 76 deletions(-) -- 2.11.0