/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion done;

	struct list_head list;
};

struct kthread {
	int should_stop;
	void *data;
	struct completion exited;
};

#define to_kthread(tsk)	\
	container_of((tsk)->vfork_done, struct kthread, exited)

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
int kthread_should_stop(void)
{
	return to_kthread(current)->should_stop;
}
EXPORT_SYMBOL(kthread_should_stop);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
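
/*
 * Illustrative sketch, not part of the original file: given only the
 * task_struct of a kthread, the creator's @data pointer can be recovered
 * with kthread_data(). example_ctx and example_signal_worker are
 * hypothetical names.
 */
struct example_ctx {
	atomic_t pending;
};

static void example_signal_worker(struct task_struct *worker_task)
{
	struct example_ctx *ctx = kthread_data(worker_task);

	atomic_inc(&ctx->pending);	/* caller guarantees task validity */
	wake_up_process(worker_task);
}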

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;
	int ret;

	self.should_stop = 0;
	self.data = data;
	init_completion(&self.exited);
	current->vfork_done = &self.exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(&create->done);
	schedule();

	ret = -EINTR;
	if (!self.should_stop)
		ret = threadfn(data);

	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
		complete(&create->done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
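
/*
 * Illustrative sketch, not part of the original file: creating a named
 * thread with a NUMA-local stack and starting it. example_start and ctx
 * are hypothetical; example_thread_fn is the sketch shown earlier.
 */
static struct task_struct *example_start(void *ctx, int node)
{
	struct task_struct *task;

	task = kthread_create_on_node(example_thread_fn, ctx, node,
				      "example/%d", node);
	if (!IS_ERR(task))
		wake_up_process(task);	/* thread is created stopped */
	return task;
}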

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
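
/*
 * Illustrative sketch, not part of the original file: the create ->
 * bind -> wake sequence kthread_bind() expects; the bind must happen
 * while the new thread is still stopped. example_start_on_cpu is a
 * hypothetical name.
 */
static struct task_struct *example_start_on_cpu(void *ctx, unsigned int cpu)
{
	struct task_struct *task;

	task = kthread_create(example_thread_fn, ctx, "example/%u", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);
		wake_up_process(task);
	}
	return task;
}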

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);
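
/*
 * Illustrative sketch, not part of the original file: stopping a thread
 * started by the earlier sketches. kthread_stop() wakes the thread,
 * waits for it to exit, and returns its threadfn's return value.
 */
static int example_shutdown(struct task_struct *task)
{
	return kthread_stop(task);
}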

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info,
					    list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
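
/*
 * Illustrative sketch, not part of the original file: attaching a
 * dedicated thread to a worker. example_worker and example_worker_start
 * are hypothetical names; DEFINE_KTHREAD_WORKER() and kthread_run() come
 * from <linux/kthread.h>.
 */
static DEFINE_KTHREAD_WORKER(example_worker);

static struct task_struct *example_worker_start(void)
{
	return kthread_run(kthread_worker_fn, &example_worker, "example_worker");
}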

/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
			       struct kthread_work *work,
			       struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution.  @task
 * must have been created with kthread_worker_create().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		insert_kthread_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
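
/*
 * Illustrative sketch, not part of the original file: defining a work
 * item and queueing it on the worker sketched above. example_work_fn
 * and example_work are hypothetical; the callback runs in the worker
 * thread's context.
 */
static void example_work_fn(struct kthread_work *work)
{
	/* executes in the example_worker thread */
}

static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

static bool example_submit(void)
{
	return queue_kthread_work(&example_worker, &example_work);
}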

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		/* the worker changed under us; retry against the new one */
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		insert_kthread_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		insert_kthread_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);
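
/*
 * Illustrative sketch, not part of the original file: waiting on a
 * previously queued item before reusing it, relying on
 * flush_kthread_work() being a no-op when the work is idle.
 */
static void example_resubmit(void)
{
	flush_kthread_work(&example_work);	/* waits if queued or running */
	queue_kthread_work(&example_worker, &example_work);
}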

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
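
/*
 * Illustrative sketch, not part of the original file: a clean teardown
 * of the worker sketched above; draining pending work before stopping
 * the attached thread.
 */
static int example_worker_stop(struct task_struct *task)
{
	flush_kthread_worker(&example_worker);
	return kthread_stop(task);
}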