int nr_active; /* L: nr of active works */
int max_active; /* L: max active works */
struct list_head delayed_works; /* L: delayed works */
-};
+ struct list_head pwqs_node; /* I: node on wq->pwqs */
+} __aligned(1 << WORK_STRUCT_FLAG_BITS);
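The forced alignment exists because work->data packs a pool_workqueue pointer and the work flag bits into a single word: keeping pwq addresses aligned to 1 << WORK_STRUCT_FLAG_BITS guarantees the low bits stay free for flags. A minimal userspace sketch of the idea, assuming a hypothetical FLAG_BITS value (not kernel code):

	#include <assert.h>
	#include <stdio.h>

	/* stand-in for WORK_STRUCT_FLAG_BITS; the real value is config-dependent */
	#define FLAG_BITS 8
	#define FLAG_MASK ((1UL << FLAG_BITS) - 1)

	struct pwq { int nr_active; } __attribute__((aligned(1 << FLAG_BITS)));

	int main(void)
	{
		static struct pwq pwq;
		/* alignment zeroes the low FLAG_BITS of the address, so they
		 * can carry flags next to the pointer in one word */
		unsigned long data = (unsigned long)&pwq | 0x5UL;

		assert((struct pwq *)(data & ~FLAG_MASK) == &pwq);
		printf("flags=%#lx\n", data & FLAG_MASK);
		return 0;
	}

The same invariant is what the WARN_ON alignment check and the pwq kmem_cache added further down rely on.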
/*
* Structure used to wait for workqueue flush.
struct pool_workqueue *single;
unsigned long v;
} pool_wq; /* I: pwq's */
+ struct list_head pwqs; /* I: all pwqs of this wq */
struct list_head list; /* W: list of all workqueues */
struct mutex flush_mutex; /* protects wq flushing */
char name[]; /* I: workqueue name */
};
+static struct kmem_cache *pwq_cache;
+
struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
return WORK_CPU_END;
}
-static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
- struct workqueue_struct *wq)
-{
- return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
-}
-
/*
* CPU iterators
*
*
* for_each_wq_cpu() : possible CPUs + WORK_CPU_UNBOUND
* for_each_online_wq_cpu() : online CPUs + WORK_CPU_UNBOUND
- * for_each_pwq_cpu() : possible CPUs for bound workqueues,
- * WORK_CPU_UNBOUND for unbound workqueues
*/
#define for_each_wq_cpu(cpu) \
for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, 3); \
(cpu) < WORK_CPU_END; \
(cpu) = __next_wq_cpu((cpu), cpu_possible_mask, 3))
-#define for_each_pwq_cpu(cpu, wq) \
- for ((cpu) = __next_pwq_cpu(-1, cpu_possible_mask, (wq)); \
- (cpu) < WORK_CPU_END; \
- (cpu) = __next_pwq_cpu((cpu), cpu_possible_mask, (wq)))
+/**
+ * for_each_pool - iterate through all worker_pools in the system
+ * @pool: iteration cursor
+ * @id: integer used for iteration
+ */
+#define for_each_pool(pool, id) \
+ idr_for_each_entry(&worker_pool_idr, pool, id)
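+
For orientation, a hedged usage sketch of the new pool iterator (kernel context assumed; keeping worker_pool_idr stable, e.g. via workqueue_lock, is the caller's job and an assumption here, not something this hunk shows):

	struct worker_pool *pool;
	int id;

	for_each_pool(pool, id)
		pr_debug("pool %d: cpu=%d flags=0x%x\n",
			 id, pool->cpu, pool->flags);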
+
+/**
+ * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
+ * @pwq: iteration cursor
+ * @wq: the target workqueue
+ */
+#define for_each_pwq(pwq, wq) \
+ list_for_each_entry((pwq), &(wq)->pwqs, pwqs_node)
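Likewise for the pwq iterator; dump_pwqs() below is a hypothetical helper for illustration, not part of the patch:

	static void dump_pwqs(struct workqueue_struct *wq)
	{
		struct pool_workqueue *pwq;

		for_each_pwq(pwq, wq)
			pr_info("%s: pwq %p active %d/%d\n",
				wq->name, pwq, pwq->nr_active, pwq->max_active);
	}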
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static inline void set_work_data(struct work_struct *work, unsigned long data,
unsigned long flags)
{
- BUG_ON(!work_pending(work));
+ WARN_ON_ONCE(!work_pending(work));
atomic_long_set(&work->data, data | flags | work_static(work));
}
pool = worker->pool;
/* this can only happen on the local cpu */
- BUG_ON(cpu != raw_smp_processor_id());
+ if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
+ return NULL;
/*
* The counterpart of the following dec_and_test, implied mb,
{
struct worker_pool *pool = worker->pool;
- BUG_ON(worker->flags & WORKER_IDLE);
- BUG_ON(!list_empty(&worker->entry) &&
- (worker->hentry.next || worker->hentry.pprev));
+ if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
+ WARN_ON_ONCE(!list_empty(&worker->entry) &&
+ (worker->hentry.next || worker->hentry.pprev)))
+ return;
/* can't use worker_set_flags(), also called from start_worker() */
worker->flags |= WORKER_IDLE;
{
struct worker_pool *pool = worker->pool;
- BUG_ON(!(worker->flags & WORKER_IDLE));
+ if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
+ return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
list_del_init(&worker->entry);
}
/**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
- * @worker: self
+ * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
+ * @pool: target worker_pool
+ *
+ * Bind %current to the cpu of @pool if it is associated and lock @pool.
*
* Works which are scheduled while the cpu is online must at least be
* scheduled to a worker which is bound to the cpu so that if they are
 * %true if the associated pool is online (%current is successfully
 * bound), %false if offline.
*/
-static bool worker_maybe_bind_and_lock(struct worker *worker)
+static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
__acquires(&pool->lock)
{
- struct worker_pool *pool = worker->pool;
-
while (true) {
/*
* The following call may fail, succeed or succeed
static void idle_worker_rebind(struct worker *worker)
{
/* CPU may go down again in between, clear UNBOUND only on success */
- if (worker_maybe_bind_and_lock(worker))
+ if (worker_maybe_bind_and_lock(worker->pool))
worker_clr_flags(worker, WORKER_UNBOUND);
/* rebind complete, become available again */
{
struct worker *worker = container_of(work, struct worker, rebind_work);
- if (worker_maybe_bind_and_lock(worker))
+ if (worker_maybe_bind_and_lock(worker->pool))
worker_clr_flags(worker, WORKER_UNBOUND);
spin_unlock_irq(&worker->pool->lock);
int id = worker->id;
/* sanity check frenzy */
- BUG_ON(worker->current_work);
- BUG_ON(!list_empty(&worker->scheduled));
+ if (WARN_ON(worker->current_work) ||
+ WARN_ON(!list_empty(&worker->scheduled)))
+ return;
if (worker->flags & WORKER_STARTED)
pool->nr_workers--;
del_timer_sync(&pool->mayday_timer);
spin_lock_irq(&pool->lock);
start_worker(worker);
- BUG_ON(need_to_create_worker(pool));
+ if (WARN_ON_ONCE(need_to_create_worker(pool)))
+ goto restart;
return true;
}
* on @pool's current state. Try it and adjust
* %WORKER_UNBOUND accordingly.
*/
- if (worker_maybe_bind_and_lock(worker))
+ if (worker_maybe_bind_and_lock(pool))
worker->flags &= ~WORKER_UNBOUND;
else
worker->flags |= WORKER_UNBOUND;
* preparing to process a work or actually processing it.
* Make sure nobody diddled with it while I was sleeping.
*/
- BUG_ON(!list_empty(&worker->scheduled));
+ WARN_ON_ONCE(!list_empty(&worker->scheduled));
/*
* When control reaches this point, we're guaranteed to have
mayday_clear_cpu(cpu, wq->mayday_mask);
/* migrate to the target cpu if possible */
+ worker_maybe_bind_and_lock(pool);
rescuer->pool = pool;
- worker_maybe_bind_and_lock(rescuer);
/*
* Slurp in all works issued via this workqueue and
* process'em.
*/
- BUG_ON(!list_empty(&rescuer->scheduled));
+ WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
list_for_each_entry_safe(work, n, &pool->worklist, entry)
if (get_work_pwq(work) == pwq)
move_linked_works(work, scheduled, &n);
if (keep_working(pool))
wake_up_worker(pool);
+ rescuer->pool = NULL;
spin_unlock_irq(&pool->lock);
}
int flush_color, int work_color)
{
bool wait = false;
- unsigned int cpu;
+ struct pool_workqueue *pwq;
if (flush_color >= 0) {
- BUG_ON(atomic_read(&wq->nr_pwqs_to_flush));
+ WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
atomic_set(&wq->nr_pwqs_to_flush, 1);
}
- for_each_pwq_cpu(cpu, wq) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
+ for_each_pwq(pwq, wq) {
struct worker_pool *pool = pwq->pool;
spin_lock_irq(&pool->lock);
if (flush_color >= 0) {
- BUG_ON(pwq->flush_color != -1);
+ WARN_ON_ONCE(pwq->flush_color != -1);
if (pwq->nr_in_flight[flush_color]) {
pwq->flush_color = flush_color;
}
if (work_color >= 0) {
- BUG_ON(work_color != work_next_color(pwq->work_color));
+ WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
pwq->work_color = work_color;
}
* becomes our flush_color and work_color is advanced
* by one.
*/
- BUG_ON(!list_empty(&wq->flusher_overflow));
+ WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
this_flusher.flush_color = wq->work_color;
wq->work_color = next_color;
if (!wq->first_flusher) {
/* no flush in progress, become the first flusher */
- BUG_ON(wq->flush_color != this_flusher.flush_color);
+ WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
wq->first_flusher = &this_flusher;
}
} else {
/* wait in queue */
- BUG_ON(wq->flush_color == this_flusher.flush_color);
+ WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
list_add_tail(&this_flusher.list, &wq->flusher_queue);
flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
}
wq->first_flusher = NULL;
- BUG_ON(!list_empty(&this_flusher.list));
- BUG_ON(wq->flush_color != this_flusher.flush_color);
+ WARN_ON_ONCE(!list_empty(&this_flusher.list));
+ WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
while (true) {
struct wq_flusher *next, *tmp;
complete(&next->done);
}
- BUG_ON(!list_empty(&wq->flusher_overflow) &&
- wq->flush_color != work_next_color(wq->work_color));
+ WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
+ wq->flush_color != work_next_color(wq->work_color));
/* this flush_color is finished, advance by one */
wq->flush_color = work_next_color(wq->flush_color);
}
if (list_empty(&wq->flusher_queue)) {
- BUG_ON(wq->flush_color != wq->work_color);
+ WARN_ON_ONCE(wq->flush_color != wq->work_color);
break;
}
* Need to flush more colors. Make the next flusher
* the new first flusher and arm pwqs.
*/
- BUG_ON(wq->flush_color == wq->work_color);
- BUG_ON(wq->flush_color != next->flush_color);
+ WARN_ON_ONCE(wq->flush_color == wq->work_color);
+ WARN_ON_ONCE(wq->flush_color != next->flush_color);
list_del_init(&next->list);
wq->first_flusher = next;
void drain_workqueue(struct workqueue_struct *wq)
{
unsigned int flush_cnt = 0;
- unsigned int cpu;
+ struct pool_workqueue *pwq;
/*
* __queue_work() needs to test whether there are drainers, is much
* hotter than drain_workqueue() and already looks at @wq->flags.
* Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
*/
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
if (!wq->nr_drainers++)
wq->flags |= WQ_DRAINING;
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
reflush:
flush_workqueue(wq);
- for_each_pwq_cpu(cpu, wq) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
+ for_each_pwq(pwq, wq) {
bool drained;
spin_lock_irq(&pwq->pool->lock);
goto reflush;
}
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
if (!--wq->nr_drainers)
wq->flags &= ~WQ_DRAINING;
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
return system_wq != NULL;
}
-static int alloc_pwqs(struct workqueue_struct *wq)
+static int alloc_and_link_pwqs(struct workqueue_struct *wq)
{
- /*
- * pwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
- * Make sure that the alignment isn't lower than that of
- * unsigned long long.
- */
- const size_t size = sizeof(struct pool_workqueue);
- const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
- __alignof__(unsigned long long));
+ bool highpri = wq->flags & WQ_HIGHPRI;
+ int cpu;
- if (!(wq->flags & WQ_UNBOUND))
- wq->pool_wq.pcpu = __alloc_percpu(size, align);
- else {
- void *ptr;
+ if (!(wq->flags & WQ_UNBOUND)) {
+ wq->pool_wq.pcpu = alloc_percpu(struct pool_workqueue);
+ if (!wq->pool_wq.pcpu)
+ return -ENOMEM;
- /*
- * Allocate enough room to align pwq and put an extra
- * pointer at the end pointing back to the originally
- * allocated pointer which will be used for free.
- */
- ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
- if (ptr) {
- wq->pool_wq.single = PTR_ALIGN(ptr, align);
- *(void **)(wq->pool_wq.single + 1) = ptr;
+ for_each_possible_cpu(cpu) {
+ struct pool_workqueue *pwq = get_pwq(cpu, wq);
+
+ pwq->pool = get_std_worker_pool(cpu, highpri);
+ list_add_tail(&pwq->pwqs_node, &wq->pwqs);
}
+ } else {
+ struct pool_workqueue *pwq;
+
+ pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
+ if (!pwq)
+ return -ENOMEM;
+
+ wq->pool_wq.single = pwq;
+ pwq->pool = get_std_worker_pool(WORK_CPU_UNBOUND, highpri);
+ list_add_tail(&pwq->pwqs_node, &wq->pwqs);
}
- /* just in case, make sure it's actually aligned */
- BUG_ON(!IS_ALIGNED(wq->pool_wq.v, align));
- return wq->pool_wq.v ? 0 : -ENOMEM;
+ return 0;
}
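Both branches populate the same pool_wq union that get_pwq() resolves. For reference, the lookup helper looked roughly like this at the time (reproduced from memory, details may differ):

	static struct pool_workqueue *get_pwq(unsigned int cpu,
					      struct workqueue_struct *wq)
	{
		if (!(wq->flags & WQ_UNBOUND)) {
			if (likely(cpu < nr_cpu_ids))
				return per_cpu_ptr(wq->pool_wq.pcpu, cpu);
		} else if (likely(cpu == WORK_CPU_UNBOUND))
			return wq->pool_wq.single;
		return NULL;
	}

The possible NULL return is why freeze_workqueues_begin() below tests pwq before dereferencing it.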
static void free_pwqs(struct workqueue_struct *wq)
{
if (!(wq->flags & WQ_UNBOUND))
free_percpu(wq->pool_wq.pcpu);
- else if (wq->pool_wq.single) {
- /* the pointer to free is stored right after the pwq */
- kfree(*(void **)(wq->pool_wq.single + 1));
- }
+ else
+ kmem_cache_free(pwq_cache, wq->pool_wq.single);
}
static int wq_clamp_max_active(int max_active, unsigned int flags,
{
va_list args, args1;
struct workqueue_struct *wq;
- unsigned int cpu;
+ struct pool_workqueue *pwq;
size_t namelen;
/* determine namelen, allocate wq and format name */
wq->saved_max_active = max_active;
mutex_init(&wq->flush_mutex);
atomic_set(&wq->nr_pwqs_to_flush, 0);
+ INIT_LIST_HEAD(&wq->pwqs);
INIT_LIST_HEAD(&wq->flusher_queue);
INIT_LIST_HEAD(&wq->flusher_overflow);
lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
INIT_LIST_HEAD(&wq->list);
- if (alloc_pwqs(wq) < 0)
+ if (alloc_and_link_pwqs(wq) < 0)
goto err;
- for_each_pwq_cpu(cpu, wq) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
-
+ for_each_pwq(pwq, wq) {
BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
- pwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI);
pwq->wq = wq;
pwq->flush_color = -1;
pwq->max_active = max_active;
* list. Grab it, set max_active accordingly and add the new
* workqueue to workqueues list.
*/
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
- for_each_pwq_cpu(cpu, wq)
- get_pwq(cpu, wq)->max_active = 0;
+ for_each_pwq(pwq, wq)
+ pwq->max_active = 0;
list_add(&wq->list, &workqueues);
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
return wq;
err:
*/
void destroy_workqueue(struct workqueue_struct *wq)
{
- unsigned int cpu;
+ struct pool_workqueue *pwq;
/* drain it before proceeding with destruction */
drain_workqueue(wq);
+ /* sanity checks */
+ for_each_pwq(pwq, wq) {
+ int i;
+
+ for (i = 0; i < WORK_NR_COLORS; i++)
+ if (WARN_ON(pwq->nr_in_flight[i]))
+ return;
+ if (WARN_ON(pwq->nr_active) ||
+ WARN_ON(!list_empty(&pwq->delayed_works)))
+ return;
+ }
+
/*
* wq list is used to freeze wq, remove from list after
* flushing is complete in case freeze races us.
*/
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
list_del(&wq->list);
- spin_unlock(&workqueue_lock);
-
- /* sanity check */
- for_each_pwq_cpu(cpu, wq) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
- int i;
-
- for (i = 0; i < WORK_NR_COLORS; i++)
- BUG_ON(pwq->nr_in_flight[i]);
- BUG_ON(pwq->nr_active);
- BUG_ON(!list_empty(&pwq->delayed_works));
- }
+ spin_unlock_irq(&workqueue_lock);
if (wq->flags & WQ_RESCUER) {
kthread_stop(wq->rescuer->task);
*/
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
{
- unsigned int cpu;
+ struct pool_workqueue *pwq;
max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
wq->saved_max_active = max_active;
- for_each_pwq_cpu(cpu, wq) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
+ for_each_pwq(pwq, wq) {
struct worker_pool *pool = pwq->pool;
- spin_lock_irq(&pool->lock);
+ spin_lock(&pool->lock);
if (!(wq->flags & WQ_FREEZABLE) ||
!(pool->flags & POOL_FREEZING))
pwq_set_max_active(pwq, max_active);
- spin_unlock_irq(&pool->lock);
+ spin_unlock(&pool->lock);
}
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
int i;
for_each_std_worker_pool(pool, cpu) {
- BUG_ON(cpu != smp_processor_id());
+ WARN_ON_ONCE(cpu != smp_processor_id());
mutex_lock(&pool->assoc_mutex);
spin_lock_irq(&pool->lock);
*/
void freeze_workqueues_begin(void)
{
- unsigned int cpu;
+ struct worker_pool *pool;
+ int id;
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
- BUG_ON(workqueue_freezing);
+ WARN_ON_ONCE(workqueue_freezing);
workqueue_freezing = true;
- for_each_wq_cpu(cpu) {
- struct worker_pool *pool;
+ for_each_pool(pool, id) {
struct workqueue_struct *wq;
- for_each_std_worker_pool(pool, cpu) {
- spin_lock_irq(&pool->lock);
-
- WARN_ON_ONCE(pool->flags & POOL_FREEZING);
- pool->flags |= POOL_FREEZING;
+ spin_lock(&pool->lock);
- list_for_each_entry(wq, &workqueues, list) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
+ WARN_ON_ONCE(pool->flags & POOL_FREEZING);
+ pool->flags |= POOL_FREEZING;
- if (pwq && pwq->pool == pool &&
- (wq->flags & WQ_FREEZABLE))
- pwq->max_active = 0;
- }
+ list_for_each_entry(wq, &workqueues, list) {
+ struct pool_workqueue *pwq = get_pwq(pool->cpu, wq);
- spin_unlock_irq(&pool->lock);
+ if (pwq && pwq->pool == pool &&
+ (wq->flags & WQ_FREEZABLE))
+ pwq->max_active = 0;
}
+
+ spin_unlock(&pool->lock);
}
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
}
/**
unsigned int cpu;
bool busy = false;
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
- BUG_ON(!workqueue_freezing);
+ WARN_ON_ONCE(!workqueue_freezing);
for_each_wq_cpu(cpu) {
struct workqueue_struct *wq;
if (!pwq || !(wq->flags & WQ_FREEZABLE))
continue;
- BUG_ON(pwq->nr_active < 0);
+ WARN_ON_ONCE(pwq->nr_active < 0);
if (pwq->nr_active) {
busy = true;
goto out_unlock;
}
}
out_unlock:
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
return busy;
}
{
unsigned int cpu;
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
if (!workqueue_freezing)
goto out_unlock;
struct workqueue_struct *wq;
for_each_std_worker_pool(pool, cpu) {
- spin_lock_irq(&pool->lock);
+ spin_lock(&pool->lock);
WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
pool->flags &= ~POOL_FREEZING;
wake_up_worker(pool);
- spin_unlock_irq(&pool->lock);
+ spin_unlock(&pool->lock);
}
}
workqueue_freezing = false;
out_unlock:
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
}
#endif /* CONFIG_FREEZER */
BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
WORK_CPU_END * NR_STD_WORKER_POOLS);
+ WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
+
+ pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
+
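KMEM_CACHE() takes the cache name, size, and alignment from the struct definition itself, which is what makes the __aligned() annotation on pool_workqueue sufficient; the line above expands to roughly:

	pwq_cache = kmem_cache_create("pool_workqueue",
				      sizeof(struct pool_workqueue),
				      __alignof__(struct pool_workqueue),
				      SLAB_PANIC, NULL);

SLAB_PANIC makes boot fail loudly if the cache cannot be created.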
cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);