workqueue: introduce for_each_pool()
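Summary of the diff below: for_each_pool() iterates every worker_pool in the system through worker_pool_idr, and for_each_pwq() walks a workqueue's pool_workqueues via the new wq->pwqs list, replacing the CPU-based for_each_pwq_cpu() iterator.  struct pool_workqueue gains a pwqs_node list node, is forced to 1 << WORK_STRUCT_FLAG_BITS alignment and is now allocated from a dedicated pwq_cache kmem_cache (alloc_pwqs() becomes alloc_and_link_pwqs()).  worker_maybe_bind_and_lock() takes the target worker_pool instead of a worker, workqueue_lock is acquired with interrupts disabled, and most BUG_ON() sanity checks are relaxed to WARN_ON[_ONCE]().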
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f456433cf5351eaa64726502738916c5f67235a9..55494e3f9f3bbca8be69161096f96c25dfe65da7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -169,7 +169,8 @@ struct pool_workqueue {
        int                     nr_active;      /* L: nr of active works */
        int                     max_active;     /* L: max active works */
        struct list_head        delayed_works;  /* L: delayed works */
-};
+       struct list_head        pwqs_node;      /* I: node on wq->pwqs */
+} __aligned(1 << WORK_STRUCT_FLAG_BITS);
 
 /*
  * Structure used to wait for workqueue flush.
@@ -212,6 +213,7 @@ struct workqueue_struct {
                struct pool_workqueue                   *single;
                unsigned long                           v;
        } pool_wq;                              /* I: pwq's */
+       struct list_head        pwqs;           /* I: all pwqs of this wq */
        struct list_head        list;           /* W: list of all workqueues */
 
        struct mutex            flush_mutex;    /* protects wq flushing */
@@ -233,6 +235,8 @@ struct workqueue_struct {
        char                    name[];         /* I: workqueue name */
 };
 
+static struct kmem_cache *pwq_cache;
+
 struct workqueue_struct *system_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
@@ -269,12 +273,6 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
        return WORK_CPU_END;
 }
 
-static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
-                                struct workqueue_struct *wq)
-{
-       return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
-}
-
 /*
  * CPU iterators
  *
@@ -285,8 +283,6 @@ static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
  *
  * for_each_wq_cpu()           : possible CPUs + WORK_CPU_UNBOUND
  * for_each_online_wq_cpu()    : online CPUs + WORK_CPU_UNBOUND
- * for_each_pwq_cpu()          : possible CPUs for bound workqueues,
- *                               WORK_CPU_UNBOUND for unbound workqueues
  */
 #define for_each_wq_cpu(cpu)                                           \
        for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, 3);           \
@@ -298,10 +294,21 @@ static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
             (cpu) < WORK_CPU_END;                                      \
             (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
 
-#define for_each_pwq_cpu(cpu, wq)                                      \
-       for ((cpu) = __next_pwq_cpu(-1, cpu_possible_mask, (wq));       \
-            (cpu) < WORK_CPU_END;                                      \
-            (cpu) = __next_pwq_cpu((cpu), cpu_possible_mask, (wq)))
+/**
+ * for_each_pool - iterate through all worker_pools in the system
+ * @pool: iteration cursor
+ * @id: integer used for iteration
+ */
+#define for_each_pool(pool, id)                                                \
+       idr_for_each_entry(&worker_pool_idr, pool, id)
+
+/**
+ * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
+ * @pwq: iteration cursor
+ * @wq: the target workqueue
+ */
+#define for_each_pwq(pwq, wq)                                          \
+       list_for_each_entry((pwq), &(wq)->pwqs, pwqs_node)
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
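The new iterators are exercised below in flush_workqueue_prep_pwqs(), drain_workqueue(), destroy_workqueue() and freeze_workqueues_begin().  As a minimal standalone sketch (illustrative only: count_pool_pwqs() is a made-up helper, and any pool/pwq fields a real caller touches would need the usual workqueue_lock / pool->lock protection):

static int count_pool_pwqs(struct workqueue_struct *wq)
{
	struct worker_pool *pool;
	struct pool_workqueue *pwq;
	int id, nr = 0;

	/* every worker_pool registered in worker_pool_idr */
	for_each_pool(pool, id)
		pr_debug("worker_pool %d\n", id);

	/* every pool_workqueue linked on wq->pwqs */
	for_each_pwq(pwq, wq)
		nr++;

	return nr;
}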
@@ -530,7 +537,7 @@ static int work_next_color(int color)
 static inline void set_work_data(struct work_struct *work, unsigned long data,
                                 unsigned long flags)
 {
-       BUG_ON(!work_pending(work));
+       WARN_ON_ONCE(!work_pending(work));
        atomic_long_set(&work->data, data | flags | work_static(work));
 }
 
@@ -785,7 +792,8 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
        pool = worker->pool;
 
        /* this can only happen on the local cpu */
-       BUG_ON(cpu != raw_smp_processor_id());
+       if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
+               return NULL;
 
        /*
         * The counterpart of the following dec_and_test, implied mb,
@@ -1458,9 +1466,10 @@ static void worker_enter_idle(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
 
-       BUG_ON(worker->flags & WORKER_IDLE);
-       BUG_ON(!list_empty(&worker->entry) &&
-              (worker->hentry.next || worker->hentry.pprev));
+       if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
+           WARN_ON_ONCE(!list_empty(&worker->entry) &&
+                        (worker->hentry.next || worker->hentry.pprev)))
+               return;
 
        /* can't use worker_set_flags(), also called from start_worker() */
        worker->flags |= WORKER_IDLE;
@@ -1497,15 +1506,18 @@ static void worker_leave_idle(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
 
-       BUG_ON(!(worker->flags & WORKER_IDLE));
+       if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
+               return;
        worker_clr_flags(worker, WORKER_IDLE);
        pool->nr_idle--;
        list_del_init(&worker->entry);
 }
 
 /**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
- * @worker: self
+ * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
+ * @pool: target worker_pool
+ *
+ * Bind %current to the cpu of @pool if it is associated and lock @pool.
  *
  * Works which are scheduled while the cpu is online must at least be
  * scheduled to a worker which is bound to the cpu so that if they are
@@ -1533,11 +1545,9 @@ static void worker_leave_idle(struct worker *worker)
  * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
-static bool worker_maybe_bind_and_lock(struct worker *worker)
+static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
 __acquires(&pool->lock)
 {
-       struct worker_pool *pool = worker->pool;
-
        while (true) {
                /*
                 * The following call may fail, succeed or succeed
@@ -1575,7 +1585,7 @@ __acquires(&pool->lock)
 static void idle_worker_rebind(struct worker *worker)
 {
        /* CPU may go down again inbetween, clear UNBOUND only on success */
-       if (worker_maybe_bind_and_lock(worker))
+       if (worker_maybe_bind_and_lock(worker->pool))
                worker_clr_flags(worker, WORKER_UNBOUND);
 
        /* rebind complete, become available again */
@@ -1593,7 +1603,7 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 {
        struct worker *worker = container_of(work, struct worker, rebind_work);
 
-       if (worker_maybe_bind_and_lock(worker))
+       if (worker_maybe_bind_and_lock(worker->pool))
                worker_clr_flags(worker, WORKER_UNBOUND);
 
        spin_unlock_irq(&worker->pool->lock);
@@ -1793,8 +1803,9 @@ static void destroy_worker(struct worker *worker)
        int id = worker->id;
 
        /* sanity check frenzy */
-       BUG_ON(worker->current_work);
-       BUG_ON(!list_empty(&worker->scheduled));
+       if (WARN_ON(worker->current_work) ||
+           WARN_ON(!list_empty(&worker->scheduled)))
+               return;
 
        if (worker->flags & WORKER_STARTED)
                pool->nr_workers--;
@@ -1923,7 +1934,8 @@ restart:
                        del_timer_sync(&pool->mayday_timer);
                        spin_lock_irq(&pool->lock);
                        start_worker(worker);
-                       BUG_ON(need_to_create_worker(pool));
+                       if (WARN_ON_ONCE(need_to_create_worker(pool)))
+                               goto restart;
                        return true;
                }
 
@@ -2038,7 +2050,7 @@ static bool manage_workers(struct worker *worker)
                 * on @pool's current state.  Try it and adjust
                 * %WORKER_UNBOUND accordingly.
                 */
-               if (worker_maybe_bind_and_lock(worker))
+               if (worker_maybe_bind_and_lock(pool))
                        worker->flags &= ~WORKER_UNBOUND;
                else
                        worker->flags |= WORKER_UNBOUND;
@@ -2256,7 +2268,7 @@ recheck:
         * preparing to process a work or actually processing it.
         * Make sure nobody diddled with it while I was sleeping.
         */
-       BUG_ON(!list_empty(&worker->scheduled));
+       WARN_ON_ONCE(!list_empty(&worker->scheduled));
 
        /*
         * When control reaches this point, we're guaranteed to have
@@ -2357,14 +2369,14 @@ repeat:
                mayday_clear_cpu(cpu, wq->mayday_mask);
 
                /* migrate to the target cpu if possible */
+               worker_maybe_bind_and_lock(pool);
                rescuer->pool = pool;
-               worker_maybe_bind_and_lock(rescuer);
 
                /*
                 * Slurp in all works issued via this workqueue and
                 * process'em.
                 */
-               BUG_ON(!list_empty(&rescuer->scheduled));
+               WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
                list_for_each_entry_safe(work, n, &pool->worklist, entry)
                        if (get_work_pwq(work) == pwq)
                                move_linked_works(work, scheduled, &n);
@@ -2379,6 +2391,7 @@ repeat:
                if (keep_working(pool))
                        wake_up_worker(pool);
 
+               rescuer->pool = NULL;
                spin_unlock_irq(&pool->lock);
        }
 
@@ -2495,21 +2508,20 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
                                      int flush_color, int work_color)
 {
        bool wait = false;
-       unsigned int cpu;
+       struct pool_workqueue *pwq;
 
        if (flush_color >= 0) {
-               BUG_ON(atomic_read(&wq->nr_pwqs_to_flush));
+               WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
                atomic_set(&wq->nr_pwqs_to_flush, 1);
        }
 
-       for_each_pwq_cpu(cpu, wq) {
-               struct pool_workqueue *pwq = get_pwq(cpu, wq);
+       for_each_pwq(pwq, wq) {
                struct worker_pool *pool = pwq->pool;
 
                spin_lock_irq(&pool->lock);
 
                if (flush_color >= 0) {
-                       BUG_ON(pwq->flush_color != -1);
+                       WARN_ON_ONCE(pwq->flush_color != -1);
 
                        if (pwq->nr_in_flight[flush_color]) {
                                pwq->flush_color = flush_color;
@@ -2519,7 +2531,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
                }
 
                if (work_color >= 0) {
-                       BUG_ON(work_color != work_next_color(pwq->work_color));
+                       WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
                        pwq->work_color = work_color;
                }
 
@@ -2567,13 +2579,13 @@ void flush_workqueue(struct workqueue_struct *wq)
                 * becomes our flush_color and work_color is advanced
                 * by one.
                 */
-               BUG_ON(!list_empty(&wq->flusher_overflow));
+               WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
                this_flusher.flush_color = wq->work_color;
                wq->work_color = next_color;
 
                if (!wq->first_flusher) {
                        /* no flush in progress, become the first flusher */
-                       BUG_ON(wq->flush_color != this_flusher.flush_color);
+                       WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
 
                        wq->first_flusher = &this_flusher;
 
@@ -2586,7 +2598,7 @@ void flush_workqueue(struct workqueue_struct *wq)
                        }
                } else {
                        /* wait in queue */
-                       BUG_ON(wq->flush_color == this_flusher.flush_color);
+                       WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
                        list_add_tail(&this_flusher.list, &wq->flusher_queue);
                        flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
                }
@@ -2620,8 +2632,8 @@ void flush_workqueue(struct workqueue_struct *wq)
 
        wq->first_flusher = NULL;
 
-       BUG_ON(!list_empty(&this_flusher.list));
-       BUG_ON(wq->flush_color != this_flusher.flush_color);
+       WARN_ON_ONCE(!list_empty(&this_flusher.list));
+       WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
 
        while (true) {
                struct wq_flusher *next, *tmp;
@@ -2634,8 +2646,8 @@ void flush_workqueue(struct workqueue_struct *wq)
                        complete(&next->done);
                }
 
-               BUG_ON(!list_empty(&wq->flusher_overflow) &&
-                      wq->flush_color != work_next_color(wq->work_color));
+               WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
+                            wq->flush_color != work_next_color(wq->work_color));
 
                /* this flush_color is finished, advance by one */
                wq->flush_color = work_next_color(wq->flush_color);
@@ -2659,7 +2671,7 @@ void flush_workqueue(struct workqueue_struct *wq)
                }
 
                if (list_empty(&wq->flusher_queue)) {
-                       BUG_ON(wq->flush_color != wq->work_color);
+                       WARN_ON_ONCE(wq->flush_color != wq->work_color);
                        break;
                }
 
@@ -2667,8 +2679,8 @@ void flush_workqueue(struct workqueue_struct *wq)
                 * Need to flush more colors.  Make the next flusher
                 * the new first flusher and arm pwqs.
                 */
-               BUG_ON(wq->flush_color == wq->work_color);
-               BUG_ON(wq->flush_color != next->flush_color);
+               WARN_ON_ONCE(wq->flush_color == wq->work_color);
+               WARN_ON_ONCE(wq->flush_color != next->flush_color);
 
                list_del_init(&next->list);
                wq->first_flusher = next;
@@ -2702,22 +2714,21 @@ EXPORT_SYMBOL_GPL(flush_workqueue);
 void drain_workqueue(struct workqueue_struct *wq)
 {
        unsigned int flush_cnt = 0;
-       unsigned int cpu;
+       struct pool_workqueue *pwq;
 
        /*
         * __queue_work() needs to test whether there are drainers, is much
         * hotter than drain_workqueue() and already looks at @wq->flags.
         * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
         */
-       spin_lock(&workqueue_lock);
+       spin_lock_irq(&workqueue_lock);
        if (!wq->nr_drainers++)
                wq->flags |= WQ_DRAINING;
-       spin_unlock(&workqueue_lock);
+       spin_unlock_irq(&workqueue_lock);
 reflush:
        flush_workqueue(wq);
 
-       for_each_pwq_cpu(cpu, wq) {
-               struct pool_workqueue *pwq = get_pwq(cpu, wq);
+       for_each_pwq(pwq, wq) {
                bool drained;
 
                spin_lock_irq(&pwq->pool->lock);
@@ -2734,10 +2745,10 @@ reflush:
                goto reflush;
        }
 
-       spin_lock(&workqueue_lock);
+       spin_lock_irq(&workqueue_lock);
        if (!--wq->nr_drainers)
                wq->flags &= ~WQ_DRAINING;
-       spin_unlock(&workqueue_lock);
+       spin_unlock_irq(&workqueue_lock);
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
 
@@ -3088,47 +3099,43 @@ int keventd_up(void)
        return system_wq != NULL;
 }
 
-static int alloc_pwqs(struct workqueue_struct *wq)
+static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 {
-       /*
-        * pwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
-        * Make sure that the alignment isn't lower than that of
-        * unsigned long long.
-        */
-       const size_t size = sizeof(struct pool_workqueue);
-       const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
-                                  __alignof__(unsigned long long));
+       bool highpri = wq->flags & WQ_HIGHPRI;
+       int cpu;
 
-       if (!(wq->flags & WQ_UNBOUND))
-               wq->pool_wq.pcpu = __alloc_percpu(size, align);
-       else {
-               void *ptr;
+       if (!(wq->flags & WQ_UNBOUND)) {
+               wq->pool_wq.pcpu = alloc_percpu(struct pool_workqueue);
+               if (!wq->pool_wq.pcpu)
+                       return -ENOMEM;
 
-               /*
-                * Allocate enough room to align pwq and put an extra
-                * pointer at the end pointing back to the originally
-                * allocated pointer which will be used for free.
-                */
-               ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
-               if (ptr) {
-                       wq->pool_wq.single = PTR_ALIGN(ptr, align);
-                       *(void **)(wq->pool_wq.single + 1) = ptr;
+               for_each_possible_cpu(cpu) {
+                       struct pool_workqueue *pwq = get_pwq(cpu, wq);
+
+                       pwq->pool = get_std_worker_pool(cpu, highpri);
+                       list_add_tail(&pwq->pwqs_node, &wq->pwqs);
                }
+       } else {
+               struct pool_workqueue *pwq;
+
+               pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
+               if (!pwq)
+                       return -ENOMEM;
+
+               wq->pool_wq.single = pwq;
+               pwq->pool = get_std_worker_pool(WORK_CPU_UNBOUND, highpri);
+               list_add_tail(&pwq->pwqs_node, &wq->pwqs);
        }
 
-       /* just in case, make sure it's actually aligned */
-       BUG_ON(!IS_ALIGNED(wq->pool_wq.v, align));
-       return wq->pool_wq.v ? 0 : -ENOMEM;
+       return 0;
 }
 
 static void free_pwqs(struct workqueue_struct *wq)
 {
        if (!(wq->flags & WQ_UNBOUND))
                free_percpu(wq->pool_wq.pcpu);
-       else if (wq->pool_wq.single) {
-               /* the pointer to free is stored right after the pwq */
-               kfree(*(void **)(wq->pool_wq.single + 1));
-       }
+       else
+               kmem_cache_free(pwq_cache, wq->pool_wq.single);
 }
 
 static int wq_clamp_max_active(int max_active, unsigned int flags,
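A note on why the manual alignment dance in the old alloc_pwqs() could go away: a pool_workqueue pointer is stored in work->data alongside WORK_STRUCT_* flag bits (see set_work_data() above and the BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK) check in __alloc_workqueue_key() below), so pwqs merely need 1 << WORK_STRUCT_FLAG_BITS alignment, which the new __aligned() attribute on struct pool_workqueue provides and which both alloc_percpu() and KMEM_CACHE() honor.  A rough sketch of the packing, not taken from this patch:

static inline unsigned long pack_pwq(struct pool_workqueue *pwq,
				     unsigned long flags)
{
	/* the low WORK_STRUCT_FLAG_BITS bits are zero thanks to the alignment */
	return (unsigned long)pwq | flags;
}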
@@ -3151,7 +3158,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 {
        va_list args, args1;
        struct workqueue_struct *wq;
-       unsigned int cpu;
+       struct pool_workqueue *pwq;
        size_t namelen;
 
        /* determine namelen, allocate wq and format name */
@@ -3182,20 +3189,18 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
        wq->saved_max_active = max_active;
        mutex_init(&wq->flush_mutex);
        atomic_set(&wq->nr_pwqs_to_flush, 0);
+       INIT_LIST_HEAD(&wq->pwqs);
        INIT_LIST_HEAD(&wq->flusher_queue);
        INIT_LIST_HEAD(&wq->flusher_overflow);
 
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        INIT_LIST_HEAD(&wq->list);
 
-       if (alloc_pwqs(wq) < 0)
+       if (alloc_and_link_pwqs(wq) < 0)
                goto err;
 
-       for_each_pwq_cpu(cpu, wq) {
-               struct pool_workqueue *pwq = get_pwq(cpu, wq);
-
+       for_each_pwq(pwq, wq) {
                BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
-               pwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI);
                pwq->wq = wq;
                pwq->flush_color = -1;
                pwq->max_active = max_active;
@@ -3227,15 +3232,15 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
         * list.  Grab it, set max_active accordingly and add the new
         * workqueue to workqueues list.
         */
-       spin_lock(&workqueue_lock);
+       spin_lock_irq(&workqueue_lock);
 
        if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
-               for_each_pwq_cpu(cpu, wq)
-                       get_pwq(cpu, wq)->max_active = 0;
+               for_each_pwq(pwq, wq)
+                       pwq->max_active = 0;
 
        list_add(&wq->list, &workqueues);
 
-       spin_unlock(&workqueue_lock);
+       spin_unlock_irq(&workqueue_lock);
 
        return wq;
 err:
@@ -3257,29 +3262,30 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
-       unsigned int cpu;
+       struct pool_workqueue *pwq;
 
        /* drain it before proceeding with destruction */
        drain_workqueue(wq);
 
+       /* sanity checks */
+       for_each_pwq(pwq, wq) {
+               int i;
+
+               for (i = 0; i < WORK_NR_COLORS; i++)
+                       if (WARN_ON(pwq->nr_in_flight[i]))
+                               return;
+               if (WARN_ON(pwq->nr_active) ||
+                   WARN_ON(!list_empty(&pwq->delayed_works)))
+                       return;
+       }
+
        /*
         * wq list is used to freeze wq, remove from list after
         * flushing is complete in case freeze races us.
         */
-       spin_lock(&workqueue_lock);
+       spin_lock_irq(&workqueue_lock);
        list_del(&wq->list);
-       spin_unlock(&workqueue_lock);
-
-       /* sanity check */
-       for_each_pwq_cpu(cpu, wq) {
-               struct pool_workqueue *pwq = get_pwq(cpu, wq);
-               int i;
-
-               for (i = 0; i < WORK_NR_COLORS; i++)
-                       BUG_ON(pwq->nr_in_flight[i]);
-               BUG_ON(pwq->nr_active);
-               BUG_ON(!list_empty(&pwq->delayed_works));
-       }
+       spin_unlock_irq(&workqueue_lock);
 
        if (wq->flags & WQ_RESCUER) {
                kthread_stop(wq->rescuer->task);
@@ -3324,28 +3330,27 @@ static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active)
  */
 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 {
-       unsigned int cpu;
+       struct pool_workqueue *pwq;
 
        max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
-       spin_lock(&workqueue_lock);
+       spin_lock_irq(&workqueue_lock);
 
        wq->saved_max_active = max_active;
 
-       for_each_pwq_cpu(cpu, wq) {
-               struct pool_workqueue *pwq = get_pwq(cpu, wq);
+       for_each_pwq(pwq, wq) {
                struct worker_pool *pool = pwq->pool;
 
-               spin_lock_irq(&pool->lock);
+               spin_lock(&pool->lock);
 
                if (!(wq->flags & WQ_FREEZABLE) ||
                    !(pool->flags & POOL_FREEZING))
                        pwq_set_max_active(pwq, max_active);
 
-               spin_unlock_irq(&pool->lock);
+               spin_unlock(&pool->lock);
        }
 
-       spin_unlock(&workqueue_lock);
+       spin_unlock_irq(&workqueue_lock);
 }
 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
 
@@ -3423,7 +3428,7 @@ static void wq_unbind_fn(struct work_struct *work)
        int i;
 
        for_each_std_worker_pool(pool, cpu) {
-               BUG_ON(cpu != smp_processor_id());
+               WARN_ON_ONCE(cpu != smp_processor_id());
 
                mutex_lock(&pool->assoc_mutex);
                spin_lock_irq(&pool->lock);
@@ -3589,36 +3594,34 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  */
 void freeze_workqueues_begin(void)
 {
-       unsigned int cpu;
+       struct worker_pool *pool;
+       int id;
 
-       spin_lock(&workqueue_lock);
+       spin_lock_irq(&workqueue_lock);
 
-       BUG_ON(workqueue_freezing);
+       WARN_ON_ONCE(workqueue_freezing);
        workqueue_freezing = true;
 
-       for_each_wq_cpu(cpu) {
-               struct worker_pool *pool;
+       for_each_pool(pool, id) {
                struct workqueue_struct *wq;
 
-               for_each_std_worker_pool(pool, cpu) {
-                       spin_lock_irq(&pool->lock);
-
-                       WARN_ON_ONCE(pool->flags & POOL_FREEZING);
-                       pool->flags |= POOL_FREEZING;
+               spin_lock(&pool->lock);
 
-                       list_for_each_entry(wq, &workqueues, list) {
-                               struct pool_workqueue *pwq = get_pwq(cpu, wq);
+               WARN_ON_ONCE(pool->flags & POOL_FREEZING);
+               pool->flags |= POOL_FREEZING;
 
-                               if (pwq && pwq->pool == pool &&
-                                   (wq->flags & WQ_FREEZABLE))
-                                       pwq->max_active = 0;
-                       }
+               list_for_each_entry(wq, &workqueues, list) {
+                       struct pool_workqueue *pwq = get_pwq(pool->cpu, wq);
 
-                       spin_unlock_irq(&pool->lock);
+                       if (pwq && pwq->pool == pool &&
+                           (wq->flags & WQ_FREEZABLE))
+                               pwq->max_active = 0;
                }
+
+               spin_unlock(&pool->lock);
        }
 
-       spin_unlock(&workqueue_lock);
+       spin_unlock_irq(&workqueue_lock);
 }
 
 /**
@@ -3639,9 +3642,9 @@ bool freeze_workqueues_busy(void)
        unsigned int cpu;
        bool busy = false;
 
-       spin_lock(&workqueue_lock);
+       spin_lock_irq(&workqueue_lock);
 
-       BUG_ON(!workqueue_freezing);
+       WARN_ON_ONCE(!workqueue_freezing);
 
        for_each_wq_cpu(cpu) {
                struct workqueue_struct *wq;
@@ -3655,7 +3658,7 @@ bool freeze_workqueues_busy(void)
                        if (!pwq || !(wq->flags & WQ_FREEZABLE))
                                continue;
 
-                       BUG_ON(pwq->nr_active < 0);
+                       WARN_ON_ONCE(pwq->nr_active < 0);
                        if (pwq->nr_active) {
                                busy = true;
                                goto out_unlock;
@@ -3663,7 +3666,7 @@ bool freeze_workqueues_busy(void)
                }
        }
 out_unlock:
-       spin_unlock(&workqueue_lock);
+       spin_unlock_irq(&workqueue_lock);
        return busy;
 }
 
@@ -3680,7 +3683,7 @@ void thaw_workqueues(void)
 {
        unsigned int cpu;
 
-       spin_lock(&workqueue_lock);
+       spin_lock_irq(&workqueue_lock);
 
        if (!workqueue_freezing)
                goto out_unlock;
@@ -3690,7 +3693,7 @@ void thaw_workqueues(void)
                struct workqueue_struct *wq;
 
                for_each_std_worker_pool(pool, cpu) {
-                       spin_lock_irq(&pool->lock);
+                       spin_lock(&pool->lock);
 
                        WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
                        pool->flags &= ~POOL_FREEZING;
@@ -3708,13 +3711,13 @@ void thaw_workqueues(void)
 
                        wake_up_worker(pool);
 
-                       spin_unlock_irq(&pool->lock);
+                       spin_unlock(&pool->lock);
                }
        }
 
        workqueue_freezing = false;
 out_unlock:
-       spin_unlock(&workqueue_lock);
+       spin_unlock_irq(&workqueue_lock);
 }
 #endif /* CONFIG_FREEZER */
 
@@ -3726,6 +3729,10 @@ static int __init init_workqueues(void)
        BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
                     WORK_CPU_END * NR_STD_WORKER_POOLS);
 
+       WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
+
+       pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
+
        cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
        hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
 