/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define MAX_OBJ_NUM 1000
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}
static int padata_cpu_hash(struct parallel_data *pd)
{
	unsigned int seq_nr;
	int cpu_index;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */

	seq_nr = atomic_inc_return(&pd->seq_nr);
	cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
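
/*
 * A worked example of the hash above (illustrative numbers only, not part
 * of the original file): with three cpus in cpumask.pcpu, say 0, 2 and 3,
 * cpumask_weight() is 3, so objects with seq_nr 0, 1, 2, 3, 4, ... get
 * cpu_index 0, 1, 2, 0, 1, ... and padata_index_to_cpu() spreads them
 * round-robin over cpus 0, 2, 3, 0, 2, ...
 */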
static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;
	struct padata_instance *pinst;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);
	pd = pqueue->pd;
	pinst = pd->pinst;

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}
/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
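
/*
 * Example usage (a sketch under assumed names; struct my_request,
 * my_parallel() and my_serial() are hypothetical and not part of this
 * file). A user embeds struct padata_priv into its own request object,
 * sets the two callbacks and submits the object:
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *					struct my_request, padata);
 *
 *		// cpu intensive work happens here, in parallel ...
 *
 *		padata_do_serial(padata);	// hand back for reordering
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(pinst, &req->padata, cb_cpu);
 *
 * cb_cpu must be set in pinst->cpumask.cbcpu; -EBUSY (instance resetting
 * or more than MAX_OBJ_NUM objects in flight) usually means the caller
 * should fall back to processing the object synchronously.
 */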
/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	padata = NULL;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		pd->processed++;

		goto out;
	}

	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}
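
/*
 * A worked example of the bookkeeping above (illustrative numbers only):
 * with two cpus in cpumask.pcpu, objects 0, 2, 4, ... are hashed to the
 * first cpu and 1, 3, 5, ... to the second. Since pd->processed counts
 * the objects already serialized, object number pd->processed can only
 * ever sit at the head of the reorder queue of cpu
 * padata_index_to_cpu(pd, pd->processed % num_cpus); if that queue is
 * empty, the object is either still in flight on another cpu
 * (-EINPROGRESS) or stuck behind this cpu's own parallel queue
 * (-ENODATA).
 */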
static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive to the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * All reorder queues are empty, or the next object that needs
		 * serialization is parallel processed by another cpu and is
		 * still on its way to the cpu's reorder queue, nothing to
		 * do for now.
		 */
		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived to
	 * the reorder queues in the meantime, we will be called again
	 * from the timer function if no one else cares for it.
	 */
	if (atomic_read(&pd->reorder_objects)
			&& !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);
}
static void padata_reorder_timer(unsigned long arg)
{
	struct parallel_data *pd = (struct parallel_data *)arg;

	padata_reorder(pd);
}
static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}
/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	pqueue = per_cpu_ptr(pd->pqueue, cpu);

	spin_lock(&pqueue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	spin_unlock(&pqueue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
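
/*
 * Completion side of the example above (my_serial() is hypothetical and
 * not part of this file): once all objects submitted before this one have
 * been serialized, the serial callback runs on the cb_cpu that was passed
 * to padata_do_parallel():
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *					struct my_request, padata);
 *
 *		// objects arrive here in their original submission order
 *		complete(&req->done);
 *	}
 */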
static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		/* cbcpu failed to allocate, so free the pcpu mask. */
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
	return 0;
}
static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}
/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}
/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}
/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}
static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}
/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}
static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}
/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_flush_queues(pd_old);
	padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pd_new->cpumask);

	pinst->flags &= ~PADATA_RESET;
}
/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                             if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);
/**
 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
 *        registered earlier using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to padata instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}
static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}
/**
 * padata_set_cpumask: Replace the cpumask selected by @cpumask_type with
 *                     the contents of @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or the serial cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
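
/*
 * Example (a sketch; assumes an already allocated instance @pinst and a
 * caller that handles errors): restrict parallel workers to cpus 0-1.
 * The mask is copied internally, so it can be freed right away:
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(0, mask);
 *	cpumask_set_cpu(1, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */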
/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;
	else
		__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);
/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);
#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
	}

	return 0;
}
/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);
static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}
static int padata_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int err;
	struct padata_instance *pinst;
	int cpu = (unsigned long)hcpu;

	pinst = container_of(nfb, struct padata_instance, cpu_notifier);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;
	}

	return NOTIFY_OK;
}
#endif
static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	unregister_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}
#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)
static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}
struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};
static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}
static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}
#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)		\
	static struct padata_sysfs_entry _name##_attr = \
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
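
/*
 * These attributes become visible once a padata user adds the instance's
 * kobject to sysfs. As a sketch of the resulting interface (the exact
 * path depends on the user; pcrypt, for example, registers its instances
 * under /sys/kernel/pcrypt/):
 *
 *	# cat /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *	# echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 */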
static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}
static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}
static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_attrs = padata_default_attrs,
	.release = padata_sysfs_release,
};
/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);
/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
struct padata_instance *padata_alloc(struct workqueue_struct *wq,
				     const struct cpumask *pcpumask,
				     const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	get_online_cpus();
	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

	put_online_cpus();

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
	pinst->cpu_notifier.priority = 0;
	register_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
	put_online_cpus();
err:
	return NULL;
}
/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);
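
/*
 * Putting it all together, the typical lifecycle of an instance (a sketch
 * with error handling omitted; "my_wq" is a hypothetical workqueue name):
 *
 *	struct workqueue_struct *wq;
 *	struct padata_instance *pinst;
 *
 *	wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 *	pinst = padata_alloc_possible(wq);
 *	padata_start(pinst);
 *
 *	// submit objects with padata_do_parallel(), finish them with
 *	// padata_do_serial() from the parallel callbacks ...
 *
 *	padata_stop(pinst);
 *	padata_free(pinst);
 *	destroy_workqueue(wq);
 */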