genirq: Add a helper to spread an affinity mask for MSI/MSI-X vectors
[deliverable/linux.git] / kernel / irq / affinity.c
CommitLineData
5e385a6e
CH
1
2#include <linux/interrupt.h>
3#include <linux/kernel.h>
4#include <linux/slab.h>
5#include <linux/cpu.h>
6
7static int get_first_sibling(unsigned int cpu)
8{
9 unsigned int ret;
10
11 ret = cpumask_first(topology_sibling_cpumask(cpu));
12 if (ret < nr_cpu_ids)
13 return ret;
14 return cpu;
15}
16
17/*
18 * Take a map of online CPUs and the number of available interrupt vectors
19 * and generate an output cpumask suitable for spreading MSI/MSI-X vectors
20 * so that they are distributed as good as possible around the CPUs. If
21 * more vectors than CPUs are available we'll map one to each CPU,
22 * otherwise we map one to the first sibling of each socket.
23 *
24 * If there are more vectors than CPUs we will still only have one bit
25 * set per CPU, but interrupt code will keep on assigning the vectors from
26 * the start of the bitmap until we run out of vectors.
27 */
/*
 * irq_create_affinity_mask - build a cpumask for spreading interrupt vectors
 * @nr_vecs: in: number of vectors requested; out: number of CPUs covered by
 *	     the returned mask, or 1 when NULL is returned
 *
 * Returns a kzalloc()ed cpumask (ownership passes to the caller) or NULL.
 * NULL is returned both when only one vector was requested (no spreading
 * needed) and when the allocation fails; in the failure case *nr_vecs is
 * forced to 1 so the caller falls back to a single vector.
 */
struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
{
	struct cpumask *affinity_mask;
	unsigned int max_vecs = *nr_vecs;

	/* A single vector needs no affinity mask at all. */
	if (max_vecs == 1)
		return NULL;

	affinity_mask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!affinity_mask) {
		/* Out of memory: tell the caller to use one vector only. */
		*nr_vecs = 1;
		return NULL;
	}

	/* Hold off CPU hotplug so cpu_online_mask is stable while we scan. */
	get_online_cpus();
	if (max_vecs >= num_online_cpus()) {
		/* Enough vectors for every online CPU: one bit per CPU. */
		cpumask_copy(affinity_mask, cpu_online_mask);
		*nr_vecs = num_online_cpus();
	} else {
		unsigned int vecs = 0, cpu;

		/*
		 * Fewer vectors than CPUs: set a bit only for the first
		 * sibling of each core, so hyper-threads share one vector.
		 */
		for_each_online_cpu(cpu) {
			if (cpu == get_first_sibling(cpu)) {
				cpumask_set_cpu(cpu, affinity_mask);
				vecs++;
			}

			/*
			 * NOTE(review): the budget is decremented for every
			 * CPU visited, not only for CPUs that got a bit set,
			 * so the scan can stop before max_vecs bits are set
			 * when early CPUs are siblings of each other —
			 * confirm this early cutoff is intended.
			 */
			if (--max_vecs == 0)
				break;
		}
		/* Report how many CPUs actually received a vector bit. */
		*nr_vecs = vecs;
	}
	put_online_cpus();

	return affinity_mask;
}
This page took 0.035463 seconds and 5 git commands to generate.