/*
 * kernel/sched_cpupri.c
 *
 * CPU priority management
 *
 * Copyright (C) 2007-2008 Novell
 *
 * Author: Gregory Haskins <ghaskins@novell.com>
 *
 * This code tracks the priority of each CPU so that global migration
 * decisions are easy to calculate.  Each CPU can be in a state as follows:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 * going from the lowest priority to the highest.  CPUs in the INVALID state
 * are not eligible for routing.  The system maintains this state with
 * a 2 dimensional bitmap (the first for priority class, the second for cpus
 * in that class).  Therefore a typical application without affinity
 * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 * searches).  For tasks with affinity restrictions, the algorithm has a
 * worst case complexity of O(min(102, nr_domcpus)), though the scenario that
 * yields the worst case search is fairly contrived.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include "sched_cpupri.h"
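/*
 * For reference, a sketch (not part of the original file) of the data layout
 * this code relies on.  The real definitions live in sched_cpupri.h; the
 * shape below is reconstructed from how the fields are used here and is only
 * an illustration:
 *
 *      struct cpupri_vec {
 *              spinlock_t      lock;
 *              int             count;  (number of CPUs in this class)
 *              cpumask_var_t   mask;   (which CPUs they are)
 *      };
 *
 *      struct cpupri {
 *              struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
 *              long              pri_active[];  (bitmap, one bit per class)
 *              int               cpu_to_pri[];  (one entry per possible CPU)
 *      };
 *
 * pri_active is the first level of the 2 dimensional bitmap described in the
 * header comment (one bit per priority class with at least one CPU in it),
 * and each vector's mask is the second level (one bit per CPU in that class).
 */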
/* Convert between a 140-based task->prio and our 102-based cpupri */
static int convert_prio(int prio)
{
        int cpupri;

        if (prio == CPUPRI_INVALID)
                cpupri = CPUPRI_INVALID;
        else if (prio == MAX_PRIO)
                cpupri = CPUPRI_IDLE;
        else if (prio >= MAX_RT_PRIO)
                cpupri = CPUPRI_NORMAL;
        else
                cpupri = MAX_RT_PRIO - prio + 1;

        return cpupri;
}
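/*
 * A few sample mappings produced by convert_prio(), assuming the usual
 * MAX_RT_PRIO == 100 and MAX_PRIO == 140 definitions (illustration only,
 * not part of the original file):
 *
 *      task->prio 140 (MAX_PRIO, idle)  ->  CPUPRI_IDLE   (0)
 *      task->prio 120 (normal task)     ->  CPUPRI_NORMAL (1)
 *      task->prio  99 (lowest RT)       ->  2
 *      task->prio   0 (highest RT)      ->  101
 *
 * which is where the 102 distinct cpupri values come from: IDLE, NORMAL and
 * one class per RT priority level, with a larger cpupri meaning a higher
 * priority.
 */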
#define for_each_cpupri_active(array, idx)                            \
        for (idx = find_first_bit(array, CPUPRI_NR_PRIORITIES);       \
             idx < CPUPRI_NR_PRIORITIES;                              \
             idx = find_next_bit(array, CPUPRI_NR_PRIORITIES, idx+1))
/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Returns: (int)bool - 1 if suitable CPUs were found, 0 otherwise
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
                struct cpumask *lowest_mask)
{
        int idx = 0;
        int task_pri = convert_prio(p->prio);

        for_each_cpupri_active(cp->pri_active, idx) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[idx];

                if (idx >= task_pri)
                        break;

                if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
                        continue;

                cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
                return 1;
        }

        return 0;
}
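/*
 * Illustrative sketch (not part of the original file) of how a caller such
 * as the RT scheduler's push/pull logic might consume cpupri_find().  The
 * helper name and the way the result is used are assumptions; the real
 * caller applies additional heuristics (e.g. preferring the task's current
 * CPU or a cache-local one) before picking from the mask.
 */
static int example_pick_lowest_cpu(struct cpupri *cp, struct task_struct *p,
                                   struct cpumask *lowest_mask)
{
        if (!cpupri_find(cp, p, lowest_mask))
                return -1;      /* every eligible CPU runs at or above p's priority */

        /* Any CPU left in lowest_mask is acceptable; take the first one. */
        return cpumask_first(lowest_mask);
}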
/**
 * cpupri_set - update the cpu priority setting
 * @cp: The cpupri context
 * @cpu: The target cpu
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
        int *currpri = &cp->cpu_to_pri[cpu];
        int oldpri = *currpri;
        unsigned long flags;

        newpri = convert_prio(newpri);

        BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

        if (newpri == oldpri)
                return;

        /*
         * If the cpu was currently mapped to a different value, we
         * first need to unmap the old value
         */
        if (likely(oldpri != CPUPRI_INVALID)) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

                spin_lock_irqsave(&vec->lock, flags);

                vec->count--;
                if (!vec->count)
                        clear_bit(oldpri, cp->pri_active);
                cpumask_clear_cpu(cpu, vec->mask);

                spin_unlock_irqrestore(&vec->lock, flags);
        }

        if (likely(newpri != CPUPRI_INVALID)) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

                spin_lock_irqsave(&vec->lock, flags);

                cpumask_set_cpu(cpu, vec->mask);
                vec->count++;
                if (vec->count == 1)
                        set_bit(newpri, cp->pri_active);

                spin_unlock_irqrestore(&vec->lock, flags);
        }

        *currpri = newpri;
}
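/*
 * Illustrative sketch (not part of the original file) of the transitions a
 * CPU typically goes through.  In the scheduler proper these updates are
 * driven by the RT scheduling class with cpu_rq(cpu)->lock held, as noted
 * above; the helper below is hypothetical.
 */
static void example_cpupri_transitions(struct cpupri *cp, int cpu)
{
        cpupri_set(cp, cpu, MAX_PRIO);          /* CPU has gone idle */
        cpupri_set(cp, cpu, MAX_RT_PRIO);       /* CPU runs a normal (non-RT) task */
        cpupri_set(cp, cpu, 0);                 /* CPU runs the highest-priority RT task */
        cpupri_set(cp, cpu, CPUPRI_INVALID);    /* CPU leaves the root domain */
}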
/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 * @bootmem: true if allocations need to use bootmem
 *
 * Returns: -ENOMEM on allocation failure, 0 on success.
 */
int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
{
        int i;

        memset(cp, 0, sizeof(*cp));

        for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[i];

                spin_lock_init(&vec->lock);
                vec->count = 0;
                if (bootmem)
                        alloc_bootmem_cpumask_var(&vec->mask);
                else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL))
                        goto cleanup;
        }

        for_each_possible_cpu(i)
                cp->cpu_to_pri[i] = CPUPRI_INVALID;
        return 0;

cleanup:
        for (i--; i >= 0; i--)
                free_cpumask_var(cp->pri_to_cpu[i].mask);
        return -ENOMEM;
}
/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
        int i;

        for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
                free_cpumask_var(cp->pri_to_cpu[i].mask);
}
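/*
 * Illustrative sketch (not part of the original file) of the expected
 * init/cleanup pairing.  In the scheduler the cpupri context is embedded in
 * a root domain and these calls happen when the domain is created and torn
 * down; the standalone helper below is hypothetical.
 */
static int example_setup_cpupri(struct cpupri *cp, bool bootmem)
{
        int ret;

        ret = cpupri_init(cp, bootmem); /* every CPU starts out CPUPRI_INVALID */
        if (ret)
                return ret;             /* -ENOMEM: cpupri_init freed what it allocated */

        /* ... use cpupri_set()/cpupri_find() while the domain is live ... */

        cpupri_cleanup(cp);             /* frees the per-priority cpumasks */
        return 0;
}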