Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
[deliverable/linux.git] / drivers / dca / dca-core.c
1 /*
2 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59
16 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called COPYING.
20 */
21
22 /*
23 * This driver supports an interface for DCA clients and providers to meet.
24 */
25
26 #include <linux/kernel.h>
27 #include <linux/notifier.h>
28 #include <linux/device.h>
29 #include <linux/dca.h>
30 #include <linux/slab.h>
31
32 #define DCA_VERSION "1.12.1"
33
34 MODULE_VERSION(DCA_VERSION);
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Intel Corporation");
37
38 static DEFINE_SPINLOCK(dca_lock);
39
40 static LIST_HEAD(dca_domains);
41
42 static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
43
44 static int dca_providers_blocked;
45
46 static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
47 {
48 struct pci_dev *pdev = to_pci_dev(dev);
49 struct pci_bus *bus = pdev->bus;
50
51 while (bus->parent)
52 bus = bus->parent;
53
54 return bus;
55 }
56
57 static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
58 {
59 struct dca_domain *domain;
60
61 domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
62 if (!domain)
63 return NULL;
64
65 INIT_LIST_HEAD(&domain->dca_providers);
66 domain->pci_rc = rc;
67
68 return domain;
69 }
70
/*
 * Unlink @domain from the global dca_domains list and free it.
 * Caller must hold dca_lock.
 */
static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}
76
77 static int dca_provider_ioat_ver_3_0(struct device *dev)
78 {
79 struct pci_dev *pdev = to_pci_dev(dev);
80
81 return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
82 ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
83 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
84 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
85 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
86 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
87 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
88 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
89 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
90 }
91
/*
 * Tear down every registered provider in the (single expected) domain.
 * Invoked when mixing an I/OAT 3.0 provider with an existing domain would
 * produce inconsistent DCA tags (see dca_get_domain()).
 *
 * Providers are first moved to a private list under dca_lock, then their
 * sysfs entries are removed after the lock is dropped, because
 * dca_sysfs_remove_provider() can sleep and must not run under a spinlock.
 */
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	/* Notify clients before the providers disappear. */
	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		/* Nothing registered; nothing to unwind. */
		spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	/* Detach every provider; _safe variant since list_move unlinks nodes. */
	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	spin_unlock_irqrestore(&dca_lock, flags);

	/* Sleepable cleanup outside the spinlock. */
	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}
126
127 static struct dca_domain *dca_find_domain(struct pci_bus *rc)
128 {
129 struct dca_domain *domain;
130
131 list_for_each_entry(domain, &dca_domains, node)
132 if (domain->pci_rc == rc)
133 return domain;
134
135 return NULL;
136 }
137
138 static struct dca_domain *dca_get_domain(struct device *dev)
139 {
140 struct pci_bus *rc;
141 struct dca_domain *domain;
142
143 rc = dca_pci_rc_from_dev(dev);
144 domain = dca_find_domain(rc);
145
146 if (!domain) {
147 if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
148 dca_providers_blocked = 1;
149 } else {
150 domain = dca_allocate_domain(rc);
151 if (domain)
152 list_add(&domain->node, &dca_domains);
153 }
154 }
155
156 return domain;
157 }
158
159 static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
160 {
161 struct dca_provider *dca;
162 struct pci_bus *rc;
163 struct dca_domain *domain;
164
165 if (dev) {
166 rc = dca_pci_rc_from_dev(dev);
167 domain = dca_find_domain(rc);
168 if (!domain)
169 return NULL;
170 } else {
171 if (!list_empty(&dca_domains))
172 domain = list_first_entry(&dca_domains,
173 struct dca_domain,
174 node);
175 else
176 return NULL;
177 }
178
179 list_for_each_entry(dca, &domain->dca_providers, node)
180 if ((!dev) || (dca->ops->dev_managed(dca, dev)))
181 return dca;
182
183 return NULL;
184 }
185
/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 *
 * Returns 0 on success, -EFAULT for a NULL @dev, -EEXIST if @dev is already
 * registered, -ENODEV when no domain/provider accepts it, or the sysfs error.
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	/* Offer @dev to each provider in the domain; first acceptor wins. */
	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	/* sysfs add may sleep, so it runs unlocked ... */
	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		/*
		 * ... and on failure we must re-check under the lock that the
		 * provider still manages @dev before rolling back, since it
		 * may have been unregistered in the window.
		 */
		spin_lock_irqsave(&dca_lock, flags);
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
240
241 /**
242 * dca_remove_requester - remove a dca client from the list
243 * @dev - the device that wants dca service
244 */
245 int dca_remove_requester(struct device *dev)
246 {
247 struct dca_provider *dca;
248 int slot;
249 unsigned long flags;
250
251 if (!dev)
252 return -EFAULT;
253
254 spin_lock_irqsave(&dca_lock, flags);
255 dca = dca_find_provider_by_dev(dev);
256 if (!dca) {
257 spin_unlock_irqrestore(&dca_lock, flags);
258 return -ENODEV;
259 }
260 slot = dca->ops->remove_requester(dca, dev);
261 spin_unlock_irqrestore(&dca_lock, flags);
262
263 if (slot < 0)
264 return slot;
265
266 dca_sysfs_remove_req(dca, slot);
267
268 return 0;
269 }
270 EXPORT_SYMBOL_GPL(dca_remove_requester);
271
/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 *
 * NOTE(review): the return type is u8, so the -ENODEV error below is
 * truncated to an 8-bit value that callers cannot distinguish from a
 * legitimate tag — long-standing quirk of this API, kept for compatibility.
 */
u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}
295
296 /**
297 * dca3_get_tag - return the dca tag to the requester device
298 * for the given cpu (new api)
299 * @dev - the device that wants dca service
300 * @cpu - the cpuid as returned by get_cpu()
301 */
302 u8 dca3_get_tag(struct device *dev, int cpu)
303 {
304 if (!dev)
305 return -EFAULT;
306
307 return dca_common_get_tag(dev, cpu);
308 }
309 EXPORT_SYMBOL_GPL(dca3_get_tag);
310
311 /**
312 * dca_get_tag - return the dca tag for the given cpu (old api)
313 * @cpu - the cpuid as returned by get_cpu()
314 */
315 u8 dca_get_tag(int cpu)
316 {
317 struct device *dev = NULL;
318
319 return dca_common_get_tag(dev, cpu);
320 }
321 EXPORT_SYMBOL_GPL(dca_get_tag);
322
323 /**
324 * alloc_dca_provider - get data struct for describing a dca provider
325 * @ops - pointer to struct of dca operation function pointers
326 * @priv_size - size of extra mem to be added for provider's needs
327 */
328 struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size)
329 {
330 struct dca_provider *dca;
331 int alloc_size;
332
333 alloc_size = (sizeof(*dca) + priv_size);
334 dca = kzalloc(alloc_size, GFP_KERNEL);
335 if (!dca)
336 return NULL;
337 dca->ops = ops;
338
339 return dca;
340 }
341 EXPORT_SYMBOL_GPL(alloc_dca_provider);
342
/**
 * free_dca_provider - release the dca provider data struct
 * @dca - pointer returned by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
353
/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 *
 * Returns 0 on success, -ENODEV when providers are blocked or no domain
 * can be obtained, or the error from dca_sysfs_add_provider().
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain;

	spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	/*
	 * NOTE(review): the lock is dropped here because dca_sysfs_add_provider()
	 * can sleep; dca_providers_blocked may flip in this window, which is why
	 * it is re-checked (via dca_get_domain()'s side effect) below.
	 */
	spin_unlock_irqrestore(&dca_lock, flags);

	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		if (dca_providers_blocked) {
			/*
			 * This provider is the I/OAT 3.0 device that tripped
			 * the block: undo our sysfs entry and tear down every
			 * other provider as well.
			 */
			spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
		} else {
			spin_unlock_irqrestore(&dca_lock, flags);
		}
		return -ENODEV;
	}
	list_add(&dca->node, &domain->dca_providers);
	spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
396
397 /**
398 * unregister_dca_provider - remove a dca provider
399 * @dca - struct created by alloc_dca_provider()
400 */
401 void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
402 {
403 unsigned long flags;
404 struct pci_bus *pci_rc;
405 struct dca_domain *domain;
406
407 blocking_notifier_call_chain(&dca_provider_chain,
408 DCA_PROVIDER_REMOVE, NULL);
409
410 spin_lock_irqsave(&dca_lock, flags);
411
412 list_del(&dca->node);
413
414 pci_rc = dca_pci_rc_from_dev(dev);
415 domain = dca_find_domain(pci_rc);
416 if (list_empty(&domain->dca_providers))
417 dca_free_domain(domain);
418
419 spin_unlock_irqrestore(&dca_lock, flags);
420
421 dca_sysfs_remove_provider(dca);
422 }
423 EXPORT_SYMBOL_GPL(unregister_dca_provider);
424
/**
 * dca_register_notify - register a client's notifier callback
 * @nb - notifier block to call on DCA_PROVIDER_ADD/REMOVE events
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);
433
/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb - notifier block previously passed to dca_register_notify()
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
442
/* Module init: announce the service and create the dca sysfs class. */
static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}
448
/* Module exit: tear down the dca sysfs class. */
static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

/* arch_initcall so DCA is ready before device drivers probe. */
arch_initcall(dca_init);
module_exit(dca_exit);
456
This page took 0.038565 seconds and 5 git commands to generate.