drivers/misc/mei/pci-txe.c
/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>


#include "mei_dev.h"
#include "hw-txe.h"

static const struct pci_device_id mei_txe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
        {PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */

        {0, }
};
MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);

#ifdef CONFIG_PM
static inline void mei_txe_set_pm_domain(struct mei_device *dev);
static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

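/**
 * mei_txe_pci_iounmap - unmap the memory mapped BARs
 *
 * @pdev: PCI device structure
 * @hw: txe hardware structure
 */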
static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
{
        int i;

        for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
                if (hw->mem_addr[i]) {
                        pci_iounmap(pdev, hw->mem_addr[i]);
                        hw->mem_addr[i] = NULL;
                }
        }
}

/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct mei_device *dev;
        struct mei_txe_hw *hw;
        int err;
        int i;

        /* enable pci dev */
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device.\n");
                goto end;
        }
        /* set PCI host mastering */
        pci_set_master(pdev);
        /* pci request regions for mei driver */
        err = pci_request_regions(pdev, KBUILD_MODNAME);
        if (err) {
                dev_err(&pdev->dev, "failed to get pci regions.\n");
                goto disable_device;
        }

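        /* prefer a 36-bit DMA mask, fall back to 32-bit if unavailable */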
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
        if (err) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "No suitable DMA available.\n");
                        goto release_regions;
                }
        }

        /* allocates and initializes the mei dev structure */
        dev = mei_txe_dev_init(pdev);
        if (!dev) {
                err = -ENOMEM;
                goto release_regions;
        }
        hw = to_txe_hw(dev);

        /* mapping IO device memory */
        for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
                hw->mem_addr[i] = pci_iomap(pdev, i, 0);
                if (!hw->mem_addr[i]) {
                        dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
                        err = -ENOMEM;
                        goto free_device;
                }
        }


        pci_enable_msi(pdev);

        /* clear spurious interrupts */
        mei_clear_interrupts(dev);

        /* request and enable interrupt */
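        /*
         * With MSI the vector is exclusive to this device, so no quick
         * (primary) handler is needed; request_threaded_irq() then requires
         * IRQF_ONESHOT. On a shared legacy line the quick handler filters
         * out interrupts raised by other devices.
         */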
        if (pci_dev_msi_enabled(pdev))
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_txe_irq_thread_handler,
                        IRQF_ONESHOT, KBUILD_MODNAME, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_txe_irq_quick_handler,
                        mei_txe_irq_thread_handler,
                        IRQF_SHARED, KBUILD_MODNAME, dev);
        if (err) {
                dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
                        pdev->irq);
                goto free_device;
        }

        if (mei_start(dev)) {
                dev_err(&pdev->dev, "init hw failure.\n");
                err = -ENODEV;
                goto release_irq;
        }

        pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);

        err = mei_register(dev, &pdev->dev);
        if (err)
                goto release_irq;

        pci_set_drvdata(pdev, dev);

        /*
         * For non-wakeable HW the runtime PM framework
         * can't be used at the PCI device level;
         * use domain runtime PM callbacks instead.
         */
        if (!pci_dev_run_wake(pdev))
                mei_txe_set_pm_domain(dev);

        pm_runtime_put_noidle(&pdev->dev);

        return 0;

release_irq:

        mei_cancel_work(dev);

        /* disable interrupts */
        mei_disable_interrupts(dev);

        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);

free_device:
        mei_txe_pci_iounmap(pdev, hw);

        kfree(dev);
release_regions:
        pci_release_regions(pdev);
disable_device:
        pci_disable_device(pdev);
end:
        dev_err(&pdev->dev, "initialization failed.\n");
        return err;
}

/**
 * mei_txe_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_txe_remove(struct pci_dev *pdev)
{
        struct mei_device *dev;
        struct mei_txe_hw *hw;

        dev = pci_get_drvdata(pdev);
        if (!dev) {
                dev_err(&pdev->dev, "mei: dev = NULL\n");
                return;
        }

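        /* take a runtime PM reference so the device is not suspended during teardown */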
        pm_runtime_get_noresume(&pdev->dev);

        hw = to_txe_hw(dev);

        mei_stop(dev);

        if (!pci_dev_run_wake(pdev))
                mei_txe_unset_pm_domain(dev);

        /* disable interrupts */
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);

        pci_set_drvdata(pdev, NULL);

        mei_txe_pci_iounmap(pdev, hw);

        mei_deregister(dev);

        kfree(dev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
}


#ifdef CONFIG_PM_SLEEP
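/**
 * mei_txe_pci_suspend - suspend device entry point
 *
 * @device: device structure
 *
 * Stop the device and release its interrupt and MSI vector.
 *
 * Return: 0 on success, -ENODEV if the device cannot be found.
 */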
static int mei_txe_pci_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev = pci_get_drvdata(pdev);

        if (!dev)
                return -ENODEV;

        dev_dbg(&pdev->dev, "suspend\n");

        mei_stop(dev);

        mei_disable_interrupts(dev);

        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);

        return 0;
}

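/**
 * mei_txe_pci_resume - resume device entry point
 *
 * @device: device structure
 *
 * Re-enable MSI, re-request the interrupt and restart the device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */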
static int mei_txe_pci_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        int err;

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        pci_enable_msi(pdev);

        mei_clear_interrupts(dev);

        /* request and enable interrupt */
        if (pci_dev_msi_enabled(pdev))
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_txe_irq_thread_handler,
                        IRQF_ONESHOT, KBUILD_MODNAME, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_txe_irq_quick_handler,
                        mei_txe_irq_thread_handler,
                        IRQF_SHARED, KBUILD_MODNAME, dev);
        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
                        pdev->irq);
                return err;
        }

        err = mei_restart(dev);

        return err;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
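/**
 * mei_txe_pm_runtime_idle - runtime PM idle callback
 *
 * @device: device structure
 *
 * Trigger autosuspend when the write queues are idle.
 *
 * Return: -EBUSY, or -ENODEV if the device cannot be found.
 */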
static int mei_txe_pm_runtime_idle(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;

        dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n");

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;
        if (mei_write_is_idle(dev))
                pm_runtime_autosuspend(device);

        return -EBUSY;
}
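
/**
 * mei_txe_pm_runtime_suspend - runtime PM suspend callback
 *
 * @device: device structure
 *
 * Clear the aliveness request when the write queues are idle; on a
 * run-wake capable device also disable the interrupts before entering D3.
 *
 * Return: 0 on success, -EAGAIN if the device is busy,
 *         or another negative error code.
 */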
static int mei_txe_pm_runtime_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        int ret;

        dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n");

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        if (mei_write_is_idle(dev))
                ret = mei_txe_aliveness_set_sync(dev, 0);
        else
                ret = -EAGAIN;

        /*
         * If everything is okay we're about to enter the PCI low
         * power state (D3), therefore we need to disable the
         * interrupts towards the host.
         * However, if the device is not wakeable we do not enter
         * the D-low state and we need to keep the interrupt kicking.
         */
        if (!ret && pci_dev_run_wake(pdev))
                mei_disable_interrupts(dev);

        dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);

        mutex_unlock(&dev->device_lock);
        return ret;
}

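/**
 * mei_txe_pm_runtime_resume - runtime PM resume callback
 *
 * @device: device structure
 *
 * Re-enable the interrupts and set the aliveness request.
 *
 * Return: 0 on success, a negative error code otherwise.
 */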
static int mei_txe_pm_runtime_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev;
        int ret;

        dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n");

        dev = pci_get_drvdata(pdev);
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        mei_enable_interrupts(dev);

        ret = mei_txe_aliveness_set_sync(dev, 1);

        mutex_unlock(&dev->device_lock);

        dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret);

        return ret;
}

/**
 * mei_txe_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_set_pm_domain(struct mei_device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        if (pdev->dev.bus && pdev->dev.bus->pm) {
                dev->pg_domain.ops = *pdev->dev.bus->pm;

                dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
                dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
                dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;

                dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
        }
}

/**
 * mei_txe_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
{
        /* stop using pm callbacks if any */
        dev_pm_domain_set(dev->dev, NULL);
}

static const struct dev_pm_ops mei_txe_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
                                mei_txe_pci_resume)
        SET_RUNTIME_PM_OPS(
                mei_txe_pm_runtime_suspend,
                mei_txe_pm_runtime_resume,
                mei_txe_pm_runtime_idle)
};

#define MEI_TXE_PM_OPS  (&mei_txe_pm_ops)
#else
#define MEI_TXE_PM_OPS  NULL
#endif /* CONFIG_PM */

/*
 *  PCI driver structure
 */
static struct pci_driver mei_txe_driver = {
        .name = KBUILD_MODNAME,
        .id_table = mei_txe_pci_tbl,
        .probe = mei_txe_probe,
        .remove = mei_txe_remove,
        .shutdown = mei_txe_remove,
        .driver.pm = MEI_TXE_PM_OPS,
};

module_pci_driver(mei_txe_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
MODULE_LICENSE("GPL v2");