Commit | Line | Data |
---|---|---|
5823d089 AS |
1 | /* |
2 | * Intel MID Power Management Unit (PWRMU) device driver | |
3 | * | |
4 | * Copyright (C) 2016, Intel Corporation | |
5 | * | |
6 | * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms and conditions of the GNU General Public License, | |
10 | * version 2, as published by the Free Software Foundation. | |
11 | * | |
12 | * Intel MID Power Management Unit device driver handles the South Complex PCI | |
13 | * devices such as GPDMA, SPI, I2C, PWM, and so on. By default PCI core | |
14 | * modifies bits in PMCSR register in the PCI configuration space. This is not | |
15 | * enough on some SoCs like Intel Tangier. In such case PCI core sets a new | |
16 | * power state of the device in question through a PM hook registered in struct | |
17 | * pci_platform_pm_ops (see drivers/pci/pci-mid.c). | |
18 | */ | |
19 | ||
20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
21 | ||
22 | #include <linux/delay.h> | |
23 | #include <linux/errno.h> | |
24 | #include <linux/init.h> | |
25 | #include <linux/interrupt.h> | |
26 | #include <linux/kernel.h> | |
27 | #include <linux/module.h> | |
28 | #include <linux/mutex.h> | |
29 | #include <linux/pci.h> | |
30 | ||
31 | #include <asm/intel-mid.h> | |
32 | ||
/* Registers (offsets into the PWRMU MMIO region, BAR 0) */
#define PM_STS		0x00	/* status; bit 8 = command in progress */
#define PM_CMD		0x04	/* command register */
#define PM_ICS		0x08	/* interrupt control/status */
#define PM_WKC(x)	(0x10 + (x) * 4)	/* wake control, two 32-bit banks */
#define PM_WKS(x)	(0x18 + (x) * 4)	/* wake status, two 32-bit banks */
#define PM_SSC(x)	(0x20 + (x) * 4)	/* sub-system control (requested states) */
#define PM_SSS(x)	(0x30 + (x) * 4)	/* sub-system status (actual states) */

/* Bits in PM_STS */
#define PM_STS_BUSY		(1 << 8)	/* previous command still executing */

/* Bits in PM_CMD */
#define PM_CMD_CMD(x)		((x) << 0)	/* command opcode in low byte */
#define PM_CMD_IOC		(1 << 8)	/* interrupt on completion */
#define PM_CMD_D3cold		(1 << 21)

/* List of commands */
#define CMD_SET_CFG		0x01	/* apply the PM_SSC configuration */

/* Bits in PM_ICS */
#define PM_ICS_INT_STATUS(x)	((x) & 0xff)	/* interrupt cause in low byte */
#define PM_ICS_IE		(1 << 8)	/* interrupt enable */
#define PM_ICS_IP		(1 << 9)	/* interrupt pending; write 1 to ack */
#define PM_ICS_SW_INT_STS	(1 << 10)

/* List of interrupts (values reported via PM_ICS_INT_STATUS) */
#define INT_INVALID		0
#define INT_CMD_COMPLETE	1
#define INT_CMD_ERR		2
#define INT_WAKE_EVENT		3
#define INT_LSS_POWER_ERR	4
#define INT_S0iX_MSG_ERR	5
#define INT_NO_C6		6
#define INT_TRIGGER_ERR		7
#define INT_INACTIVITY		8

/* South Complex devices */
#define LSS_MAX_SHARED_DEVS	4	/* PCI functions that may share one LSS */
#define LSS_MAX_DEVS		64	/* total Logical SubSystems */

#define LSS_WS_BITS		1	/* wake state width */
#define LSS_PWS_BITS		2	/* power state width */

/* Supported device IDs */
#define PCI_DEVICE_ID_PENWELL	0x0828
#define PCI_DEVICE_ID_TANGIER	0x11a1
/* One cached requester: a PCI device and the power state it asked for */
struct mid_pwr_dev {
	struct pci_dev *pdev;
	pci_power_t state;
};

/* Driver state for the single PWRMU instance */
struct mid_pwr {
	struct device *dev;
	void __iomem *regs;	/* BAR 0 MMIO mapping */
	int irq;
	bool available;		/* set once probe completed successfully */

	struct mutex lock;	/* serializes power-state updates */
	/* per-LSS cache of up to LSS_MAX_SHARED_DEVS requesting devices */
	struct mid_pwr_dev lss[LSS_MAX_DEVS][LSS_MAX_SHARED_DEVS];
};

/* Singleton, set at probe time; read by intel_mid_pci_set_power_state() */
static struct mid_pwr *midpwr;
97 | ||
98 | static u32 mid_pwr_get_state(struct mid_pwr *pwr, int reg) | |
99 | { | |
100 | return readl(pwr->regs + PM_SSS(reg)); | |
101 | } | |
102 | ||
103 | static void mid_pwr_set_state(struct mid_pwr *pwr, int reg, u32 value) | |
104 | { | |
105 | writel(value, pwr->regs + PM_SSC(reg)); | |
106 | } | |
107 | ||
108 | static void mid_pwr_set_wake(struct mid_pwr *pwr, int reg, u32 value) | |
109 | { | |
110 | writel(value, pwr->regs + PM_WKC(reg)); | |
111 | } | |
112 | ||
113 | static void mid_pwr_interrupt_disable(struct mid_pwr *pwr) | |
114 | { | |
115 | writel(~PM_ICS_IE, pwr->regs + PM_ICS); | |
116 | } | |
117 | ||
118 | static bool mid_pwr_is_busy(struct mid_pwr *pwr) | |
119 | { | |
120 | return !!(readl(pwr->regs + PM_STS) & PM_STS_BUSY); | |
121 | } | |
122 | ||
123 | /* Wait 500ms that the latest PWRMU command finished */ | |
124 | static int mid_pwr_wait(struct mid_pwr *pwr) | |
125 | { | |
126 | unsigned int count = 500000; | |
127 | bool busy; | |
128 | ||
129 | do { | |
130 | busy = mid_pwr_is_busy(pwr); | |
131 | if (!busy) | |
132 | return 0; | |
133 | udelay(1); | |
134 | } while (--count); | |
135 | ||
136 | return -EBUSY; | |
137 | } | |
138 | ||
139 | static int mid_pwr_wait_for_cmd(struct mid_pwr *pwr, u8 cmd) | |
140 | { | |
141 | writel(PM_CMD_CMD(cmd), pwr->regs + PM_CMD); | |
142 | return mid_pwr_wait(pwr); | |
143 | } | |
144 | ||
145 | static int __update_power_state(struct mid_pwr *pwr, int reg, int bit, int new) | |
146 | { | |
147 | int curstate; | |
148 | u32 power; | |
149 | int ret; | |
150 | ||
151 | /* Check if the device is already in desired state */ | |
152 | power = mid_pwr_get_state(pwr, reg); | |
153 | curstate = (power >> bit) & 3; | |
154 | if (curstate == new) | |
155 | return 0; | |
156 | ||
157 | /* Update the power state */ | |
158 | mid_pwr_set_state(pwr, reg, (power & ~(3 << bit)) | (new << bit)); | |
159 | ||
160 | /* Send command to SCU */ | |
161 | ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG); | |
162 | if (ret) | |
163 | return ret; | |
164 | ||
165 | /* Check if the device is already in desired state */ | |
166 | power = mid_pwr_get_state(pwr, reg); | |
167 | curstate = (power >> bit) & 3; | |
168 | if (curstate != new) | |
169 | return -EAGAIN; | |
170 | ||
171 | return 0; | |
172 | } | |
173 | ||
/*
 * An LSS can be shared by up to LSS_MAX_SHARED_DEVS PCI devices. Record
 * @pdev's desired @state in the per-LSS cache @lss and return the weakest
 * (numerically lowest, i.e. closest to PCI_D0) state requested by any
 * device sharing the LSS -- the deepest state the LSS may enter without
 * breaking one of its users.
 *
 * If the cache is full and @pdev is not in it, a warning is emitted and
 * @state still participates in the minimum via the @weakest seed below.
 */
static pci_power_t __find_weakest_power_state(struct mid_pwr_dev *lss,
					      struct pci_dev *pdev,
					      pci_power_t state)
{
	pci_power_t weakest = PCI_D3hot;
	unsigned int j;

	/* Find device in cache or first free cell */
	for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
		if (lss[j].pdev == pdev || !lss[j].pdev)
			break;
	}

	/* Store the desired state in cache */
	if (j < LSS_MAX_SHARED_DEVS) {
		lss[j].pdev = pdev;
		lss[j].state = state;
	} else {
		dev_WARN(&pdev->dev, "No room for device in PWRMU LSS cache\n");
		weakest = state;
	}

	/* Find the power state we may use */
	for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
		if (lss[j].state < weakest)
			weakest = lss[j].state;
	}

	return weakest;
}
204 | ||
205 | static int __set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev, | |
206 | pci_power_t state, int id, int reg, int bit) | |
207 | { | |
208 | const char *name; | |
209 | int ret; | |
210 | ||
211 | state = __find_weakest_power_state(pwr->lss[id], pdev, state); | |
212 | name = pci_power_name(state); | |
213 | ||
214 | ret = __update_power_state(pwr, reg, bit, (__force int)state); | |
215 | if (ret) { | |
216 | dev_warn(&pdev->dev, "Can't set power state %s: %d\n", name, ret); | |
217 | return ret; | |
218 | } | |
219 | ||
220 | dev_vdbg(&pdev->dev, "Set power state %s\n", name); | |
221 | return 0; | |
222 | } | |
223 | ||
224 | static int mid_pwr_set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev, | |
225 | pci_power_t state) | |
226 | { | |
227 | int id, reg, bit; | |
228 | int ret; | |
229 | ||
230 | id = intel_mid_pwr_get_lss_id(pdev); | |
231 | if (id < 0) | |
232 | return id; | |
233 | ||
234 | reg = (id * LSS_PWS_BITS) / 32; | |
235 | bit = (id * LSS_PWS_BITS) % 32; | |
236 | ||
237 | /* We support states between PCI_D0 and PCI_D3hot */ | |
238 | if (state < PCI_D0) | |
239 | state = PCI_D0; | |
240 | if (state > PCI_D3hot) | |
241 | state = PCI_D3hot; | |
242 | ||
243 | mutex_lock(&pwr->lock); | |
244 | ret = __set_power_state(pwr, pdev, state, id, reg, bit); | |
245 | mutex_unlock(&pwr->lock); | |
246 | return ret; | |
247 | } | |
248 | ||
/*
 * intel_mid_pci_set_power_state - set a South Complex device power state
 * @pdev: PCI device to update
 * @state: requested PCI power state
 *
 * Called by the PCI core (see drivers/pci/pci-mid.c). May sleep: the
 * underlying path takes a mutex and polls the PWRMU. A no-op when the
 * PWRMU has not probed or is unavailable.
 */
int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
{
	struct mid_pwr *pwr = midpwr;
	int ret = 0;

	might_sleep();

	if (pwr && pwr->available)
		ret = mid_pwr_set_power_state(pwr, pdev, state);
	dev_vdbg(&pdev->dev, "set_power_state() returns %d\n", ret);

	/*
	 * NOTE(review): any error in @ret is only logged above -- callers
	 * always see success. Confirm this is intentional (best effort for
	 * the PCI core) rather than a missing "return ret".
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);
263 | ||
/*
 * intel_mid_pwr_get_lss_id - look up the PWRMU LSS index for a PCI device
 * @pdev: PCI device to query
 *
 * Returns the LSS index (0..LSS_MAX_DEVS-1) on success, -EINVAL if the
 * device has no vendor capability, -ENODEV if the capability does not
 * describe an LSS mapping, or -ERANGE for an out-of-range index.
 */
int intel_mid_pwr_get_lss_id(struct pci_dev *pdev)
{
	int vndr;
	u8 id;

	/*
	 * Mapping to PWRMU index is kept in the Logical SubSystem ID byte of
	 * Vendor capability.
	 */
	vndr = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
	if (!vndr)
		return -EINVAL;

	/*
	 * Read the Logical SubSystem ID byte.
	 * NOTE(review): the pci_read_config_byte() return value is ignored;
	 * on failure @id may be unmodified/0xff -- confirm this is acceptable.
	 */
	pci_read_config_byte(pdev, vndr + INTEL_MID_PWR_LSS_OFFSET, &id);
	if (!(id & INTEL_MID_PWR_LSS_TYPE))
		return -ENODEV;

	/* Strip the type flag to get the bare index */
	id &= ~INTEL_MID_PWR_LSS_TYPE;
	if (id >= LSS_MAX_DEVS)
		return -ERANGE;

	return id;
}
288 | ||
289 | static irqreturn_t mid_pwr_irq_handler(int irq, void *dev_id) | |
290 | { | |
291 | struct mid_pwr *pwr = dev_id; | |
292 | u32 ics; | |
293 | ||
294 | ics = readl(pwr->regs + PM_ICS); | |
295 | if (!(ics & PM_ICS_IP)) | |
296 | return IRQ_NONE; | |
297 | ||
298 | writel(ics | PM_ICS_IP, pwr->regs + PM_ICS); | |
299 | ||
300 | dev_warn(pwr->dev, "Unexpected IRQ: %#x\n", PM_ICS_INT_STATUS(ics)); | |
301 | return IRQ_HANDLED; | |
302 | } | |
303 | ||
/* Per-device-ID hooks, attached via pci_device_id.driver_data */
struct mid_pwr_device_info {
	/* optional: put the SoC into a known power state at probe time */
	int (*set_initial_state)(struct mid_pwr *pwr);
};
307 | ||
/*
 * Probe the PWRMU PCI device: map BAR 0, mask interrupts, apply the
 * optional per-SoC initial state, install the IRQ handler and publish
 * the instance through the @midpwr singleton. All resources are devres
 * managed, so there is no remove/error unwinding here.
 */
static int mid_pwr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mid_pwr_device_info *info = (void *)id->driver_data;
	struct device *dev = &pdev->dev;
	struct mid_pwr *pwr;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "error: could not enable device\n");
		return ret;
	}

	/* Map BAR 0 only (bit 0 of the BAR mask) */
	ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "I/O memory remapping failed\n");
		return ret;
	}

	pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
	if (!pwr)
		return -ENOMEM;

	pwr->dev = dev;
	pwr->regs = pcim_iomap_table(pdev)[0];
	pwr->irq = pdev->irq;

	mutex_init(&pwr->lock);

	/* Disable interrupts */
	mid_pwr_interrupt_disable(pwr);

	/* A failed initial state is logged but does not abort the probe */
	if (info && info->set_initial_state) {
		ret = info->set_initial_state(pwr);
		if (ret)
			dev_warn(dev, "Can't set initial state: %d\n", ret);
	}

	ret = devm_request_irq(dev, pdev->irq, mid_pwr_irq_handler,
			       IRQF_NO_SUSPEND, pci_name(pdev), pwr);
	if (ret)
		return ret;

	/* Publish only after everything above succeeded */
	pwr->available = true;
	midpwr = pwr;

	pci_set_drvdata(pdev, pwr);
	return 0;
}
357 | ||
ca22312d | 358 | static int mid_set_initial_state(struct mid_pwr *pwr) |
5823d089 AS |
359 | { |
360 | unsigned int i, j; | |
361 | int ret; | |
362 | ||
363 | /* | |
364 | * Enable wake events. | |
365 | * | |
366 | * PWRMU supports up to 32 sources for wake up the system. Ungate them | |
367 | * all here. | |
368 | */ | |
369 | mid_pwr_set_wake(pwr, 0, 0xffffffff); | |
370 | mid_pwr_set_wake(pwr, 1, 0xffffffff); | |
371 | ||
372 | /* | |
373 | * Power off South Complex devices. | |
374 | * | |
375 | * There is a map (see a note below) of 64 devices with 2 bits per each | |
376 | * on 32-bit HW registers. The following calls set all devices to one | |
377 | * known initial state, i.e. PCI_D3hot. This is done in conjunction | |
378 | * with PMCSR setting in arch/x86/pci/intel_mid_pci.c. | |
379 | * | |
380 | * NOTE: The actual device mapping is provided by a platform at run | |
381 | * time using vendor capability of PCI configuration space. | |
382 | */ | |
383 | mid_pwr_set_state(pwr, 0, 0xffffffff); | |
384 | mid_pwr_set_state(pwr, 1, 0xffffffff); | |
385 | mid_pwr_set_state(pwr, 2, 0xffffffff); | |
386 | mid_pwr_set_state(pwr, 3, 0xffffffff); | |
387 | ||
388 | /* Send command to SCU */ | |
389 | ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG); | |
390 | if (ret) | |
391 | return ret; | |
392 | ||
393 | for (i = 0; i < LSS_MAX_DEVS; i++) { | |
394 | for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) | |
395 | pwr->lss[i][j].state = PCI_D3hot; | |
396 | } | |
397 | ||
398 | return 0; | |
399 | } | |
400 | ||
/* Hooks shared by the Penwell and Tangier PWRMU devices */
static const struct mid_pwr_device_info mid_info = {
	.set_initial_state = mid_set_initial_state,
};
404 | ||
/* PCI IDs handled by this driver; driver_data carries per-SoC hooks */
static const struct pci_device_id mid_pwr_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL), (kernel_ulong_t)&mid_info },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER), (kernel_ulong_t)&mid_info },
	{}
};
MODULE_DEVICE_TABLE(pci, mid_pwr_pci_ids);
411 | ||
/* Built-in only driver (no module unload), hence no .remove callback */
static struct pci_driver mid_pwr_pci_driver = {
	.name		= "intel_mid_pwr",
	.probe		= mid_pwr_probe,
	.id_table	= mid_pwr_pci_ids,
};

builtin_pci_driver(mid_pwr_pci_driver);