Commit | Line | Data |
---|---|---|
f8beab2b MB |
1 | /* |
2 | * regmap based irq_chip | |
3 | * | |
4 | * Copyright 2011 Wolfson Microelectronics plc | |
5 | * | |
6 | * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
12 | ||
13 | #include <linux/export.h> | |
51990e82 | 14 | #include <linux/device.h> |
f8beab2b MB |
15 | #include <linux/regmap.h> |
16 | #include <linux/irq.h> | |
17 | #include <linux/interrupt.h> | |
4af8be67 | 18 | #include <linux/irqdomain.h> |
0c00c50b | 19 | #include <linux/pm_runtime.h> |
f8beab2b MB |
20 | #include <linux/slab.h> |
21 | ||
22 | #include "internal.h" | |
23 | ||
/*
 * Runtime state for one registered regmap IRQ controller instance.
 * Allocated by regmap_add_irq_chip() and freed by regmap_del_irq_chip().
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises mask/wake updates with sync */
	struct irq_chip irq_chip;	/* per-instance copy so .name can differ */

	struct regmap *map;		/* register map used for all I/O */
	const struct regmap_irq_chip *chip; /* static description of the chip */

	int irq_base;			/* first Linux IRQ for legacy mappings */
	struct irq_domain *domain;	/* hwirq -> virq mapping domain */

	int irq;			/* primary (parent) interrupt */
	int wake_count;			/* net wake enables to push to parent */

	void *status_reg_buf;		/* raw buffer for bulk status reads */
	unsigned int *status_buf;	/* decoded status, one word per register */
	unsigned int *mask_buf;		/* current mask state, one word per reg */
	unsigned int *mask_buf_def;	/* union of all IRQ bits per register */
	unsigned int *wake_buf;		/* wake mask (only if chip->wake_base) */

	unsigned int irq_reg_stride;	/* register step between IRQ registers */
};
45 | ||
/* Look up the static per-IRQ description for hardware IRQ number @irq */
static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}
52 | ||
/*
 * irq_chip bus_lock callback: hold the mutex across a run of mask/wake
 * updates so they are pushed to the hardware as one transaction in
 * regmap_irq_sync_unlock().
 */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}
59 | ||
60 | static void regmap_irq_sync_unlock(struct irq_data *data) | |
61 | { | |
62 | struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); | |
56806555 | 63 | struct regmap *map = d->map; |
f8beab2b | 64 | int i, ret; |
16032624 | 65 | u32 reg; |
f8beab2b | 66 | |
0c00c50b MB |
67 | if (d->chip->runtime_pm) { |
68 | ret = pm_runtime_get_sync(map->dev); | |
69 | if (ret < 0) | |
70 | dev_err(map->dev, "IRQ sync failed to resume: %d\n", | |
71 | ret); | |
72 | } | |
73 | ||
f8beab2b MB |
74 | /* |
75 | * If there's been a change in the mask write it back to the | |
76 | * hardware. We rely on the use of the regmap core cache to | |
77 | * suppress pointless writes. | |
78 | */ | |
79 | for (i = 0; i < d->chip->num_regs; i++) { | |
16032624 SW |
80 | reg = d->chip->mask_base + |
81 | (i * map->reg_stride * d->irq_reg_stride); | |
36ac914b XT |
82 | if (d->chip->mask_invert) |
83 | ret = regmap_update_bits(d->map, reg, | |
84 | d->mask_buf_def[i], ~d->mask_buf[i]); | |
85 | else | |
86 | ret = regmap_update_bits(d->map, reg, | |
f8beab2b MB |
87 | d->mask_buf_def[i], d->mask_buf[i]); |
88 | if (ret != 0) | |
89 | dev_err(d->map->dev, "Failed to sync masks in %x\n", | |
16032624 | 90 | reg); |
33be4932 MB |
91 | |
92 | reg = d->chip->wake_base + | |
93 | (i * map->reg_stride * d->irq_reg_stride); | |
94 | if (d->wake_buf) { | |
9442490a MB |
95 | if (d->chip->wake_invert) |
96 | ret = regmap_update_bits(d->map, reg, | |
97 | d->mask_buf_def[i], | |
98 | ~d->wake_buf[i]); | |
99 | else | |
100 | ret = regmap_update_bits(d->map, reg, | |
101 | d->mask_buf_def[i], | |
102 | d->wake_buf[i]); | |
33be4932 MB |
103 | if (ret != 0) |
104 | dev_err(d->map->dev, | |
105 | "Failed to sync wakes in %x: %d\n", | |
106 | reg, ret); | |
107 | } | |
4bd7145b YZ |
108 | |
109 | if (!d->chip->init_ack_masked) | |
110 | continue; | |
111 | /* | |
112 | * Ack all the masked interrupts uncondictionly, | |
113 | * OR if there is masked interrupt which hasn't been Acked, | |
114 | * it'll be ignored in irq handler, then may introduce irq storm | |
115 | */ | |
116 | if (d->mask_buf[i] && d->chip->ack_base) { | |
117 | reg = d->chip->ack_base + | |
118 | (i * map->reg_stride * d->irq_reg_stride); | |
119 | ret = regmap_write(map, reg, d->mask_buf[i]); | |
120 | if (ret != 0) | |
121 | dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", | |
122 | reg, ret); | |
123 | } | |
f8beab2b MB |
124 | } |
125 | ||
0c00c50b MB |
126 | if (d->chip->runtime_pm) |
127 | pm_runtime_put(map->dev); | |
128 | ||
a43fd50d MB |
129 | /* If we've changed our wakeup count propagate it to the parent */ |
130 | if (d->wake_count < 0) | |
131 | for (i = d->wake_count; i < 0; i++) | |
132 | irq_set_irq_wake(d->irq, 0); | |
133 | else if (d->wake_count > 0) | |
134 | for (i = 0; i < d->wake_count; i++) | |
135 | irq_set_irq_wake(d->irq, 1); | |
136 | ||
137 | d->wake_count = 0; | |
138 | ||
f8beab2b MB |
139 | mutex_unlock(&d->lock); |
140 | } | |
141 | ||
/*
 * irq_chip enable callback: clear the mask bit in the cached buffer.
 * The hardware write is deferred to regmap_irq_sync_unlock().
 */
static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
}
150 | ||
/*
 * irq_chip disable callback: set the mask bit in the cached buffer.
 * The hardware write is deferred to regmap_irq_sync_unlock().
 */
static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}
159 | ||
a43fd50d MB |
160 | static int regmap_irq_set_wake(struct irq_data *data, unsigned int on) |
161 | { | |
162 | struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); | |
163 | struct regmap *map = d->map; | |
164 | const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); | |
165 | ||
a43fd50d | 166 | if (on) { |
55ac85e9 LD |
167 | if (d->wake_buf) |
168 | d->wake_buf[irq_data->reg_offset / map->reg_stride] | |
169 | &= ~irq_data->mask; | |
a43fd50d MB |
170 | d->wake_count++; |
171 | } else { | |
55ac85e9 LD |
172 | if (d->wake_buf) |
173 | d->wake_buf[irq_data->reg_offset / map->reg_stride] | |
174 | |= irq_data->mask; | |
a43fd50d MB |
175 | d->wake_count--; |
176 | } | |
177 | ||
178 | return 0; | |
179 | } | |
180 | ||
/*
 * Template irq_chip: copied into each regmap_irq_chip_data instance so the
 * name can be customised per chip.  Uses the bus_lock/bus_sync_unlock pair
 * because the underlying regmap may sit on a slow (sleeping) bus.
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_wake		= regmap_irq_set_wake,
};
188 | ||
189 | static irqreturn_t regmap_irq_thread(int irq, void *d) | |
190 | { | |
191 | struct regmap_irq_chip_data *data = d; | |
b026ddbb | 192 | const struct regmap_irq_chip *chip = data->chip; |
f8beab2b MB |
193 | struct regmap *map = data->map; |
194 | int ret, i; | |
d23511f9 | 195 | bool handled = false; |
16032624 | 196 | u32 reg; |
f8beab2b | 197 | |
0c00c50b MB |
198 | if (chip->runtime_pm) { |
199 | ret = pm_runtime_get_sync(map->dev); | |
200 | if (ret < 0) { | |
201 | dev_err(map->dev, "IRQ thread failed to resume: %d\n", | |
202 | ret); | |
283189d3 | 203 | pm_runtime_put(map->dev); |
0c00c50b MB |
204 | return IRQ_NONE; |
205 | } | |
206 | } | |
207 | ||
a7440eaa MB |
208 | /* |
209 | * Read in the statuses, using a single bulk read if possible | |
210 | * in order to reduce the I/O overheads. | |
211 | */ | |
212 | if (!map->use_single_rw && map->reg_stride == 1 && | |
213 | data->irq_reg_stride == 1) { | |
214 | u8 *buf8 = data->status_reg_buf; | |
215 | u16 *buf16 = data->status_reg_buf; | |
216 | u32 *buf32 = data->status_reg_buf; | |
022f926a | 217 | |
a7440eaa MB |
218 | BUG_ON(!data->status_reg_buf); |
219 | ||
220 | ret = regmap_bulk_read(map, chip->status_base, | |
221 | data->status_reg_buf, | |
222 | chip->num_regs); | |
022f926a GG |
223 | if (ret != 0) { |
224 | dev_err(map->dev, "Failed to read IRQ status: %d\n", | |
a7440eaa | 225 | ret); |
f8beab2b MB |
226 | return IRQ_NONE; |
227 | } | |
a7440eaa MB |
228 | |
229 | for (i = 0; i < data->chip->num_regs; i++) { | |
230 | switch (map->format.val_bytes) { | |
231 | case 1: | |
232 | data->status_buf[i] = buf8[i]; | |
233 | break; | |
234 | case 2: | |
235 | data->status_buf[i] = buf16[i]; | |
236 | break; | |
237 | case 4: | |
238 | data->status_buf[i] = buf32[i]; | |
239 | break; | |
240 | default: | |
241 | BUG(); | |
242 | return IRQ_NONE; | |
243 | } | |
244 | } | |
245 | ||
246 | } else { | |
247 | for (i = 0; i < data->chip->num_regs; i++) { | |
248 | ret = regmap_read(map, chip->status_base + | |
249 | (i * map->reg_stride | |
250 | * data->irq_reg_stride), | |
251 | &data->status_buf[i]); | |
252 | ||
253 | if (ret != 0) { | |
254 | dev_err(map->dev, | |
255 | "Failed to read IRQ status: %d\n", | |
256 | ret); | |
257 | if (chip->runtime_pm) | |
258 | pm_runtime_put(map->dev); | |
259 | return IRQ_NONE; | |
260 | } | |
261 | } | |
bbae92ca | 262 | } |
f8beab2b | 263 | |
bbae92ca MB |
264 | /* |
265 | * Ignore masked IRQs and ack if we need to; we ack early so | |
266 | * there is no race between handling and acknowleding the | |
267 | * interrupt. We assume that typically few of the interrupts | |
268 | * will fire simultaneously so don't worry about overhead from | |
269 | * doing a write per register. | |
270 | */ | |
271 | for (i = 0; i < data->chip->num_regs; i++) { | |
f8beab2b MB |
272 | data->status_buf[i] &= ~data->mask_buf[i]; |
273 | ||
274 | if (data->status_buf[i] && chip->ack_base) { | |
16032624 SW |
275 | reg = chip->ack_base + |
276 | (i * map->reg_stride * data->irq_reg_stride); | |
277 | ret = regmap_write(map, reg, data->status_buf[i]); | |
f8beab2b MB |
278 | if (ret != 0) |
279 | dev_err(map->dev, "Failed to ack 0x%x: %d\n", | |
16032624 | 280 | reg, ret); |
f8beab2b MB |
281 | } |
282 | } | |
283 | ||
284 | for (i = 0; i < chip->num_irqs; i++) { | |
f01ee60f SW |
285 | if (data->status_buf[chip->irqs[i].reg_offset / |
286 | map->reg_stride] & chip->irqs[i].mask) { | |
4af8be67 | 287 | handle_nested_irq(irq_find_mapping(data->domain, i)); |
d23511f9 | 288 | handled = true; |
f8beab2b MB |
289 | } |
290 | } | |
291 | ||
0c00c50b MB |
292 | if (chip->runtime_pm) |
293 | pm_runtime_put(map->dev); | |
294 | ||
d23511f9 MB |
295 | if (handled) |
296 | return IRQ_HANDLED; | |
297 | else | |
298 | return IRQ_NONE; | |
f8beab2b MB |
299 | } |
300 | ||
4af8be67 MB |
/*
 * irq_domain map callback: wire a newly-created virtual IRQ to this
 * controller instance's irq_chip and mark it as nested-thread, since
 * handlers run from the parent interrupt's threaded handler context.
 */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);

	/* ARM needs us to explicitly flag the IRQ as valid
	 * and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);
#else
	irq_set_noprobe(virq);
#endif

	return 0;
}
320 | ||
321 | static struct irq_domain_ops regmap_domain_ops = { | |
322 | .map = regmap_irq_map, | |
323 | .xlate = irq_domain_xlate_twocell, | |
324 | }; | |
325 | ||
f8beab2b MB |
/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Allocate a legacy descriptor range at this base if non-zero;
 *             pass zero to use a linear (on-demand) IRQ domain mapping.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;

	/* Every IRQ must target a stride-aligned register within num_regs */
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	/* Published before init completes; callers must not use it until
	 * this function returns success. */
	*data = d;

	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	/* Wake buffer only needed when the chip has wake registers */
	if (chip->wake_base) {
		d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	/* Raw buffer for bulk status reads; same eligibility test as in
	 * regmap_irq_thread() */
	if (!map->use_single_rw && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc(map->format.val_bytes *
					    chip->num_regs, GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	/* Collect the union of all IRQ bits per register for update masks */
	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && chip->ack_base) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	/* NOTE(review): descriptors from irq_alloc_descs() are not freed on
	 * this path either — looks like a leak; confirm against genirq. */
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
523 | ||
/**
 * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * Safe to call with a NULL @d (no-op).  Frees the primary IRQ and all
 * buffers; the IRQ domain itself is not disposed (known limitation).
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	if (!d)
		return;

	free_irq(irq, d);
	/* We should unmap the domain but... */
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
209a6006 MB |
545 | |
/**
 * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 *
 * Only meaningful for controllers registered with a fixed irq_base
 * (legacy domain); warns and returns 0 otherwise.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
4af8be67 MB |
559 | |
/**
 * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 * @irq:  index of the interrupt requested in the chip IRQs
 *
 * Returns the virtual IRQ number, or -EINVAL for an unused slot.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list: a zero mask marks an unused entry */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
90f790d2 MB |
577 | |
578 | /** | |
579 | * regmap_irq_get_domain(): Retrieve the irq_domain for the chip | |
580 | * | |
581 | * Useful for drivers to request their own IRQs and for integration | |
582 | * with subsystems. For ease of integration NULL is accepted as a | |
583 | * domain, allowing devices to just call this even if no domain is | |
584 | * allocated. | |
585 | * | |
586 | * @data: regmap_irq controller to operate on. | |
587 | */ | |
588 | struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data) | |
589 | { | |
590 | if (data) | |
591 | return data->domain; | |
592 | else | |
593 | return NULL; | |
594 | } | |
595 | EXPORT_SYMBOL_GPL(regmap_irq_get_domain); |