gpio/omap: make non-wakeup GPIO part of pdata
[deliverable/linux.git] / drivers / gpio / gpio-omap.c
1 /*
2 * Support functions for OMAP GPIO
3 *
4 * Copyright (C) 2003-2005 Nokia Corporation
5 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
6 *
7 * Copyright (C) 2009 Texas Instruments
8 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/interrupt.h>
18 #include <linux/syscore_ops.h>
19 #include <linux/err.h>
20 #include <linux/clk.h>
21 #include <linux/io.h>
22 #include <linux/slab.h>
23 #include <linux/pm_runtime.h>
24
25 #include <mach/hardware.h>
26 #include <asm/irq.h>
27 #include <mach/irqs.h>
28 #include <asm/gpio.h>
29 #include <asm/mach/irq.h>
30
31 static LIST_HEAD(omap_gpio_list);
32
/*
 * Snapshot of the per-bank GPIO registers that must be saved and
 * restored around a context loss (e.g. powerdomain OFF/RET modes).
 */
struct gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;			/* output-enable (direction) */
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
};
45
/*
 * Per-bank driver state; one instance per GPIO bank platform device,
 * linked into omap_gpio_list.
 */
struct gpio_bank {
	struct list_head node;		/* entry in omap_gpio_list */
	unsigned long pbase;		/* physical base address */
	void __iomem *base;		/* ioremapped register base */
	u16 irq;			/* bank's chained (parent) irq */
	u16 virtual_irq_start;		/* first virtual irq of this bank */
	int method;			/* METHOD_* bank type from pdata */
	u32 suspend_wakeup;		/* lines allowed to wake from suspend */
#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
	u32 saved_wakeup;		/* wakeup mask saved across suspend */
#endif
	u32 non_wakeup_gpios;		/* lines without wakeup capability */
	u32 enabled_non_wakeup_gpios;	/* edge irqs to re-check after resume */
	struct gpio_regs context;	/* register snapshot for off-mode */
	u32 saved_datain;
	u32 saved_fallingdetect;
	u32 saved_risingdetect;
	u32 level_mask;			/* lines configured level-triggered */
	u32 toggle_mask;		/* OMAP1: lines needing ICR edge flips */
	spinlock_t lock;		/* protects register RMW and fields */
	struct gpio_chip chip;
	struct clk *dbck;		/* debounce functional clock */
	u32 mod_usage;			/* bitmask of requested lines */
	u32 dbck_enable_mask;		/* lines with debounce enabled */
	struct device *dev;
	bool dbck_flag;			/* bank has a debounce clock */
	bool loses_context;		/* context lost in low-power states */
	int stride;			/* MPUIO register stride divisor */
	u32 width;			/* number of lines in this bank */
	int context_loss_count;
	u16 id;

	void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);

	struct omap_gpio_reg_offs *regs;	/* per-SoC register offsets */
};
83
/* Map a (possibly global) GPIO number to its offset / bitmask in @bank. */
#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
86
87 static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
88 {
89 void __iomem *reg = bank->base;
90 u32 l;
91
92 reg += bank->regs->direction;
93 l = __raw_readl(reg);
94 if (is_input)
95 l |= 1 << gpio;
96 else
97 l &= ~(1 << gpio);
98 __raw_writel(l, reg);
99 }
100
101
102 /* set data out value using dedicate set/clear register */
103 static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
104 {
105 void __iomem *reg = bank->base;
106 u32 l = GPIO_BIT(bank, gpio);
107
108 if (enable)
109 reg += bank->regs->set_dataout;
110 else
111 reg += bank->regs->clr_dataout;
112
113 __raw_writel(l, reg);
114 }
115
116 /* set data out value using mask register */
117 static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
118 {
119 void __iomem *reg = bank->base + bank->regs->dataout;
120 u32 gpio_bit = GPIO_BIT(bank, gpio);
121 u32 l;
122
123 l = __raw_readl(reg);
124 if (enable)
125 l |= gpio_bit;
126 else
127 l &= ~gpio_bit;
128 __raw_writel(l, reg);
129 }
130
131 static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
132 {
133 void __iomem *reg = bank->base + bank->regs->datain;
134
135 return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
136 }
137
138 static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
139 {
140 void __iomem *reg = bank->base + bank->regs->dataout;
141
142 return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
143 }
144
145 static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
146 {
147 int l = __raw_readl(base + reg);
148
149 if (set)
150 l |= mask;
151 else
152 l &= ~mask;
153
154 __raw_writel(l, base + reg);
155 }
156
/**
 * _set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @gpio: the gpio number on this @bank
 * @debounce: debounce time to use, in microseconds
 *
 * OMAP's debounce time is in 31us steps so we need
 * to convert and round up to the closest unit.
 */
static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
		unsigned debounce)
{
	void __iomem *reg;
	u32 val;
	u32 l;

	/* Bank has no debounce clock: nothing to program. */
	if (!bank->dbck_flag)
		return;

	/*
	 * Clamp to the 8-bit DEBOUNCINGTIME field. NOTE(review): a request
	 * of 0 is rounded up to one 31us unit, so debounce apparently cannot
	 * be switched off through this path -- confirm this is intended.
	 */
	if (debounce < 32)
		debounce = 0x01;
	else if (debounce > 7936)
		debounce = 0xff;
	else
		debounce = (debounce / 0x1f) - 1;

	l = GPIO_BIT(bank, gpio);

	reg = bank->base + bank->regs->debounce;
	__raw_writel(debounce, reg);

	reg = bank->base + bank->regs->debounce_en;
	val = __raw_readl(reg);

	/*
	 * NOTE(review): clk_enable()/clk_disable() is called once per
	 * invocation regardless of how many lines already use the debounce
	 * clock, so repeated calls can unbalance the dbck refcount --
	 * verify against the clk framework usage rules.
	 */
	if (debounce) {
		val |= l;
		clk_enable(bank->dbck);
	} else {
		val &= ~l;
		clk_disable(bank->dbck);
	}
	/* Remember which lines have debounce on, for suspend/restore. */
	bank->dbck_enable_mask = val;

	__raw_writel(val, reg);
}
202
#ifdef CONFIG_ARCH_OMAP2PLUS
/*
 * Program the level/edge detect registers of an OMAP2+ bank for @gpio
 * and keep the bank's wake-up enables and cached level_mask in sync.
 * @trigger is an IRQ_TYPE_* bitmask; 0 disables all detection.
 */
static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
						int trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = 1 << gpio;

	/* Set or clear each of the four detect bits per the trigger mask. */
	if (cpu_is_omap44xx()) {
		_gpio_rmw(base, OMAP4_GPIO_LEVELDETECT0, gpio_bit,
			trigger & IRQ_TYPE_LEVEL_LOW);
		_gpio_rmw(base, OMAP4_GPIO_LEVELDETECT1, gpio_bit,
			trigger & IRQ_TYPE_LEVEL_HIGH);
		_gpio_rmw(base, OMAP4_GPIO_RISINGDETECT, gpio_bit,
			trigger & IRQ_TYPE_EDGE_RISING);
		_gpio_rmw(base, OMAP4_GPIO_FALLINGDETECT, gpio_bit,
			trigger & IRQ_TYPE_EDGE_FALLING);
	} else {
		_gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
			trigger & IRQ_TYPE_LEVEL_LOW);
		_gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
			trigger & IRQ_TYPE_LEVEL_HIGH);
		_gpio_rmw(base, OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
			trigger & IRQ_TYPE_EDGE_RISING);
		_gpio_rmw(base, OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
			trigger & IRQ_TYPE_EDGE_FALLING);
	}
	/* Wake-up can only be configured on wakeup-capable lines. */
	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		if (cpu_is_omap44xx()) {
			_gpio_rmw(base, OMAP4_GPIO_IRQWAKEN0, gpio_bit,
				trigger != 0);
		} else {
			/*
			 * GPIO wakeup request can only be generated on edge
			 * transitions
			 */
			if (trigger & IRQ_TYPE_EDGE_BOTH)
				__raw_writel(1 << gpio, bank->base
					+ OMAP24XX_GPIO_SETWKUENA);
			else
				__raw_writel(1 << gpio, bank->base
					+ OMAP24XX_GPIO_CLEARWKUENA);
		}
	}
	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (cpu_is_omap34xx() || cpu_is_omap44xx() ||
			(bank->non_wakeup_gpios & gpio_bit)) {
		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes
		 * to avoid irq lost during PER RET/OFF mode
		 * Applies for omap2 non-wakeup gpio and all omap3 gpios
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

	/* Re-cache the combined level-detect mask used by the irq handler. */
	if (cpu_is_omap44xx()) {
		bank->level_mask =
			__raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT0) |
			__raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT1);
	} else {
		bank->level_mask =
			__raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0) |
			__raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
	}
}
#endif
272
#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once. For all other chips, this function is a noop.
 *
 * Flips the single edge-select bit for @gpio in the interrupt control
 * register, so that after servicing one edge the line is armed for the
 * opposite one (see toggle_mask handling in gpio_irq_handler()).
 */
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	/* Pick the per-variant interrupt control register. */
	switch (bank->method) {
	case METHOD_MPUIO:
		reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
		break;
#ifdef CONFIG_ARCH_OMAP15XX
	case METHOD_GPIO_1510:
		reg += OMAP1510_GPIO_INT_CONTROL;
		break;
#endif
#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
	case METHOD_GPIO_7XX:
		reg += OMAP7XX_GPIO_INT_CONTROL;
		break;
#endif
	default:
		return;
	}

	/* Invert the edge-select bit for this line. */
	l = __raw_readl(reg);
	if ((l >> gpio) & 1)
		l &= ~(1 << gpio);
	else
		l |= 1 << gpio;

	__raw_writel(l, reg);
}
#endif
310
/*
 * Configure the interrupt trigger type (IRQ_TYPE_* bitmask) for @gpio.
 * Dispatches on the bank type; OMAP2+ banks are handled entirely by
 * set_24xx_gpio_triggering(). Returns 0 on success or -EINVAL for a
 * trigger the hardware cannot do.
 */
static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	switch (bank->method) {
#ifdef CONFIG_ARCH_OMAP1
	case METHOD_MPUIO:
		reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
		l = __raw_readl(reg);
		/* Both edges: mark for software edge flipping in the handler */
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= 1 << gpio;
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 1 << gpio;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(1 << gpio);
		else
			goto bad;
		break;
#endif
#ifdef CONFIG_ARCH_OMAP15XX
	case METHOD_GPIO_1510:
		reg += OMAP1510_GPIO_INT_CONTROL;
		l = __raw_readl(reg);
		/* Both edges: mark for software edge flipping in the handler */
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= 1 << gpio;
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 1 << gpio;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(1 << gpio);
		else
			goto bad;
		break;
#endif
#ifdef CONFIG_ARCH_OMAP16XX
	case METHOD_GPIO_1610:
		/* Two bits per line, eight lines per EDGE_CTRL register. */
		if (gpio & 0x08)
			reg += OMAP1610_GPIO_EDGE_CTRL2;
		else
			reg += OMAP1610_GPIO_EDGE_CTRL1;
		gpio &= 0x07;
		l = __raw_readl(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= 1 << (gpio << 1);
		if (trigger)
			/* Enable wake-up during idle for dynamic tick */
			__raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_SET_WAKEUPENA);
		else
			__raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA);
		break;
#endif
#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
	case METHOD_GPIO_7XX:
		reg += OMAP7XX_GPIO_INT_CONTROL;
		l = __raw_readl(reg);
		/* Both edges: mark for software edge flipping in the handler */
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= 1 << gpio;
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 1 << gpio;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(1 << gpio);
		else
			goto bad;
		break;
#endif
#ifdef CONFIG_ARCH_OMAP2PLUS
	case METHOD_GPIO_24XX:
	case METHOD_GPIO_44XX:
		/* OMAP2+ writes its own registers; no shared write below. */
		set_24xx_gpio_triggering(bank, gpio, trigger);
		return 0;
#endif
	default:
		goto bad;
	}
	__raw_writel(l, reg);
	return 0;
bad:
	return -EINVAL;
}
393
/*
 * irq_chip .irq_set_type callback: validate the requested trigger for
 * this SoC family, program the bank, and switch the flow handler
 * between level and edge handling accordingly.
 */
static int gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank;
	unsigned gpio;
	int retval;
	unsigned long flags;

	/* MPUIO irqs live in a separate number space on OMAP1. */
	if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE)
		gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
	else
		gpio = d->irq - IH_GPIO_BASE;

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	/* OMAP1 allows only edge triggering */
	if (!cpu_class_is_omap2()
			&& (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	bank = irq_data_get_irq_chip_data(d);
	spin_lock_irqsave(&bank->lock, flags);
	retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
	spin_unlock_irqrestore(&bank->lock, flags);

	/* Match the generic flow handler to the new trigger type. */
	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return retval;
}
426
427 static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
428 {
429 void __iomem *reg = bank->base;
430
431 reg += bank->regs->irqstatus;
432 __raw_writel(gpio_mask, reg);
433
434 /* Workaround for clearing DSP GPIO interrupts to allow retention */
435 if (bank->regs->irqstatus2) {
436 reg = bank->base + bank->regs->irqstatus2;
437 __raw_writel(gpio_mask, reg);
438 }
439
440 /* Flush posted write for the irq status to avoid spurious interrupts */
441 __raw_readl(reg);
442 }
443
444 static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
445 {
446 _clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
447 }
448
449 static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
450 {
451 void __iomem *reg = bank->base;
452 u32 l;
453 u32 mask = (1 << bank->width) - 1;
454
455 reg += bank->regs->irqenable;
456 l = __raw_readl(reg);
457 if (bank->regs->irqenable_inv)
458 l = ~l;
459 l &= mask;
460 return l;
461 }
462
463 static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
464 {
465 void __iomem *reg = bank->base;
466 u32 l;
467
468 if (bank->regs->set_irqenable) {
469 reg += bank->regs->set_irqenable;
470 l = gpio_mask;
471 } else {
472 reg += bank->regs->irqenable;
473 l = __raw_readl(reg);
474 if (bank->regs->irqenable_inv)
475 l &= ~gpio_mask;
476 else
477 l |= gpio_mask;
478 }
479
480 __raw_writel(l, reg);
481 }
482
483 static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
484 {
485 void __iomem *reg = bank->base;
486 u32 l;
487
488 if (bank->regs->clr_irqenable) {
489 reg += bank->regs->clr_irqenable;
490 l = gpio_mask;
491 } else {
492 reg += bank->regs->irqenable;
493 l = __raw_readl(reg);
494 if (bank->regs->irqenable_inv)
495 l |= gpio_mask;
496 else
497 l &= ~gpio_mask;
498 }
499
500 __raw_writel(l, reg);
501 }
502
/*
 * Enable or disable the interrupt of a single GPIO line.
 *
 * Fix: the previous implementation ignored @enable and unconditionally
 * enabled the line, so callers passing enable == 0 (gpio_mask_irq(),
 * _reset_gpio(), the level-irq path in gpio_unmask_irq()) actually
 * re-enabled the interrupt instead of masking it.
 */
static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
{
	if (enable)
		_enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
	else
		_disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}
507
508 /*
509 * Note that ENAWAKEUP needs to be enabled in GPIO_SYSCONFIG register.
510 * 1510 does not seem to have a wake-up register. If JTAG is connected
511 * to the target, system will wake up always on GPIO events. While
512 * system is running all registered GPIO interrupts need to have wake-up
513 * enabled. When system is suspended, only selected GPIO interrupts need
514 * to have wake-up enabled.
515 */
516 static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
517 {
518 u32 gpio_bit = GPIO_BIT(bank, gpio);
519 unsigned long flags;
520
521 if (bank->non_wakeup_gpios & gpio_bit) {
522 dev_err(bank->dev,
523 "Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
524 return -EINVAL;
525 }
526
527 spin_lock_irqsave(&bank->lock, flags);
528 if (enable)
529 bank->suspend_wakeup |= gpio_bit;
530 else
531 bank->suspend_wakeup &= ~gpio_bit;
532
533 spin_unlock_irqrestore(&bank->lock, flags);
534
535 return 0;
536 }
537
538 static void _reset_gpio(struct gpio_bank *bank, int gpio)
539 {
540 _set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
541 _set_gpio_irqenable(bank, gpio, 0);
542 _clear_gpio_irqstatus(bank, gpio);
543 _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
544 }
545
546 /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
547 static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
548 {
549 unsigned int gpio = d->irq - IH_GPIO_BASE;
550 struct gpio_bank *bank;
551 int retval;
552
553 bank = irq_data_get_irq_chip_data(d);
554 retval = _set_gpio_wakeup(bank, gpio, enable);
555
556 return retval;
557 }
558
/*
 * gpio_chip .request hook: prepare a line for use. Clears any stale
 * trigger, claims the pin for the MPU on 1510, and on OMAP2+ ungates
 * the module clock when the first line of the bank is requested.
 */
static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);

	/* Set trigger to none. You need to enable the desired trigger with
	 * request_irq() or set_irq_type().
	 */
	_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);

#ifdef CONFIG_ARCH_OMAP15XX
	if (bank->method == METHOD_GPIO_1510) {
		void __iomem *reg;

		/* Claim the pin for MPU */
		reg = bank->base + OMAP1510_GPIO_PIN_CONTROL;
		__raw_writel(__raw_readl(reg) | (1 << offset), reg);
	}
#endif
	if (!cpu_class_is_omap1()) {
		/* First requested line of the bank: un-gate module clocks. */
		if (!bank->mod_usage) {
			void __iomem *reg = bank->base;
			u32 ctrl;

			if (cpu_is_omap24xx() || cpu_is_omap34xx())
				reg += OMAP24XX_GPIO_CTRL;
			else if (cpu_is_omap44xx())
				reg += OMAP4_GPIO_CTRL;
			ctrl = __raw_readl(reg);
			/* Module is enabled, clocks are not gated */
			ctrl &= 0xFFFFFFFE;
			__raw_writel(ctrl, reg);
		}
		bank->mod_usage |= 1 << offset;
	}
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
600
/*
 * gpio_chip .free hook: release a line. Disables any wake-up enable the
 * line had, resets it to a safe input state, and on OMAP2+ gates the
 * module clock again when the last line of the bank is freed.
 */
static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
#ifdef CONFIG_ARCH_OMAP16XX
	if (bank->method == METHOD_GPIO_1610) {
		/* Disable wake-up during idle for dynamic tick */
		void __iomem *reg = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
		__raw_writel(1 << offset, reg);
	}
#endif
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
	if (bank->method == METHOD_GPIO_24XX) {
		/* Disable wake-up during idle for dynamic tick */
		void __iomem *reg = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
		__raw_writel(1 << offset, reg);
	}
#endif
#ifdef CONFIG_ARCH_OMAP4
	if (bank->method == METHOD_GPIO_44XX) {
		/* Disable wake-up during idle for dynamic tick */
		void __iomem *reg = bank->base + OMAP4_GPIO_IRQWAKEN0;
		__raw_writel(1 << offset, reg);
	}
#endif
	if (!cpu_class_is_omap1()) {
		bank->mod_usage &= ~(1 << offset);
		/* Last line freed: gate the module clocks again. */
		if (!bank->mod_usage) {
			void __iomem *reg = bank->base;
			u32 ctrl;

			if (cpu_is_omap24xx() || cpu_is_omap34xx())
				reg += OMAP24XX_GPIO_CTRL;
			else if (cpu_is_omap44xx())
				reg += OMAP4_GPIO_CTRL;
			ctrl = __raw_readl(reg);
			/* Module is disabled, clocks are gated */
			ctrl |= 1;
			__raw_writel(ctrl, reg);
		}
	}
	_reset_gpio(bank, bank->chip.base + offset);
	spin_unlock_irqrestore(&bank->lock, flags);
}
647
/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *isr_reg = NULL;
	u32 isr;
	unsigned int gpio_irq, gpio_index;
	struct gpio_bank *bank;
	u32 retrigger = 0;
	int unmasked = 0;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	bank = irq_get_handler_data(irq);
	isr_reg = bank->base + bank->regs->irqstatus;

	if (WARN_ON(!isr_reg))
		goto exit;

	/* Loop until no enabled line has a pending interrupt. */
	while(1) {
		u32 isr_saved, level_mask = 0;
		u32 enabled;

		enabled = _get_gpio_irqbank_mask(bank);
		isr_saved = isr = __raw_readl(isr_reg) & enabled;

		/* 15xx MPUIO only has 16 lines. */
		if (cpu_is_omap15xx() && (bank->method == METHOD_MPUIO))
			isr &= 0x0000ffff;

		if (cpu_class_is_omap2()) {
			level_mask = bank->level_mask & enabled;
		}

		/* clear edge sensitive interrupts before handler(s) are
		called so that we don't miss any interrupt occurred while
		executing them */
		_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
		_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
		_enable_gpio_irqbank(bank, isr_saved & ~level_mask);

		/* if there is only edge sensitive GPIO pin interrupts
		configured, we could unmask GPIO bank interrupt immediately */
		if (!level_mask && !unmasked) {
			unmasked = 1;
			chained_irq_exit(chip, desc);
		}

		isr |= retrigger;
		retrigger = 0;
		if (!isr)
			break;

		/* Dispatch each pending line to its virtual irq handler. */
		gpio_irq = bank->virtual_irq_start;
		for (; isr != 0; isr >>= 1, gpio_irq++) {
			gpio_index = GPIO_INDEX(bank, irq_to_gpio(gpio_irq));

			if (!(isr & 1))
				continue;

#ifdef CONFIG_ARCH_OMAP1
			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time. If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (1 << gpio_index))
				_toggle_gpio_edge_triggering(bank, gpio_index);
#endif

			generic_handle_irq(gpio_irq);
		}
	}
	/* if bank has any level sensitive GPIO pin interrupt
	configured, we must unmask the bank interrupt only after
	handler(s) are executed in order to avoid spurious bank
	interrupt */
exit:
	if (!unmasked)
		chained_irq_exit(chip, desc);
}
738
739 static void gpio_irq_shutdown(struct irq_data *d)
740 {
741 unsigned int gpio = d->irq - IH_GPIO_BASE;
742 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
743 unsigned long flags;
744
745 spin_lock_irqsave(&bank->lock, flags);
746 _reset_gpio(bank, gpio);
747 spin_unlock_irqrestore(&bank->lock, flags);
748 }
749
750 static void gpio_ack_irq(struct irq_data *d)
751 {
752 unsigned int gpio = d->irq - IH_GPIO_BASE;
753 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
754
755 _clear_gpio_irqstatus(bank, gpio);
756 }
757
758 static void gpio_mask_irq(struct irq_data *d)
759 {
760 unsigned int gpio = d->irq - IH_GPIO_BASE;
761 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
762 unsigned long flags;
763
764 spin_lock_irqsave(&bank->lock, flags);
765 _set_gpio_irqenable(bank, gpio, 0);
766 _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
767 spin_unlock_irqrestore(&bank->lock, flags);
768 }
769
/*
 * irq_chip .irq_unmask: restore the trigger (gpio_mask_irq() cleared
 * it), ack any stale level status, and re-enable the line.
 */
static void gpio_unmask_irq(struct irq_data *d)
{
	unsigned int gpio = d->irq - IH_GPIO_BASE;
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int irq_mask = GPIO_BIT(bank, gpio);
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);

	/* For level-triggered GPIOs, the clearing must be done after
	 * the HW source is cleared, thus after the handler has run */
	if (bank->level_mask & irq_mask) {
		_set_gpio_irqenable(bank, gpio, 0);
		_clear_gpio_irqstatus(bank, gpio);
	}

	_set_gpio_irqenable(bank, gpio, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
}
792
/* irq_chip shared by all regular (non-MPUIO) GPIO virtual irqs. */
static struct irq_chip gpio_irq_chip = {
	.name = "GPIO",
	.irq_shutdown = gpio_irq_shutdown,
	.irq_ack = gpio_ack_irq,
	.irq_mask = gpio_mask_irq,
	.irq_unmask = gpio_unmask_irq,
	.irq_set_type = gpio_irq_type,
	.irq_set_wake = gpio_wake_enable,
};
802
803 /*---------------------------------------------------------------------*/
804
#ifdef CONFIG_ARCH_OMAP1

/* True when @bank is the OMAP1 MPUIO bank rather than a normal bank. */
#define bank_is_mpuio(bank) ((bank)->method == METHOD_MPUIO)

#ifdef CONFIG_ARCH_OMAP16XX

#include <linux/platform_device.h>

/*
 * noirq suspend hook for the dummy "mpuio" platform device: save the
 * current MPUIO irq mask and mask everything that is not flagged as a
 * suspend wake-up source.
 */
static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *mask_reg = bank->base +
				OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	bank->saved_wakeup = __raw_readl(mask_reg);
	__raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

/* noirq resume hook: restore the irq mask saved at suspend time. */
static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *mask_reg = bank->base +
				OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	__raw_writel(bank->saved_wakeup, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver = {
		.name = "mpuio",
		.pm = &omap_mpuio_dev_pm_ops,
	},
};

/* Dummy device so the MPUIO bank participates in suspend/resume. */
static struct platform_device omap_mpuio_device = {
	.name = "mpuio",
	.id = -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

/* Register the dummy driver/device pair for the MPUIO bank. */
static inline void mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

#else
static inline void mpuio_init(struct gpio_bank *bank) {}
#endif /* 16xx */

#else

#define bank_is_mpuio(bank) 0
static inline void mpuio_init(struct gpio_bank *bank) {}

#endif
884
885 /*---------------------------------------------------------------------*/
886
887 /* REVISIT these are stupid implementations! replace by ones that
888 * don't switch on METHOD_* and which mostly avoid spinlocks
889 */
890
891 static int gpio_input(struct gpio_chip *chip, unsigned offset)
892 {
893 struct gpio_bank *bank;
894 unsigned long flags;
895
896 bank = container_of(chip, struct gpio_bank, chip);
897 spin_lock_irqsave(&bank->lock, flags);
898 _set_gpio_direction(bank, offset, 1);
899 spin_unlock_irqrestore(&bank->lock, flags);
900 return 0;
901 }
902
903 static int gpio_is_input(struct gpio_bank *bank, int mask)
904 {
905 void __iomem *reg = bank->base + bank->regs->direction;
906
907 return __raw_readl(reg) & mask;
908 }
909
910 static int gpio_get(struct gpio_chip *chip, unsigned offset)
911 {
912 struct gpio_bank *bank;
913 void __iomem *reg;
914 int gpio;
915 u32 mask;
916
917 gpio = chip->base + offset;
918 bank = container_of(chip, struct gpio_bank, chip);
919 reg = bank->base;
920 mask = GPIO_BIT(bank, gpio);
921
922 if (gpio_is_input(bank, mask))
923 return _get_gpio_datain(bank, gpio);
924 else
925 return _get_gpio_dataout(bank, gpio);
926 }
927
928 static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
929 {
930 struct gpio_bank *bank;
931 unsigned long flags;
932
933 bank = container_of(chip, struct gpio_bank, chip);
934 spin_lock_irqsave(&bank->lock, flags);
935 bank->set_dataout(bank, offset, value);
936 _set_gpio_direction(bank, offset, 0);
937 spin_unlock_irqrestore(&bank->lock, flags);
938 return 0;
939 }
940
/*
 * gpio_chip .set_debounce hook. Lazily acquires the bank's debounce
 * clock on first use.
 *
 * NOTE(review): on clk_get() failure an ERR_PTR value is left in
 * bank->dbck and only a message is printed; a later clk_enable() in
 * _set_gpio_debounce() would then act on that ERR_PTR -- verify
 * whether this path can be reached with dbck_flag set.
 */
static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
		unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);

	if (!bank->dbck) {
		bank->dbck = clk_get(bank->dev, "dbclk");
		if (IS_ERR(bank->dbck))
			dev_err(bank->dev, "Could not get gpio dbck\n");
	}

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_debounce(bank, offset, debounce);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
961
962 static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
963 {
964 struct gpio_bank *bank;
965 unsigned long flags;
966
967 bank = container_of(chip, struct gpio_bank, chip);
968 spin_lock_irqsave(&bank->lock, flags);
969 bank->set_dataout(bank, offset, value);
970 spin_unlock_irqrestore(&bank->lock, flags);
971 }
972
973 static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
974 {
975 struct gpio_bank *bank;
976
977 bank = container_of(chip, struct gpio_bank, chip);
978 return bank->virtual_irq_start + offset;
979 }
980
981 /*---------------------------------------------------------------------*/
982
983 static void __init omap_gpio_show_rev(struct gpio_bank *bank)
984 {
985 static bool called;
986 u32 rev;
987
988 if (called || bank->regs->revision == USHRT_MAX)
989 return;
990
991 rev = __raw_readw(bank->base + bank->regs->revision);
992 pr_info("OMAP GPIO hardware version %d.%d\n",
993 (rev >> 4) & 0x0f, rev & 0x0f);
994
995 called = true;
996 }
997
998 /* This lock class tells lockdep that GPIO irqs are in a different
999 * category than their parents, so it won't report false recursion.
1000 */
1001 static struct lock_class_key gpio_lock_class;
1002
/* TODO: Cleanup cpu_is_* checks */
/*
 * One-time hardware init for a bank: mask and ack all interrupts,
 * disable debounce, and enable module/interface clocks, with the
 * register layout chosen per SoC family.
 */
static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	if (cpu_class_is_omap2()) {
		if (cpu_is_omap44xx()) {
			__raw_writel(0xffffffff, bank->base +
					OMAP4_GPIO_IRQSTATUSCLR0);
			__raw_writel(0x00000000, bank->base +
					OMAP4_GPIO_DEBOUNCENABLE);
			/* Initialize interface clk ungated, module enabled */
			__raw_writel(0, bank->base + OMAP4_GPIO_CTRL);
		} else if (cpu_is_omap34xx()) {
			__raw_writel(0x00000000, bank->base +
					OMAP24XX_GPIO_IRQENABLE1);
			__raw_writel(0xffffffff, bank->base +
					OMAP24XX_GPIO_IRQSTATUS1);
			__raw_writel(0x00000000, bank->base +
					OMAP24XX_GPIO_DEBOUNCE_EN);

			/* Initialize interface clk ungated, module enabled */
			__raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL);
		}
	} else if (cpu_class_is_omap1()) {
		/* MPUIO: mask all lines, register the suspend/resume device */
		if (bank_is_mpuio(bank)) {
			__raw_writew(0xffff, bank->base +
				OMAP_MPUIO_GPIO_MASKIT / bank->stride);
			mpuio_init(bank);
		}
		if (cpu_is_omap15xx() && bank->method == METHOD_GPIO_1510) {
			__raw_writew(0xffff, bank->base
						+ OMAP1510_GPIO_INT_MASK);
			__raw_writew(0x0000, bank->base
						+ OMAP1510_GPIO_INT_STATUS);
		}
		if (cpu_is_omap16xx() && bank->method == METHOD_GPIO_1610) {
			__raw_writew(0x0000, bank->base
						+ OMAP1610_GPIO_IRQENABLE1);
			__raw_writew(0xffff, bank->base
						+ OMAP1610_GPIO_IRQSTATUS1);
			__raw_writew(0x0014, bank->base
						+ OMAP1610_GPIO_SYSCONFIG);

			/*
			 * Enable system clock for GPIO module.
			 * The CAM_CLK_CTRL *is* really the right place.
			 */
			omap_writel(omap_readl(ULPD_CAM_CLK_CTRL) | 0x04,
						ULPD_CAM_CLK_CTRL);
		}
		if (cpu_is_omap7xx() && bank->method == METHOD_GPIO_7XX) {
			__raw_writel(0xffffffff, bank->base
						+ OMAP7XX_GPIO_INT_MASK);
			__raw_writel(0x00000000, bank->base
						+ OMAP7XX_GPIO_INT_STATUS);
		}
	}
}
1060
1061 static __init void
1062 omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
1063 unsigned int num)
1064 {
1065 struct irq_chip_generic *gc;
1066 struct irq_chip_type *ct;
1067
1068 gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
1069 handle_simple_irq);
1070 if (!gc) {
1071 dev_err(bank->dev, "Memory alloc failed for gc\n");
1072 return;
1073 }
1074
1075 ct = gc->chip_types;
1076
1077 /* NOTE: No ack required, reading IRQ status clears it. */
1078 ct->chip.irq_mask = irq_gc_mask_set_bit;
1079 ct->chip.irq_unmask = irq_gc_mask_clr_bit;
1080 ct->chip.irq_set_type = gpio_irq_type;
1081 /* REVISIT: assuming only 16xx supports MPUIO wake events */
1082 if (cpu_is_omap16xx())
1083 ct->chip.irq_set_wake = gpio_wake_enable,
1084
1085 ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
1086 irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
1087 IRQ_NOREQUEST | IRQ_NOPROBE, 0);
1088 }
1089
/*
 * Register this bank with gpiolib and wire up its virtual irqs:
 * fills in the gpio_chip callbacks, assigns the gpio number range
 * (MPUIO banks use the fixed OMAP_MPUIO() range), installs the
 * per-line irq chips, and chains the bank's parent irq to
 * gpio_irq_handler().
 */
static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
{
	int j;
	static int gpio;	/* running base for consecutive banks */

	bank->mod_usage = 0;
	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.direction_input = gpio_input;
	bank->chip.get = gpio_get;
	bank->chip.direction_output = gpio_output;
	bank->chip.set_debounce = gpio_debounce;
	bank->chip.set = gpio_set;
	bank->chip.to_irq = gpio_2irq;
	if (bank_is_mpuio(bank)) {
		bank->chip.label = "mpuio";
#ifdef CONFIG_ARCH_OMAP16XX
		bank->chip.dev = &omap_mpuio_device.dev;
#endif
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		bank->chip.label = "gpio";
		bank->chip.base = gpio;
		gpio += bank->width;
	}
	bank->chip.ngpio = bank->width;

	gpiochip_add(&bank->chip);

	/* Set up one virtual irq per line in this bank. */
	for (j = bank->virtual_irq_start;
		     j < bank->virtual_irq_start + bank->width; j++) {
		irq_set_lockdep_class(j, &gpio_lock_class);
		irq_set_chip_data(j, bank);
		if (bank_is_mpuio(bank)) {
			omap_mpuio_alloc_gc(bank, j, bank->width);
		} else {
			irq_set_chip(j, &gpio_irq_chip);
			irq_set_handler(j, handle_simple_irq);
			set_irq_flags(j, IRQF_VALID);
		}
	}
	irq_set_chained_handler(bank->irq, gpio_irq_handler);
	irq_set_handler_data(bank->irq, bank);
}
1138
/*
 * omap_gpio_probe - probe one GPIO bank described by platform data
 * @pdev: platform device carrying struct omap_gpio_platform_data
 *
 * Copies the SoC-specific bank description out of platform data, maps
 * the bank's registers, selects the dataout accessor based on which
 * registers the bank provides, enables runtime PM and registers the
 * bank's gpio_chip and IRQs.  Returns 0 on success, negative errno on
 * failure.
 */
static int __devinit omap_gpio_probe(struct platform_device *pdev)
{
	struct omap_gpio_platform_data *pdata;
	struct resource *res;
	struct gpio_bank *bank;
	int ret = 0;

	/* This driver is configured purely through platform data. */
	if (!pdev->dev.platform_data) {
		ret = -EINVAL;
		goto err_exit;
	}

	bank = kzalloc(sizeof(struct gpio_bank), GFP_KERNEL);
	if (!bank) {
		dev_err(&pdev->dev, "Memory alloc failed for gpio_bank\n");
		ret = -ENOMEM;
		goto err_exit;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(!res)) {
		dev_err(&pdev->dev, "GPIO Bank %i Invalid IRQ resource\n",
				pdev->id);
		ret = -ENODEV;
		goto err_free;
	}

	bank->irq = res->start;
	bank->id = pdev->id;

	pdata = pdev->dev.platform_data;
	bank->virtual_irq_start = pdata->virtual_irq_start;
	bank->method = pdata->bank_type;
	bank->dev = &pdev->dev;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->loses_context = pdata->loses_context;
	bank->get_context_loss_count = pdata->get_context_loss_count;
	bank->regs = pdata->regs;

	/*
	 * Banks with dedicated set/clear dataout registers can update a
	 * single line atomically; others need a read-modify-write of the
	 * shared dataout register under the bank lock.
	 */
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = _set_gpio_dataout_reg;
	else
		bank->set_dataout = _set_gpio_dataout_mask;

	spin_lock_init(&bank->lock);

	/* Static mapping, never released */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!res)) {
		dev_err(&pdev->dev, "GPIO Bank %i Invalid mem resource\n",
				pdev->id);
		ret = -ENODEV;
		goto err_free;
	}

	bank->base = ioremap(res->start, resource_size(res));
	if (!bank->base) {
		dev_err(&pdev->dev, "Could not ioremap gpio bank%i\n",
				pdev->id);
		ret = -ENOMEM;
		goto err_free;
	}

	/* Bank must be powered for the register setup that follows. */
	pm_runtime_enable(bank->dev);
	pm_runtime_get_sync(bank->dev);

	omap_gpio_mod_init(bank);
	omap_gpio_chip_init(bank);
	omap_gpio_show_rev(bank);

	/* Make the bank visible to the idle/suspend iterators. */
	list_add_tail(&bank->node, &omap_gpio_list);

	return ret;

err_free:
	kfree(bank);
err_exit:
	return ret;
}
1221
1222 #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
/*
 * omap_gpio_suspend - syscore suspend hook
 *
 * For every bank whose method has wakeup-enable registers, save the
 * current wake-enable mask into bank->saved_wakeup, then reprogram the
 * hardware so that only the lines armed via bank->suspend_wakeup can
 * wake the system.  Banks without wakeup registers are skipped.
 */
static int omap_gpio_suspend(void)
{
	struct gpio_bank *bank;

	if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
		return 0;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		void __iomem *wake_status;
		void __iomem *wake_clear;
		void __iomem *wake_set;
		unsigned long flags;

		/* Select the wakeup-enable registers for this bank's IP. */
		switch (bank->method) {
#ifdef CONFIG_ARCH_OMAP16XX
		case METHOD_GPIO_1610:
			wake_status = bank->base + OMAP1610_GPIO_WAKEUPENABLE;
			wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
			wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
			break;
#endif
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
		case METHOD_GPIO_24XX:
			wake_status = bank->base + OMAP24XX_GPIO_WAKE_EN;
			wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
			wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
			break;
#endif
#ifdef CONFIG_ARCH_OMAP4
		case METHOD_GPIO_44XX:
			/* OMAP4 uses one read/write register for all three roles. */
			wake_status = bank->base + OMAP4_GPIO_IRQWAKEN0;
			wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
			wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
			break;
#endif
		default:
			continue;
		}

		spin_lock_irqsave(&bank->lock, flags);
		/* Remember the runtime mask, then arm only suspend wakeups. */
		bank->saved_wakeup = __raw_readl(wake_status);
		__raw_writel(0xffffffff, wake_clear);
		__raw_writel(bank->suspend_wakeup, wake_set);
		spin_unlock_irqrestore(&bank->lock, flags);
	}

	return 0;
}
1271
/*
 * omap_gpio_resume - syscore resume hook
 *
 * Restores each bank's wake-enable mask to the value saved by
 * omap_gpio_suspend(), re-establishing the runtime wakeup
 * configuration.
 */
static void omap_gpio_resume(void)
{
	struct gpio_bank *bank;

	if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
		return;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		void __iomem *wake_clear;
		void __iomem *wake_set;
		unsigned long flags;

		/* Select the wakeup-enable registers for this bank's IP. */
		switch (bank->method) {
#ifdef CONFIG_ARCH_OMAP16XX
		case METHOD_GPIO_1610:
			wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
			wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
			break;
#endif
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
		case METHOD_GPIO_24XX:
			wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
			wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
			break;
#endif
#ifdef CONFIG_ARCH_OMAP4
		case METHOD_GPIO_44XX:
			/* OMAP4 uses one read/write register for both roles. */
			wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
			wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
			break;
#endif
		default:
			continue;
		}

		spin_lock_irqsave(&bank->lock, flags);
		/* Clear everything, then restore the pre-suspend mask. */
		__raw_writel(0xffffffff, wake_clear);
		__raw_writel(bank->saved_wakeup, wake_set);
		spin_unlock_irqrestore(&bank->lock, flags);
	}
}
1313
/* Saves/restores GPIO wakeup-enable state across system suspend. */
static struct syscore_ops omap_gpio_syscore_ops = {
	.suspend	= omap_gpio_suspend,
	.resume		= omap_gpio_resume,
};
1318
1319 #endif
1320
1321 #ifdef CONFIG_ARCH_OMAP2PLUS
1322
1323 static void omap_gpio_save_context(struct gpio_bank *bank);
1324 static void omap_gpio_restore_context(struct gpio_bank *bank);
1325
/*
 * omap2_gpio_prepare_for_idle - prepare context-losing banks for idle/OFF
 * @off_mode: non-zero when the power domain is about to enter OFF mode
 *
 * Disables the debounce functional clocks and, when entering OFF mode,
 * removes edge triggering from enabled non-wakeup GPIOs so they cannot
 * generate spurious IRQs (OMAP2420 errata item 1.101).  Enough state is
 * saved (datain plus edge-detect masks) for the resume path to detect
 * and retrigger events that occurred while detection was disabled.
 * Finally the bank context is snapshotted in case the domain actually
 * loses power.
 */
void omap2_gpio_prepare_for_idle(int off_mode)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		u32 l1 = 0, l2 = 0;
		int j;

		if (!bank->loses_context)
			continue;

		/* One clk_disable() per line that had debounce enabled. */
		for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
			clk_disable(bank->dbck);

		if (!off_mode)
			continue;

		/* If going to OFF, remove triggering for all
		 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
		 * generated.  See OMAP2420 Errata item 1.101. */
		if (!(bank->enabled_non_wakeup_gpios))
			goto save_gpio_context;

		/* Snapshot input state and edge-detect masks... */
		if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
			bank->saved_datain = __raw_readl(bank->base +
					OMAP24XX_GPIO_DATAIN);
			l1 = __raw_readl(bank->base +
					OMAP24XX_GPIO_FALLINGDETECT);
			l2 = __raw_readl(bank->base +
					OMAP24XX_GPIO_RISINGDETECT);
		}

		if (cpu_is_omap44xx()) {
			bank->saved_datain = __raw_readl(bank->base +
						OMAP4_GPIO_DATAIN);
			l1 = __raw_readl(bank->base +
						OMAP4_GPIO_FALLINGDETECT);
			l2 = __raw_readl(bank->base +
						OMAP4_GPIO_RISINGDETECT);
		}

		bank->saved_fallingdetect = l1;
		bank->saved_risingdetect = l2;
		/* ...then turn edge detection off for non-wakeup lines. */
		l1 &= ~bank->enabled_non_wakeup_gpios;
		l2 &= ~bank->enabled_non_wakeup_gpios;

		if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
			__raw_writel(l1, bank->base +
					OMAP24XX_GPIO_FALLINGDETECT);
			__raw_writel(l2, bank->base +
					OMAP24XX_GPIO_RISINGDETECT);
		}

		if (cpu_is_omap44xx()) {
			__raw_writel(l1, bank->base + OMAP4_GPIO_FALLINGDETECT);
			__raw_writel(l2, bank->base + OMAP4_GPIO_RISINGDETECT);
		}

save_gpio_context:
		/* Record the loss count so resume can tell whether the
		 * saved context must actually be restored. */
		if (bank->get_context_loss_count)
			bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);

		omap_gpio_save_context(bank);
	}
}
1392
1393 void omap2_gpio_resume_after_idle(void)
1394 {
1395 struct gpio_bank *bank;
1396
1397 list_for_each_entry(bank, &omap_gpio_list, node) {
1398 int context_lost_cnt_after;
1399 u32 l = 0, gen, gen0, gen1;
1400 int j;
1401
1402 if (!bank->loses_context)
1403 continue;
1404
1405 for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
1406 clk_enable(bank->dbck);
1407
1408 if (bank->get_context_loss_count) {
1409 context_lost_cnt_after =
1410 bank->get_context_loss_count(bank->dev);
1411 if (context_lost_cnt_after != bank->context_loss_count
1412 || !context_lost_cnt_after)
1413 omap_gpio_restore_context(bank);
1414 }
1415
1416 if (!(bank->enabled_non_wakeup_gpios))
1417 continue;
1418
1419 if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
1420 __raw_writel(bank->saved_fallingdetect,
1421 bank->base + OMAP24XX_GPIO_FALLINGDETECT);
1422 __raw_writel(bank->saved_risingdetect,
1423 bank->base + OMAP24XX_GPIO_RISINGDETECT);
1424 l = __raw_readl(bank->base + OMAP24XX_GPIO_DATAIN);
1425 }
1426
1427 if (cpu_is_omap44xx()) {
1428 __raw_writel(bank->saved_fallingdetect,
1429 bank->base + OMAP4_GPIO_FALLINGDETECT);
1430 __raw_writel(bank->saved_risingdetect,
1431 bank->base + OMAP4_GPIO_RISINGDETECT);
1432 l = __raw_readl(bank->base + OMAP4_GPIO_DATAIN);
1433 }
1434
1435 /* Check if any of the non-wakeup interrupt GPIOs have changed
1436 * state. If so, generate an IRQ by software. This is
1437 * horribly racy, but it's the best we can do to work around
1438 * this silicon bug. */
1439 l ^= bank->saved_datain;
1440 l &= bank->enabled_non_wakeup_gpios;
1441
1442 /*
1443 * No need to generate IRQs for the rising edge for gpio IRQs
1444 * configured with falling edge only; and vice versa.
1445 */
1446 gen0 = l & bank->saved_fallingdetect;
1447 gen0 &= bank->saved_datain;
1448
1449 gen1 = l & bank->saved_risingdetect;
1450 gen1 &= ~(bank->saved_datain);
1451
1452 /* FIXME: Consider GPIO IRQs with level detections properly! */
1453 gen = l & (~(bank->saved_fallingdetect) &
1454 ~(bank->saved_risingdetect));
1455 /* Consider all GPIO IRQs needed to be updated */
1456 gen |= gen0 | gen1;
1457
1458 if (gen) {
1459 u32 old0, old1;
1460
1461 if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
1462 old0 = __raw_readl(bank->base +
1463 OMAP24XX_GPIO_LEVELDETECT0);
1464 old1 = __raw_readl(bank->base +
1465 OMAP24XX_GPIO_LEVELDETECT1);
1466 __raw_writel(old0 | gen, bank->base +
1467 OMAP24XX_GPIO_LEVELDETECT0);
1468 __raw_writel(old1 | gen, bank->base +
1469 OMAP24XX_GPIO_LEVELDETECT1);
1470 __raw_writel(old0, bank->base +
1471 OMAP24XX_GPIO_LEVELDETECT0);
1472 __raw_writel(old1, bank->base +
1473 OMAP24XX_GPIO_LEVELDETECT1);
1474 }
1475
1476 if (cpu_is_omap44xx()) {
1477 old0 = __raw_readl(bank->base +
1478 OMAP4_GPIO_LEVELDETECT0);
1479 old1 = __raw_readl(bank->base +
1480 OMAP4_GPIO_LEVELDETECT1);
1481 __raw_writel(old0 | l, bank->base +
1482 OMAP4_GPIO_LEVELDETECT0);
1483 __raw_writel(old1 | l, bank->base +
1484 OMAP4_GPIO_LEVELDETECT1);
1485 __raw_writel(old0, bank->base +
1486 OMAP4_GPIO_LEVELDETECT0);
1487 __raw_writel(old1, bank->base +
1488 OMAP4_GPIO_LEVELDETECT1);
1489 }
1490 }
1491 }
1492 }
1493
1494 static void omap_gpio_save_context(struct gpio_bank *bank)
1495 {
1496 bank->context.irqenable1 =
1497 __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE1);
1498 bank->context.irqenable2 =
1499 __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE2);
1500 bank->context.wake_en =
1501 __raw_readl(bank->base + OMAP24XX_GPIO_WAKE_EN);
1502 bank->context.ctrl = __raw_readl(bank->base + OMAP24XX_GPIO_CTRL);
1503 bank->context.oe = __raw_readl(bank->base + OMAP24XX_GPIO_OE);
1504 bank->context.leveldetect0 =
1505 __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0);
1506 bank->context.leveldetect1 =
1507 __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
1508 bank->context.risingdetect =
1509 __raw_readl(bank->base + OMAP24XX_GPIO_RISINGDETECT);
1510 bank->context.fallingdetect =
1511 __raw_readl(bank->base + OMAP24XX_GPIO_FALLINGDETECT);
1512 bank->context.dataout =
1513 __raw_readl(bank->base + OMAP24XX_GPIO_DATAOUT);
1514 }
1515
1516 static void omap_gpio_restore_context(struct gpio_bank *bank)
1517 {
1518 __raw_writel(bank->context.irqenable1,
1519 bank->base + OMAP24XX_GPIO_IRQENABLE1);
1520 __raw_writel(bank->context.irqenable2,
1521 bank->base + OMAP24XX_GPIO_IRQENABLE2);
1522 __raw_writel(bank->context.wake_en,
1523 bank->base + OMAP24XX_GPIO_WAKE_EN);
1524 __raw_writel(bank->context.ctrl, bank->base + OMAP24XX_GPIO_CTRL);
1525 __raw_writel(bank->context.oe, bank->base + OMAP24XX_GPIO_OE);
1526 __raw_writel(bank->context.leveldetect0,
1527 bank->base + OMAP24XX_GPIO_LEVELDETECT0);
1528 __raw_writel(bank->context.leveldetect1,
1529 bank->base + OMAP24XX_GPIO_LEVELDETECT1);
1530 __raw_writel(bank->context.risingdetect,
1531 bank->base + OMAP24XX_GPIO_RISINGDETECT);
1532 __raw_writel(bank->context.fallingdetect,
1533 bank->base + OMAP24XX_GPIO_FALLINGDETECT);
1534 __raw_writel(bank->context.dataout,
1535 bank->base + OMAP24XX_GPIO_DATAOUT);
1536 }
1537 #endif
1538
/* No .remove: banks are mapped statically and never torn down. */
static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.driver		= {
		.name	= "omap_gpio",
	},
};
1545
/*
 * The gpio driver must be registered before machine_init functions
 * access gpio APIs, hence omap_gpio_drv_reg() is a postcore_initcall
 * rather than a regular module/device initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);
1556
/*
 * omap_gpio_sysinit - register the suspend/resume syscore hooks
 *
 * Only OMAP16xx and OMAP2+ banks have wakeup-enable registers, so the
 * syscore ops are compiled and registered only for those SoCs.
 */
static int __init omap_gpio_sysinit(void)
{

#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
	if (cpu_is_omap16xx() || cpu_class_is_omap2())
		register_syscore_ops(&omap_gpio_syscore_ops);
#endif

	return 0;
}

arch_initcall(omap_gpio_sysinit);
This page took 0.102508 seconds and 6 git commands to generate.