drivers/irqchip/irq-gic-v3-its.c
/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/exception.h>

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1 << 0)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct list_head	entry;
	void __iomem		*base;
	unsigned long		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	void			*tables[GITS_BASER_NR_REGS];
	/* Page order of each table, so its_free_tables() can free it all */
	int			table_order[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct list_head	its_device_list;
	u64			flags;
	u32			ite_size;
};

#define ITS_ITT_ALIGN		SZ_256

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
};

/*
 * The ITS view of a device - belongs to an ITS, a collection, owns an
 * interrupt translation table, and a list of interrupts.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
};

static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct device_node *gic_root_node;
static struct rdists *gic_rdists;

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)

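/*
 * Each device carries a per-event map to the collection (and thus the
 * CPU) that the event is currently routed to; dev_event_to_col()
 * simply resolves that indirection.
 */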
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapvi_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	u64	raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
						    struct its_cmd_desc *);

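/*
 * ITS commands are 32 bytes long (four 64-bit words), with each field
 * sitting at a fixed bit position defined by the GICv3 architecture.
 * The helpers below clear the relevant field and OR in the new value;
 * its_fixup_cmd() then converts the whole block to little-endian,
 * which is what the ITS parses regardless of CPU endianness.
 */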
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	cmd->raw_cmd[0] &= ~0xffUL;
	cmd->raw_cmd[0] |= cmd_nr;
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
	cmd->raw_cmd[0] |= ((u64)devid) << 32;
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	cmd->raw_cmd[1] &= ~0xffffffffUL;
	cmd->raw_cmd[1] |= id;
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	cmd->raw_cmd[1] &= 0xffffffffUL;
	cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	cmd->raw_cmd[1] &= ~0x1fUL;
	cmd->raw_cmd[1] |= size & 0x1f;
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	cmd->raw_cmd[2] &= ~0xffffffffffffUL;
	cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	cmd->raw_cmd[2] &= ~(1UL << 63);
	cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
	cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	cmd->raw_cmd[2] &= ~0xffffUL;
	cmd->raw_cmd[2] |= col;
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapvi_cmd.dev,
			       desc->its_mapvi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPVI);
	its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	/* Use the INVALL descriptor, not the (layout-compatible) MAPC one */
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

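/*
 * The command queue is a single 64kB ring of its_cmd_block entries.
 * The driver appends commands at cmd_write and publishes them by
 * writing the byte offset to GITS_CWRITER; the ITS consumes them and
 * advances GITS_CREADR. The ring counts as full when advancing the
 * write pointer would make it catch up with the read pointer.
 */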
static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		__flush_dcache_area(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

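/*
 * Wait for GITS_CREADR to move past the [from, to) range we have just
 * posted. The "rd_idx < from_idx" test catches the case where the
 * read pointer has already wrapped around the end of the queue.
 */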
static void its_wait_for_range_completion(struct its_node *its,
					  struct its_cmd_block *from,
					  struct its_cmd_block *to)
{
	u64 rd_idx, from_idx, to_idx;
	u32 count = 1000000;	/* 1s! */

	from_idx = its_cmd_ptr_to_offset(its, from);
	to_idx = its_cmd_ptr_to_offset(its, to);

	while (1) {
		rd_idx = readl_relaxed(its->base + GITS_CREADR);
		if (rd_idx >= to_idx || rd_idx < from_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}

static void its_send_single_command(struct its_node *its,
				    its_cmd_builder_t builder,
				    struct its_cmd_desc *desc)
{
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
	struct its_collection *sync_col;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	cmd = its_allocate_entry(its);
	if (!cmd) {		/* We're soooooo screwed... */
		pr_err_ratelimited("ITS can't allocate, dropping command\n");
		raw_spin_unlock_irqrestore(&its->lock, flags);
		return;
	}
	sync_col = builder(cmd, desc);
	its_flush_cmd(its, cmd);

	if (sync_col) {
		sync_cmd = its_allocate_entry(its);
		if (!sync_cmd) {
			pr_err_ratelimited("ITS can't SYNC, skipping\n");
			goto post;
		}
		its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
		its_encode_target(sync_cmd, sync_col->target_address);
		its_fixup_cmd(sync_cmd);
		its_flush_cmd(its, sync_cmd);
	}

post:
	next_cmd = its_post_commands(its);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	its_wait_for_range_completion(its, cmd, next_cmd);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapvi_cmd.dev = dev;
	desc.its_mapvi_cmd.phys_id = irq_id;
	desc.its_mapvi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapvi_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

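/*
 * Each LPI has a single configuration byte in the (global) property
 * table, indexed by (hwirq - 8192): bit 0 enables the interrupt, and
 * the upper bits hold its priority. Any change must be made visible
 * to the redistributors and followed by an INV command for the event.
 */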
static void lpi_set_config(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = d->hwirq;
	u32 id = its_get_event_id(d);
	u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;

	if (enable)
		*cfg |= LPI_PROP_ENABLED;
	else
		*cfg &= ~LPI_PROP_ENABLED;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		__flush_dcache_area(cfg, sizeof(*cfg));
	else
		dsb(ishst);
	its_send_inv(its_dev, id);
}

static void its_mask_irq(struct irq_data *d)
{
	lpi_set_config(d, false);
}

static void its_unmask_irq(struct irq_data *d)
{
	lpi_set_config(d, true);
}

static void its_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(d->hwirq);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	target_col = &its_dev->its->collections[cpu];
	its_send_movi(its_dev, target_col, id);
	its_dev->event_map.col_map[id] = cpu;

	return IRQ_SET_MASK_OK_DONE;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->phys_base + GITS_TRANSLATER;

	msg->address_lo		= addr & ((1UL << 32) - 1);
	msg->address_hi		= addr >> 32;
	msg->data		= its_get_event_id(d);
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= its_eoi_irq,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
};

/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
 * bits to the right.
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
 */
#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)
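
/*
 * For example, with id_bits == 16 there are 65536 - 8192 = 57344
 * possible LPIs, which the allocator below manages as
 * 57344 / 32 = 1792 chunks.
 */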

static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
static DEFINE_SPINLOCK(lpi_lock);

static int its_lpi_to_chunk(int lpi)
{
	return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}

static int its_chunk_to_lpi(int chunk)
{
	return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}

static int its_lpi_init(u32 id_bits)
{
	lpi_chunks = its_lpi_to_chunk(1UL << id_bits);

	lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
			     GFP_KERNEL);
	if (!lpi_bitmap) {
		lpi_chunks = 0;
		return -ENOMEM;
	}

	pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
	return 0;
}

static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int chunk_id;
	int nr_chunks;
	int i;

	nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);

	spin_lock(&lpi_lock);

	do {
		chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
						      0, nr_chunks, 0);
		if (chunk_id < lpi_chunks)
			break;

		nr_chunks--;
	} while (nr_chunks > 0);

	if (!nr_chunks)
		goto out;

	bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof(long),
			 GFP_ATOMIC);
	if (!bitmap)
		goto out;

	for (i = 0; i < nr_chunks; i++)
		set_bit(chunk_id + i, lpi_bitmap);

	*base = its_chunk_to_lpi(chunk_id);
	*nr_ids = nr_chunks * IRQS_PER_CHUNK;

out:
	spin_unlock(&lpi_lock);

	if (!bitmap)
		*base = *nr_ids = 0;

	return bitmap;
}

static void its_lpi_free(struct event_lpi_map *map)
{
	int base = map->lpi_base;
	int nr_ids = map->nr_lpis;
	int lpi;

	spin_lock(&lpi_lock);

	for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
		int chunk = its_lpi_to_chunk(lpi);
		BUG_ON(chunk > lpi_chunks);
		if (test_bit(chunk, lpi_bitmap)) {
			clear_bit(chunk, lpi_bitmap);
		} else {
			pr_err("Bad LPI chunk %d\n", chunk);
		}
	}

	spin_unlock(&lpi_lock);

	kfree(map->lpi_map);
	kfree(map->col_map);
}

/*
 * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_PROPBASE_SZ		SZ_64K
#define LPI_PENDBASE_SZ		(LPI_PROPBASE_SZ / 8 + SZ_1K)

/*
 * This is how many bits of ID we need, including the useless ones.
 */
#define LPI_NRBITS		ilog2(LPI_PROPBASE_SZ + SZ_8K)
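
/*
 * With the sizes above: 64kB of property table covers IDs 8192-65535,
 * so LPI_NRBITS is ilog2(SZ_64K + SZ_8K) = 16, and LPI_PENDBASE_SZ is
 * 8kB of pending bits plus 1kB for the 8192 reserved IDs.
 */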

#define LPI_PROP_DEFAULT_PRIO	0xa0

static int __init its_alloc_lpi_tables(void)
{
	phys_addr_t paddr;

	gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
					    get_order(LPI_PROPBASE_SZ));
	if (!gic_rdists->prop_page) {
		pr_err("Failed to allocate PROPBASE\n");
		return -ENOMEM;
	}

	paddr = page_to_phys(gic_rdists->prop_page);
	pr_info("GIC: using LPI property table @%pa\n", &paddr);

	/* Priority 0xa0, Group-1, disabled */
	memset(page_address(gic_rdists->prop_page),
	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
	       LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	__flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);

	return 0;
}

static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_CPU]		= "Physical CPUs",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};

static void its_free_tables(struct its_node *its)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (its->tables[i]) {
			/*
			 * Free the whole multi-page allocation; a bare
			 * free_page() here would leak all but the
			 * first page of higher-order tables.
			 */
			free_pages((unsigned long)its->tables[i],
				   its->table_order[i]);
			its->tables[i] = NULL;
		}
	}
}

static int its_alloc_tables(const char *node_name, struct its_node *its)
{
	int err;
	int i;
	int psz = SZ_64K;
	u64 shr = GITS_BASER_InnerShareable;
	u64 cache = GITS_BASER_WaWb;

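	/*
	 * For each implemented GITS_BASER<n>, write our preferred
	 * attributes (write-allocate/write-back, inner-shareable, 64kB
	 * pages) and read the register back: fields the ITS does not
	 * support read back as something else, in which case we retry
	 * with degraded shareability and/or a smaller page size.
	 */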
	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
		u64 type = GITS_BASER_TYPE(val);
		u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
		int order = get_order(psz);
		int alloc_size;
		u64 tmp;
		void *base;

		if (type == GITS_BASER_TYPE_NONE)
			continue;

		/*
		 * Allocate as many entries as required to fit the
		 * range of device IDs that the ITS can grok... The ID
		 * space being incredibly sparse, this results in a
		 * massive waste of memory.
		 *
		 * For other tables, only allocate a single page.
		 */
		if (type == GITS_BASER_TYPE_DEVICE) {
			u64 typer = readq_relaxed(its->base + GITS_TYPER);
			u32 ids = GITS_TYPER_DEVBITS(typer);

			/*
			 * 'order' was initialized earlier to the default page
			 * granule of the ITS. We can't have an allocation
			 * smaller than that. If the requested allocation
			 * is smaller, round up to the default page granule.
			 */
			order = max(get_order((1UL << ids) * entry_size),
				    order);
			if (order >= MAX_ORDER) {
				order = MAX_ORDER - 1;
				pr_warn("%s: Device Table too large, reduce its page order to %u\n",
					node_name, order);
			}
		}

		alloc_size = (1 << order) * PAGE_SIZE;
		base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
		if (!base) {
			err = -ENOMEM;
			goto out_free;
		}

		its->tables[i] = base;
		/* Record the order for its_free_tables() */
		its->table_order[i] = order;

retry_baser:
		val = (virt_to_phys(base) |
		       (type << GITS_BASER_TYPE_SHIFT) |
		       ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
		       cache |
		       shr |
		       GITS_BASER_VALID);

		switch (psz) {
		case SZ_4K:
			val |= GITS_BASER_PAGE_SIZE_4K;
			break;
		case SZ_16K:
			val |= GITS_BASER_PAGE_SIZE_16K;
			break;
		case SZ_64K:
			val |= GITS_BASER_PAGE_SIZE_64K;
			break;
		}

		val |= (alloc_size / psz) - 1;

		writeq_relaxed(val, its->base + GITS_BASER + i * 8);
		tmp = readq_relaxed(its->base + GITS_BASER + i * 8);

		if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
			/*
			 * Shareability didn't stick. Just use
			 * whatever the read reported, which is likely
			 * to be the only thing this redistributor
			 * supports. If that's zero, make it
			 * non-cacheable as well.
			 */
			shr = tmp & GITS_BASER_SHAREABILITY_MASK;
			if (!shr) {
				cache = GITS_BASER_nC;
				__flush_dcache_area(base, alloc_size);
			}
			goto retry_baser;
		}

		if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
			/*
			 * Page size didn't stick. Let's try a smaller
			 * size and retry. If we reach 4K, then
			 * something is horribly wrong...
			 */
			switch (psz) {
			case SZ_16K:
				psz = SZ_4K;
				goto retry_baser;
			case SZ_64K:
				psz = SZ_16K;
				goto retry_baser;
			}
		}

		if (val != tmp) {
			pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
			       node_name, i,
			       (unsigned long) val, (unsigned long) tmp);
			err = -ENXIO;
			goto out_free;
		}

		pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
			(int)(alloc_size / entry_size),
			its_base_type_string[type],
			(unsigned long)virt_to_phys(base),
			psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
	}

	return 0;

out_free:
	its_free_tables(its);

	return err;
}

static int its_alloc_collections(struct its_node *its)
{
	its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
				   GFP_KERNEL);
	if (!its->collections)
		return -ENOMEM;

	return 0;
}

static void its_cpu_init_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	struct page *pend_page;
	u64 val, tmp;

	/* If we didn't allocate the pending table yet, do it now */
	pend_page = gic_data_rdist()->pend_page;
	if (!pend_page) {
		phys_addr_t paddr;
		/*
		 * The pending pages have to be at least 64kB aligned,
		 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
		 */
		pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
					get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
		if (!pend_page) {
			pr_err("Failed to allocate PENDBASE for CPU%d\n",
			       smp_processor_id());
			return;
		}

		/* Make sure the GIC will observe the zero-ed page */
		__flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);

		paddr = page_to_phys(pend_page);
		pr_info("CPU%d: using LPI pending table @%pa\n",
			smp_processor_id(), &paddr);
		gic_data_rdist()->pend_page = pend_page;
	}

	/* Disable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/*
	 * Make sure any change to the table is observable by the GIC.
	 */
	dsb(sy);

	/* set PROPBASE */
	val = (page_to_phys(gic_rdists->prop_page) |
	       GICR_PROPBASER_InnerShareable |
	       GICR_PROPBASER_WaWb |
	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

	writeq_relaxed(val, rbase + GICR_PROPBASER);
	tmp = readq_relaxed(rbase + GICR_PROPBASER);

	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
				 GICR_PROPBASER_CACHEABILITY_MASK);
			val |= GICR_PROPBASER_nC;
			writeq_relaxed(val, rbase + GICR_PROPBASER);
		}
		pr_info_once("GIC: using cache flushing for LPI property table\n");
		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
	}

	/* set PENDBASE */
	val = (page_to_phys(pend_page) |
	       GICR_PENDBASER_InnerShareable |
	       GICR_PENDBASER_WaWb);

	writeq_relaxed(val, rbase + GICR_PENDBASER);
	tmp = readq_relaxed(rbase + GICR_PENDBASER);

	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
		/*
		 * The HW reports non-shareable, we must remove the
		 * cacheability attributes as well.
		 */
		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
			 GICR_PENDBASER_CACHEABILITY_MASK);
		val |= GICR_PENDBASER_nC;
		writeq_relaxed(val, rbase + GICR_PENDBASER);
	}

	/* Enable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val |= GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure the GIC has seen the above */
	dsb(sy);
}

static void its_cpu_init_collection(void)
{
	struct its_node *its;
	int cpu;

	spin_lock(&its_lock);
	cpu = smp_processor_id();

	list_for_each_entry(its, &its_nodes, entry) {
		u64 target;

		/*
		 * We now have to bind each collection to its target
		 * redistributor.
		 */
		if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
			/*
			 * This ITS wants the physical address of the
			 * redistributor.
			 */
			target = gic_data_rdist()->phys_base;
		} else {
			/*
			 * This ITS wants a linear CPU number.
			 */
			target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
			target = GICR_TYPER_CPU_NUMBER(target) << 16;
		}

		/* Perform collection mapping */
		its->collections[cpu].target_address = target;
		its->collections[cpu].col_id = cpu;

		its_send_mapc(its, &its->collections[cpu], 1);
		its_send_invall(its, &its->collections[cpu]);
	}

	spin_unlock(&its_lock);
}

static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
	struct its_device *its_dev = NULL, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	list_for_each_entry(tmp, &its->its_device_list, entry) {
		if (tmp->device_id == dev_id) {
			its_dev = tmp;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&its->lock, flags);

	return its_dev;
}

static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
					    int nvecs)
{
	struct its_device *dev;
	unsigned long *lpi_map;
	unsigned long flags;
	u16 *col_map = NULL;
	void *itt;
	int lpi_base;
	int nr_lpis;
	int nr_ites;
	int sz;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	/*
	 * At least one bit of EventID is being used, hence a minimum
	 * of two entries. No, the architecture doesn't let you
	 * express an ITT with a single entry.
	 */
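	/*
	 * For example, a request for 30 vectors with an 8-byte ITE
	 * gives nr_ites = 32 and a 256-byte ITT; the allocation below
	 * is padded so that a 256-byte-aligned ITT fits inside it.
	 */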
	nr_ites = max(2UL, roundup_pow_of_two(nvecs));
	sz = nr_ites * its->ite_size;
	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
	itt = kzalloc(sz, GFP_KERNEL);
	lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
	if (lpi_map)
		col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);

	if (!dev || !itt || !lpi_map || !col_map) {
		kfree(dev);
		kfree(itt);
		kfree(lpi_map);
		kfree(col_map);
		return NULL;
	}

	__flush_dcache_area(itt, sz);

	dev->its = its;
	dev->itt = itt;
	dev->nr_ites = nr_ites;
	dev->event_map.lpi_map = lpi_map;
	dev->event_map.col_map = col_map;
	dev->event_map.lpi_base = lpi_base;
	dev->event_map.nr_lpis = nr_lpis;
	dev->device_id = dev_id;
	INIT_LIST_HEAD(&dev->entry);

	raw_spin_lock_irqsave(&its->lock, flags);
	list_add(&dev->entry, &its->its_device_list);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	/* Map device to its ITT */
	its_send_mapd(dev, 1);

	return dev;
}

static void its_free_device(struct its_device *its_dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
	list_del(&its_dev->entry);
	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
	kfree(its_dev->itt);
	kfree(its_dev);
}

static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
{
	int idx;

	idx = find_first_zero_bit(dev->event_map.lpi_map,
				  dev->event_map.nr_lpis);
	if (idx == dev->event_map.nr_lpis)
		return -ENOSPC;

	*hwirq = dev->event_map.lpi_base + idx;
	set_bit(idx, dev->event_map.lpi_map);

	return 0;
}

static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *info)
{
	struct its_node *its;
	struct its_device *its_dev;
	struct msi_domain_info *msi_info;
	u32 dev_id;

	/*
	 * We ignore "dev" entirely, and rely on the dev_id that has
	 * been passed via the scratchpad. This limits this domain's
	 * usefulness to upper layers that definitely know that they
	 * are built on top of the ITS.
	 */
	dev_id = info->scratchpad[0].ul;

	msi_info = msi_get_domain_info(domain);
	its = msi_info->data;

	its_dev = its_find_device(its, dev_id);
	if (its_dev) {
		/*
		 * We have already seen this ID, probably through
		 * another alias (PCI bridge of some sort). No need to
		 * create the device.
		 */
		pr_debug("Reusing ITT for devID %x\n", dev_id);
		goto out;
	}

	its_dev = its_create_device(its, dev_id, nvec);
	if (!its_dev)
		return -ENOMEM;

	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
out:
	info->scratchpad[0].ptr = its_dev;
	return 0;
}


static struct msi_domain_ops its_msi_domain_ops = {
	.msi_prepare	= its_msi_prepare,
};

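/*
 * LPIs are allocated from the parent (GICv3) domain using the same
 * three-cell interrupt specifier as the device tree binding: the
 * interrupt type, the hardware IRQ number and the trigger (LPIs are
 * always edge-triggered).
 */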
static int its_irq_gic_domain_alloc(struct irq_domain *domain,
				    unsigned int virq,
				    irq_hw_number_t hwirq)
{
	struct of_phandle_args args;

	args.np = domain->parent->of_node;
	args.args_count = 3;
	args.args[0] = GIC_IRQ_TYPE_LPI;
	args.args[1] = hwirq;
	args.args[2] = IRQ_TYPE_EDGE_RISING;

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
}

static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct its_device *its_dev = info->scratchpad[0].ptr;
	irq_hw_number_t hwirq;
	int err;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		err = its_alloc_device_irq(its_dev, &hwirq);
		if (err)
			return err;

		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq, &its_irq_chip, its_dev);
		pr_debug("ID:%d pID:%d vID:%d\n",
			 (int)(hwirq - its_dev->event_map.lpi_base),
			 (int)hwirq, virq + i);
	}

	return 0;
}

static void its_irq_domain_activate(struct irq_domain *domain,
				    struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Bind the LPI to the first possible CPU */
	its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);

	/* Map the GIC IRQ and event to the device */
	its_send_mapvi(its_dev, d->hwirq, event);
}

static void its_irq_domain_deactivate(struct irq_domain *domain,
				      struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Stop the delivery of interrupts */
	its_send_discard(its_dev, event);
}

static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		u32 event = its_get_event_id(data);

		/* Mark interrupt index as unused */
		clear_bit(event, its_dev->event_map.lpi_map);

		/* Nuke the entry in the domain */
		irq_domain_reset_irq_data(data);
	}

	/* If all interrupts have been freed, start mopping the floor */
	if (bitmap_empty(its_dev->event_map.lpi_map,
			 its_dev->event_map.nr_lpis)) {
		its_lpi_free(&its_dev->event_map);

		/* Unmap device/itt */
		its_send_mapd(its_dev, 0);
		its_free_device(its_dev);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops its_domain_ops = {
	.alloc			= its_irq_domain_alloc,
	.free			= its_irq_domain_free,
	.activate		= its_irq_domain_activate,
	.deactivate		= its_irq_domain_deactivate,
};

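/*
 * Before (re)programming CBASER and the GITS_BASERn registers, make
 * sure the ITS is actually disabled and quiescent - it may have been
 * left enabled by the firmware or by a previous kernel (kexec).
 */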
static int its_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* 1s */
	u32 val;

	val = readl_relaxed(base + GITS_CTLR);
	if (val & GITS_CTLR_QUIESCENT)
		return 0;

	/* Disable the generation of all interrupts to this ITS */
	val &= ~GITS_CTLR_ENABLE;
	writel_relaxed(val, base + GITS_CTLR);

	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
	while (1) {
		val = readl_relaxed(base + GITS_CTLR);
		if (val & GITS_CTLR_QUIESCENT)
			return 0;

		count--;
		if (!count)
			return -EBUSY;

		cpu_relax();
		udelay(1);
	}
}

static int its_probe(struct device_node *node, struct irq_domain *parent)
{
	struct resource res;
	struct its_node *its;
	void __iomem *its_base;
	struct irq_domain *inner_domain;
	u32 val;
	u64 baser, tmp;
	int err;

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		pr_warn("%s: no regs?\n", node->full_name);
		return -ENXIO;
	}

	its_base = ioremap(res.start, resource_size(&res));
	if (!its_base) {
		pr_warn("%s: unable to map registers\n", node->full_name);
		return -ENOMEM;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("%s: no ITS detected, giving up\n", node->full_name);
		err = -ENODEV;
		goto out_unmap;
	}

	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("%s: failed to quiesce, giving up\n",
			node->full_name);
		goto out_unmap;
	}

	pr_info("ITS: %s\n", node->full_name);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	its->base = its_base;
	its->phys_base = res.start;
	its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;

	its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
	if (!its->cmd_base) {
		err = -ENOMEM;
		goto out_free_its;
	}
	its->cmd_write = its->cmd_base;

	err = its_alloc_tables(node->full_name, its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base) |
		 GITS_CBASER_WaWb |
		 GITS_CBASER_InnerShareable |
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
		 GITS_CBASER_VALID);

	writeq_relaxed(baser, its->base + GITS_CBASER);
	tmp = readq_relaxed(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			writeq_relaxed(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	writeq_relaxed(0, its->base + GITS_CWRITER);
	writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);

	if (of_property_read_bool(node, "msi-controller")) {
		struct msi_domain_info *info;

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			err = -ENOMEM;
			goto out_free_tables;
		}

		inner_domain = irq_domain_add_tree(node, &its_domain_ops, its);
		if (!inner_domain) {
			err = -ENOMEM;
			kfree(info);
			goto out_free_tables;
		}

		inner_domain->parent = parent;
		inner_domain->bus_token = DOMAIN_BUS_NEXUS;
		info->ops = &its_msi_domain_ops;
		info->data = its;
		inner_domain->host_data = info;
	}

	spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	kfree(its->cmd_base);
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS: failed probing %s (%d)\n", node->full_name, err);
	return err;
}

static bool gic_rdists_supports_plpis(void)
{
	return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		if (!gic_rdists_supports_plpis()) {
			pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
			return -ENXIO;
		}
		its_cpu_init_lpis();
		its_cpu_init_collection();
	}

	return 0;
}

static const struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};

int its_init(struct device_node *node, struct rdists *rdists,
	     struct irq_domain *parent_domain)
{
	struct device_node *np;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		its_probe(np, parent_domain);
	}

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	gic_rdists = rdists;
	gic_root_node = node;

	its_alloc_lpi_tables();
	its_lpi_init(rdists->id_bits);

	return 0;
}