/*
 * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * The Armada 370 and Armada XP SOCs have a coherency fabric which is
 * responsible for ensuring hardware coherency between all CPUs and between
 * CPUs and I/O masters. This file initializes the coherency fabric and
 * supplies basic routines for configuring and controlling hardware coherency
 */
5ab5afd8 TP |
20 | #define pr_fmt(fmt) "mvebu-coherency: " fmt |
21 | ||
009f1315 GC |
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include "armada-370-xp.h"
#include "coherency.h"
009f1315 | 36 | |
/*
 * Physical base of the coherency fabric registers; secondary CPUs read
 * this before they join the fabric (see armada_370_coherency_init(),
 * which flushes it with sync_cache_w() for exactly that reason).
 */
unsigned long coherency_phys_base;
/* First register window of the fabric (configuration registers) */
void __iomem *coherency_base;
/* Per-CPU window holding the I/O sync barrier register */
static void __iomem *coherency_cpu_base;

/* Coherency fabric registers */
#define COHERENCY_FABRIC_CFG_OFFSET		0x4

#define IO_SYNC_BARRIER_CTL_OFFSET		0x0

/* Fabric flavours distinguished by the matched DT compatible string */
enum {
	COHERENCY_FABRIC_TYPE_NONE,
	COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
	COHERENCY_FABRIC_TYPE_ARMADA_375,
	COHERENCY_FABRIC_TYPE_ARMADA_380,
};
52 | ||
009f1315 | 53 | static struct of_device_id of_coherency_table[] = { |
924d38f4 TP |
54 | {.compatible = "marvell,coherency-fabric", |
55 | .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP }, | |
77fa4b9a TP |
56 | {.compatible = "marvell,armada-375-coherency-fabric", |
57 | .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_375 }, | |
d0de9323 TP |
58 | {.compatible = "marvell,armada-380-coherency-fabric", |
59 | .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_380 }, | |
009f1315 GC |
60 | { /* end of list */ }, |
61 | }; | |
62 | ||
009f1315 | 63 | /* Function defined in coherency_ll.S */ |
b41375f7 | 64 | int ll_set_cpu_coherent(void); |
009f1315 | 65 | |
952f4ca7 | 66 | int set_cpu_coherent(void) |
009f1315 GC |
67 | { |
68 | if (!coherency_base) { | |
b41375f7 | 69 | pr_warn("Can't make current CPU cache coherent.\n"); |
009f1315 GC |
70 | pr_warn("Coherency fabric is not initialized\n"); |
71 | return 1; | |
72 | } | |
73 | ||
b41375f7 | 74 | return ll_set_cpu_coherent(); |
009f1315 GC |
75 | } |
76 | ||
/*
 * The below code implements the I/O coherency workaround on Armada
 * 375. This workaround consists in using the two channels of the
 * first XOR engine to trigger a XOR transaction that serves as the
 * I/O coherency barrier.
 */

static void __iomem *xor_base, *xor_high_base;
/* One barrier buffer per CPU, indexed by smp_processor_id() */
static dma_addr_t coherency_wa_buf_phys[CONFIG_NR_CPUS];
static void *coherency_wa_buf[CONFIG_NR_CPUS];
/* Set once armada_375_coherency_init_wa() has programmed the engine */
static bool coherency_wa_enabled;

/* XOR engine register offsets; chan = channel, w = mbus window index */
#define XOR_CONFIG(chan)            (0x10 + (chan * 4))
#define XOR_ACTIVATION(chan)        (0x20 + (chan * 4))
#define WINDOW_BAR_ENABLE(chan)     (0x240 + ((chan) << 2))
#define WINDOW_BASE(w)              (0x250 + ((w) << 2))
#define WINDOW_SIZE(w)              (0x270 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w)        (0x290 + ((w) << 2))
#define WINDOW_OVERRIDE_CTRL(chan)  (0x2A0 + ((chan) << 2))
#define XOR_DEST_POINTER(chan)      (0x2B0 + (chan * 4))
#define XOR_BLOCK_SIZE(chan)        (0x2C0 + (chan * 4))
#define XOR_INIT_VALUE_LOW           0x2E0
#define XOR_INIT_VALUE_HIGH          0x2E4
100 | ||
/*
 * I/O barrier via the XOR engine: write a marker into this CPU's
 * buffer, fire a one-shot memset on this CPU's channel, and spin
 * until the engine has overwritten the marker with zero.
 *
 * NOTE(review): smp_processor_id() selects the channel; if this can
 * run preemptibly, a migration between selecting idx and polling
 * would let two CPUs race on one channel — confirm callers hold
 * preemption off (the DMA-ops paths that call this are not visible
 * here).
 */
static inline void mvebu_hwcc_armada375_sync_io_barrier_wa(void)
{
	int idx = smp_processor_id();

	/* Write '1' to the first word of the buffer */
	writel(0x1, coherency_wa_buf[idx]);

	/* Wait until the engine is idle */
	while ((readl(xor_base + XOR_ACTIVATION(idx)) >> 4) & 0x3)
		;

	/* Order the marker write before triggering the engine */
	dmb();

	/* Trigger channel */
	writel(0x1, xor_base + XOR_ACTIVATION(idx));

	/* Poll the data until it is cleared by the XOR transaction */
	while (readl(coherency_wa_buf[idx]))
		;
}
121 | ||
122 | static void __init armada_375_coherency_init_wa(void) | |
123 | { | |
124 | const struct mbus_dram_target_info *dram; | |
125 | struct device_node *xor_node; | |
126 | struct property *xor_status; | |
127 | struct clk *xor_clk; | |
128 | u32 win_enable = 0; | |
129 | int i; | |
130 | ||
131 | pr_warn("enabling coherency workaround for Armada 375 Z1, one XOR engine disabled\n"); | |
132 | ||
133 | /* | |
134 | * Since the workaround uses one XOR engine, we grab a | |
135 | * reference to its Device Tree node first. | |
136 | */ | |
137 | xor_node = of_find_compatible_node(NULL, NULL, "marvell,orion-xor"); | |
138 | BUG_ON(!xor_node); | |
139 | ||
140 | /* | |
141 | * Then we mark it as disabled so that the real XOR driver | |
142 | * will not use it. | |
143 | */ | |
144 | xor_status = kzalloc(sizeof(struct property), GFP_KERNEL); | |
145 | BUG_ON(!xor_status); | |
146 | ||
147 | xor_status->value = kstrdup("disabled", GFP_KERNEL); | |
148 | BUG_ON(!xor_status->value); | |
149 | ||
150 | xor_status->length = 8; | |
151 | xor_status->name = kstrdup("status", GFP_KERNEL); | |
152 | BUG_ON(!xor_status->name); | |
153 | ||
154 | of_update_property(xor_node, xor_status); | |
155 | ||
156 | /* | |
157 | * And we remap the registers, get the clock, and do the | |
158 | * initial configuration of the XOR engine. | |
159 | */ | |
160 | xor_base = of_iomap(xor_node, 0); | |
161 | xor_high_base = of_iomap(xor_node, 1); | |
162 | ||
163 | xor_clk = of_clk_get_by_name(xor_node, NULL); | |
164 | BUG_ON(!xor_clk); | |
165 | ||
166 | clk_prepare_enable(xor_clk); | |
167 | ||
168 | dram = mv_mbus_dram_info(); | |
169 | ||
170 | for (i = 0; i < 8; i++) { | |
171 | writel(0, xor_base + WINDOW_BASE(i)); | |
172 | writel(0, xor_base + WINDOW_SIZE(i)); | |
173 | if (i < 4) | |
174 | writel(0, xor_base + WINDOW_REMAP_HIGH(i)); | |
175 | } | |
176 | ||
177 | for (i = 0; i < dram->num_cs; i++) { | |
178 | const struct mbus_dram_window *cs = dram->cs + i; | |
179 | writel((cs->base & 0xffff0000) | | |
180 | (cs->mbus_attr << 8) | | |
181 | dram->mbus_dram_target_id, xor_base + WINDOW_BASE(i)); | |
182 | writel((cs->size - 1) & 0xffff0000, xor_base + WINDOW_SIZE(i)); | |
183 | ||
184 | win_enable |= (1 << i); | |
185 | win_enable |= 3 << (16 + (2 * i)); | |
186 | } | |
187 | ||
188 | writel(win_enable, xor_base + WINDOW_BAR_ENABLE(0)); | |
189 | writel(win_enable, xor_base + WINDOW_BAR_ENABLE(1)); | |
190 | writel(0, xor_base + WINDOW_OVERRIDE_CTRL(0)); | |
191 | writel(0, xor_base + WINDOW_OVERRIDE_CTRL(1)); | |
192 | ||
193 | for (i = 0; i < CONFIG_NR_CPUS; i++) { | |
194 | coherency_wa_buf[i] = kzalloc(PAGE_SIZE, GFP_KERNEL); | |
195 | BUG_ON(!coherency_wa_buf[i]); | |
196 | ||
197 | /* | |
198 | * We can't use the DMA mapping API, since we don't | |
199 | * have a valid 'struct device' pointer | |
200 | */ | |
201 | coherency_wa_buf_phys[i] = | |
202 | virt_to_phys(coherency_wa_buf[i]); | |
203 | BUG_ON(!coherency_wa_buf_phys[i]); | |
204 | ||
205 | /* | |
206 | * Configure the XOR engine for memset operation, with | |
207 | * a 128 bytes block size | |
208 | */ | |
209 | writel(0x444, xor_base + XOR_CONFIG(i)); | |
210 | writel(128, xor_base + XOR_BLOCK_SIZE(i)); | |
211 | writel(coherency_wa_buf_phys[i], | |
212 | xor_base + XOR_DEST_POINTER(i)); | |
213 | } | |
214 | ||
215 | writel(0x0, xor_base + XOR_INIT_VALUE_LOW); | |
216 | writel(0x0, xor_base + XOR_INIT_VALUE_HIGH); | |
217 | ||
218 | coherency_wa_enabled = true; | |
219 | } | |
220 | ||
e60304f8 GC |
221 | static inline void mvebu_hwcc_sync_io_barrier(void) |
222 | { | |
5ab5afd8 TP |
223 | if (coherency_wa_enabled) { |
224 | mvebu_hwcc_armada375_sync_io_barrier_wa(); | |
225 | return; | |
226 | } | |
227 | ||
e60304f8 GC |
228 | writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET); |
229 | while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1); | |
230 | } | |
231 | ||
232 | static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page, | |
233 | unsigned long offset, size_t size, | |
234 | enum dma_data_direction dir, | |
235 | struct dma_attrs *attrs) | |
236 | { | |
237 | if (dir != DMA_TO_DEVICE) | |
238 | mvebu_hwcc_sync_io_barrier(); | |
239 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; | |
240 | } | |
241 | ||
242 | ||
/*
 * DMA unmap hook: for transfers the device may have written
 * (anything but DMA_TO_DEVICE), drain outstanding I/O through the
 * coherency barrier before the CPU reads the buffer.
 */
static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				      size_t size, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	if (dir != DMA_TO_DEVICE)
		mvebu_hwcc_sync_io_barrier();
}
250 | ||
/*
 * DMA sync hook (used for both for_cpu and for_device): only
 * device-to-CPU directions need the I/O barrier.
 */
static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		mvebu_hwcc_sync_io_barrier();
}
257 | ||
/*
 * DMA ops for hardware-coherent I/O: reuse the generic ARM DMA ops,
 * but hook single-page map/unmap/sync to insert the I/O sync barrier
 * (scatter-gather entry points keep the generic ARM handlers).
 */
static struct dma_map_ops mvebu_hwcc_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.map_page		= mvebu_hwcc_dma_map_page,
	.unmap_page		= mvebu_hwcc_dma_unmap_page,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= mvebu_hwcc_dma_sync,
	.sync_single_for_device	= mvebu_hwcc_dma_sync,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
273 | ||
274 | static int mvebu_hwcc_platform_notifier(struct notifier_block *nb, | |
275 | unsigned long event, void *__dev) | |
276 | { | |
277 | struct device *dev = __dev; | |
278 | ||
279 | if (event != BUS_NOTIFY_ADD_DEVICE) | |
280 | return NOTIFY_DONE; | |
281 | set_dma_ops(dev, &mvebu_hwcc_dma_ops); | |
282 | ||
283 | return NOTIFY_OK; | |
284 | } | |
285 | ||
/* Registered on the platform bus by coherency_late_init() */
static struct notifier_block mvebu_hwcc_platform_nb = {
	.notifier_call = mvebu_hwcc_platform_notifier,
};
289 | ||
/*
 * Map the Armada 370/XP coherency fabric registers, publish the
 * physical base for secondary CPUs, and plug the boot CPU into the
 * fabric.
 */
static void __init armada_370_coherency_init(struct device_node *np)
{
	struct resource res;

	/*
	 * NOTE(review): of_address_to_resource() return value is
	 * ignored; res.start is garbage if it fails — confirm the DT
	 * binding guarantees a reg property here.
	 */
	of_address_to_resource(np, 0, &res);
	coherency_phys_base = res.start;
	/*
	 * Ensure secondary CPUs will see the updated value,
	 * which they read before they join the coherency
	 * fabric, and therefore before they are coherent with
	 * the boot CPU cache.
	 */
	sync_cache_w(&coherency_phys_base);
	coherency_base = of_iomap(np, 0);
	coherency_cpu_base = of_iomap(np, 1);
	set_cpu_coherent();
}
307 | ||
/*
 * Armada 375/380: only the I/O sync barrier window (register window 0
 * of the coherency-fabric node) needs mapping here.
 */
static void __init armada_375_380_coherency_init(struct device_node *np)
{
	coherency_cpu_base = of_iomap(np, 0);
}
312 | ||
501f928e | 313 | static int coherency_type(void) |
009f1315 GC |
314 | { |
315 | struct device_node *np; | |
5fbba080 | 316 | const struct of_device_id *match; |
009f1315 | 317 | |
5fbba080 | 318 | np = of_find_matching_node_and_match(NULL, of_coherency_table, &match); |
009f1315 | 319 | if (np) { |
5fbba080 | 320 | int type = (int) match->data; |
924d38f4 | 321 | |
501f928e | 322 | /* Armada 370/XP coherency works in both UP and SMP */ |
924d38f4 | 323 | if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP) |
501f928e | 324 | return type; |
924d38f4 | 325 | |
77fa4b9a TP |
326 | /* Armada 375 coherency works only on SMP */ |
327 | else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 && is_smp()) | |
328 | return type; | |
329 | ||
d0de9323 TP |
330 | /* Armada 380 coherency works only on SMP */ |
331 | else if (type == COHERENCY_FABRIC_TYPE_ARMADA_380 && is_smp()) | |
332 | return type; | |
009f1315 GC |
333 | } |
334 | ||
501f928e | 335 | return COHERENCY_FABRIC_TYPE_NONE; |
009f1315 | 336 | } |
865e0527 | 337 | |
/* Return non-zero when this SoC has a usable coherency fabric */
int coherency_available(void)
{
	return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
}
342 | ||
343 | int __init coherency_init(void) | |
344 | { | |
345 | int type = coherency_type(); | |
abe511ac JZ |
346 | struct device_node *np; |
347 | ||
348 | np = of_find_matching_node(NULL, of_coherency_table); | |
501f928e TP |
349 | |
350 | if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP) | |
351 | armada_370_coherency_init(np); | |
d0de9323 TP |
352 | else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 || |
353 | type == COHERENCY_FABRIC_TYPE_ARMADA_380) | |
354 | armada_375_380_coherency_init(np); | |
501f928e TP |
355 | |
356 | return 0; | |
357 | } | |
358 | ||
359 | static int __init coherency_late_init(void) | |
360 | { | |
5ab5afd8 TP |
361 | int type = coherency_type(); |
362 | ||
363 | if (type == COHERENCY_FABRIC_TYPE_NONE) | |
364 | return 0; | |
365 | ||
366 | if (type == COHERENCY_FABRIC_TYPE_ARMADA_375) | |
367 | armada_375_coherency_init_wa(); | |
368 | ||
369 | bus_register_notifier(&platform_bus_type, | |
370 | &mvebu_hwcc_platform_nb); | |
371 | ||
865e0527 TP |
372 | return 0; |
373 | } | |
374 | ||
375 | postcore_initcall(coherency_late_init); |