1da177e4 1/*
2 * Common routines for the Marvell/Galileo Discovery line of host bridges
3 * (gt64260, mv64360, mv64460, ...).
4 *
5 * Author: Mark A. Greer <mgreer@mvista.com>
6 *
7 * 2004 (c) MontaVista, Software, Inc. This file is licensed under
8 * the terms of the GNU General Public License version 2. This program
9 * is licensed "as is" without any warranty of any kind, whether express
10 * or implied.
11 */
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/pci.h>
15#include <linux/slab.h>
16#include <linux/module.h>
461e6667 17#include <linux/mutex.h>
1da177e4 18#include <linux/string.h>
19#include <linux/spinlock.h>
20#include <linux/mv643xx.h>
d052d1be 21#include <linux/platform_device.h>
22
23#include <asm/byteorder.h>
24#include <asm/io.h>
25#include <asm/irq.h>
26#include <asm/uaccess.h>
27#include <asm/machdep.h>
28#include <asm/pci-bridge.h>
29#include <asm/delay.h>
30#include <asm/mv64x60.h>
31
32
d01c08c9 33u8 mv64x60_pci_exclude_bridge = 1;
a9f6a0dd 34DEFINE_SPINLOCK(mv64x60_lock);
1da177e4 35
d01c08c9 36static phys_addr_t mv64x60_bridge_pbase;
a7625d6e 37static void __iomem *mv64x60_bridge_vbase;
1da177e4 38static u32 mv64x60_bridge_type = MV64x60_TYPE_INVALID;
39static u32 mv64x60_bridge_rev;
40#if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
41static struct pci_controller sysfs_hose_a;
42#endif
43
44static u32 gt64260_translate_size(u32 base, u32 size, u32 num_bits);
45static u32 gt64260_untranslate_size(u32 base, u32 size, u32 num_bits);
46static void gt64260_set_pci2mem_window(struct pci_controller *hose, u32 bus,
47 u32 window, u32 base);
48static void gt64260_set_pci2regs_window(struct mv64x60_handle *bh,
49 struct pci_controller *hose, u32 bus, u32 base);
50static u32 gt64260_is_enabled_32bit(struct mv64x60_handle *bh, u32 window);
51static void gt64260_enable_window_32bit(struct mv64x60_handle *bh, u32 window);
52static void gt64260_disable_window_32bit(struct mv64x60_handle *bh, u32 window);
53static void gt64260_enable_window_64bit(struct mv64x60_handle *bh, u32 window);
54static void gt64260_disable_window_64bit(struct mv64x60_handle *bh, u32 window);
55static void gt64260_disable_all_windows(struct mv64x60_handle *bh,
56 struct mv64x60_setup_info *si);
57static void gt64260a_chip_specific_init(struct mv64x60_handle *bh,
58 struct mv64x60_setup_info *si);
59static void gt64260b_chip_specific_init(struct mv64x60_handle *bh,
60 struct mv64x60_setup_info *si);
61
62static u32 mv64360_translate_size(u32 base, u32 size, u32 num_bits);
63static u32 mv64360_untranslate_size(u32 base, u32 size, u32 num_bits);
64static void mv64360_set_pci2mem_window(struct pci_controller *hose, u32 bus,
65 u32 window, u32 base);
66static void mv64360_set_pci2regs_window(struct mv64x60_handle *bh,
67 struct pci_controller *hose, u32 bus, u32 base);
68static u32 mv64360_is_enabled_32bit(struct mv64x60_handle *bh, u32 window);
69static void mv64360_enable_window_32bit(struct mv64x60_handle *bh, u32 window);
70static void mv64360_disable_window_32bit(struct mv64x60_handle *bh, u32 window);
71static void mv64360_enable_window_64bit(struct mv64x60_handle *bh, u32 window);
72static void mv64360_disable_window_64bit(struct mv64x60_handle *bh, u32 window);
73static void mv64360_disable_all_windows(struct mv64x60_handle *bh,
74 struct mv64x60_setup_info *si);
75static void mv64360_config_io2mem_windows(struct mv64x60_handle *bh,
76 struct mv64x60_setup_info *si,
77 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2]);
78static void mv64360_set_mpsc2regs_window(struct mv64x60_handle *bh, u32 base);
79static void mv64360_chip_specific_init(struct mv64x60_handle *bh,
80 struct mv64x60_setup_info *si);
81static void mv64460_chip_specific_init(struct mv64x60_handle *bh,
82 struct mv64x60_setup_info *si);
83
84
85/*
86 * Define tables that have the chip-specific info for each type of
87 * Marvell bridge chip.
88 */
89static struct mv64x60_chip_info gt64260a_ci __initdata = { /* GT64260A */
90 .translate_size = gt64260_translate_size,
91 .untranslate_size = gt64260_untranslate_size,
92 .set_pci2mem_window = gt64260_set_pci2mem_window,
93 .set_pci2regs_window = gt64260_set_pci2regs_window,
94 .is_enabled_32bit = gt64260_is_enabled_32bit,
95 .enable_window_32bit = gt64260_enable_window_32bit,
96 .disable_window_32bit = gt64260_disable_window_32bit,
97 .enable_window_64bit = gt64260_enable_window_64bit,
98 .disable_window_64bit = gt64260_disable_window_64bit,
99 .disable_all_windows = gt64260_disable_all_windows,
100 .chip_specific_init = gt64260a_chip_specific_init,
101 .window_tab_32bit = gt64260_32bit_windows,
102 .window_tab_64bit = gt64260_64bit_windows,
103};
104
105static struct mv64x60_chip_info gt64260b_ci __initdata = { /* GT64260B */
106 .translate_size = gt64260_translate_size,
107 .untranslate_size = gt64260_untranslate_size,
108 .set_pci2mem_window = gt64260_set_pci2mem_window,
109 .set_pci2regs_window = gt64260_set_pci2regs_window,
110 .is_enabled_32bit = gt64260_is_enabled_32bit,
111 .enable_window_32bit = gt64260_enable_window_32bit,
112 .disable_window_32bit = gt64260_disable_window_32bit,
113 .enable_window_64bit = gt64260_enable_window_64bit,
114 .disable_window_64bit = gt64260_disable_window_64bit,
115 .disable_all_windows = gt64260_disable_all_windows,
116 .chip_specific_init = gt64260b_chip_specific_init,
117 .window_tab_32bit = gt64260_32bit_windows,
118 .window_tab_64bit = gt64260_64bit_windows,
119};
120
121static struct mv64x60_chip_info mv64360_ci __initdata = { /* MV64360 */
122 .translate_size = mv64360_translate_size,
123 .untranslate_size = mv64360_untranslate_size,
124 .set_pci2mem_window = mv64360_set_pci2mem_window,
125 .set_pci2regs_window = mv64360_set_pci2regs_window,
126 .is_enabled_32bit = mv64360_is_enabled_32bit,
127 .enable_window_32bit = mv64360_enable_window_32bit,
128 .disable_window_32bit = mv64360_disable_window_32bit,
129 .enable_window_64bit = mv64360_enable_window_64bit,
130 .disable_window_64bit = mv64360_disable_window_64bit,
131 .disable_all_windows = mv64360_disable_all_windows,
132 .config_io2mem_windows = mv64360_config_io2mem_windows,
133 .set_mpsc2regs_window = mv64360_set_mpsc2regs_window,
134 .chip_specific_init = mv64360_chip_specific_init,
135 .window_tab_32bit = mv64360_32bit_windows,
136 .window_tab_64bit = mv64360_64bit_windows,
137};
138
139static struct mv64x60_chip_info mv64460_ci __initdata = { /* MV64460 */
140 .translate_size = mv64360_translate_size,
141 .untranslate_size = mv64360_untranslate_size,
142 .set_pci2mem_window = mv64360_set_pci2mem_window,
143 .set_pci2regs_window = mv64360_set_pci2regs_window,
144 .is_enabled_32bit = mv64360_is_enabled_32bit,
145 .enable_window_32bit = mv64360_enable_window_32bit,
146 .disable_window_32bit = mv64360_disable_window_32bit,
147 .enable_window_64bit = mv64360_enable_window_64bit,
148 .disable_window_64bit = mv64360_disable_window_64bit,
149 .disable_all_windows = mv64360_disable_all_windows,
150 .config_io2mem_windows = mv64360_config_io2mem_windows,
151 .set_mpsc2regs_window = mv64360_set_mpsc2regs_window,
152 .chip_specific_init = mv64460_chip_specific_init,
153 .window_tab_32bit = mv64360_32bit_windows,
154 .window_tab_64bit = mv64360_64bit_windows,
155};
156
157/*
158 *****************************************************************************
159 *
160 * Platform Device Definitions
161 *
162 *****************************************************************************
163 */
164#ifdef CONFIG_SERIAL_MPSC
165static struct mpsc_shared_pdata mv64x60_mpsc_shared_pdata = {
166 .mrr_val = 0x3ffffe38,
167 .rcrr_val = 0,
168 .tcrr_val = 0,
169 .intr_cause_val = 0,
170 .intr_mask_val = 0,
171};
172
173static struct resource mv64x60_mpsc_shared_resources[] = {
174 /* Do not change the order of the IORESOURCE_MEM resources */
175 [0] = {
176 .name = "mpsc routing base",
177 .start = MV64x60_MPSC_ROUTING_OFFSET,
178 .end = MV64x60_MPSC_ROUTING_OFFSET +
179 MPSC_ROUTING_REG_BLOCK_SIZE - 1,
180 .flags = IORESOURCE_MEM,
181 },
182 [1] = {
183 .name = "sdma intr base",
184 .start = MV64x60_SDMA_INTR_OFFSET,
185 .end = MV64x60_SDMA_INTR_OFFSET +
186 MPSC_SDMA_INTR_REG_BLOCK_SIZE - 1,
187 .flags = IORESOURCE_MEM,
188 },
189};
190
191static struct platform_device mpsc_shared_device = { /* Shared device */
192 .name = MPSC_SHARED_NAME,
193 .id = 0,
194 .num_resources = ARRAY_SIZE(mv64x60_mpsc_shared_resources),
195 .resource = mv64x60_mpsc_shared_resources,
196 .dev = {
197 .platform_data = &mv64x60_mpsc_shared_pdata,
198 },
199};
200
201static struct mpsc_pdata mv64x60_mpsc0_pdata = {
202 .mirror_regs = 0,
203 .cache_mgmt = 0,
204 .max_idle = 0,
205 .default_baud = 9600,
206 .default_bits = 8,
207 .default_parity = 'n',
208 .default_flow = 'n',
209 .chr_1_val = 0x00000000,
210 .chr_2_val = 0x00000000,
211 .chr_10_val = 0x00000003,
212 .mpcr_val = 0,
213 .bcr_val = 0,
214 .brg_can_tune = 0,
215 .brg_clk_src = 8, /* Default to TCLK */
216 .brg_clk_freq = 100000000, /* Default to 100 MHz */
217};
218
219static struct resource mv64x60_mpsc0_resources[] = {
220 /* Do not change the order of the IORESOURCE_MEM resources */
221 [0] = {
222 .name = "mpsc 0 base",
223 .start = MV64x60_MPSC_0_OFFSET,
224 .end = MV64x60_MPSC_0_OFFSET + MPSC_REG_BLOCK_SIZE - 1,
225 .flags = IORESOURCE_MEM,
226 },
227 [1] = {
228 .name = "sdma 0 base",
229 .start = MV64x60_SDMA_0_OFFSET,
230 .end = MV64x60_SDMA_0_OFFSET + MPSC_SDMA_REG_BLOCK_SIZE - 1,
231 .flags = IORESOURCE_MEM,
232 },
233 [2] = {
234 .name = "brg 0 base",
235 .start = MV64x60_BRG_0_OFFSET,
236 .end = MV64x60_BRG_0_OFFSET + MPSC_BRG_REG_BLOCK_SIZE - 1,
237 .flags = IORESOURCE_MEM,
238 },
239 [3] = {
240 .name = "sdma 0 irq",
241 .start = MV64x60_IRQ_SDMA_0,
242 .end = MV64x60_IRQ_SDMA_0,
243 .flags = IORESOURCE_IRQ,
244 },
245};
246
247static struct platform_device mpsc0_device = {
248 .name = MPSC_CTLR_NAME,
249 .id = 0,
250 .num_resources = ARRAY_SIZE(mv64x60_mpsc0_resources),
251 .resource = mv64x60_mpsc0_resources,
252 .dev = {
253 .platform_data = &mv64x60_mpsc0_pdata,
254 },
255};
256
257static struct mpsc_pdata mv64x60_mpsc1_pdata = {
258 .mirror_regs = 0,
259 .cache_mgmt = 0,
260 .max_idle = 0,
261 .default_baud = 9600,
262 .default_bits = 8,
263 .default_parity = 'n',
264 .default_flow = 'n',
265 .chr_1_val = 0x00000000,
267 .chr_2_val = 0x00000000,
268 .chr_10_val = 0x00000003,
269 .mpcr_val = 0,
270 .bcr_val = 0,
271 .brg_can_tune = 0,
272 .brg_clk_src = 8, /* Default to TCLK */
273 .brg_clk_freq = 100000000, /* Default to 100 MHz */
274};
275
276static struct resource mv64x60_mpsc1_resources[] = {
277 /* Do not change the order of the IORESOURCE_MEM resources */
278 [0] = {
279 .name = "mpsc 1 base",
280 .start = MV64x60_MPSC_1_OFFSET,
281 .end = MV64x60_MPSC_1_OFFSET + MPSC_REG_BLOCK_SIZE - 1,
282 .flags = IORESOURCE_MEM,
283 },
284 [1] = {
285 .name = "sdma 1 base",
286 .start = MV64x60_SDMA_1_OFFSET,
287 .end = MV64x60_SDMA_1_OFFSET + MPSC_SDMA_REG_BLOCK_SIZE - 1,
288 .flags = IORESOURCE_MEM,
289 },
290 [2] = {
291 .name = "brg 1 base",
292 .start = MV64x60_BRG_1_OFFSET,
293 .end = MV64x60_BRG_1_OFFSET + MPSC_BRG_REG_BLOCK_SIZE - 1,
294 .flags = IORESOURCE_MEM,
295 },
296 [3] = {
297 .name = "sdma 1 irq",
298 .start = MV64360_IRQ_SDMA_1,
299 .end = MV64360_IRQ_SDMA_1,
300 .flags = IORESOURCE_IRQ,
301 },
302};
303
304static struct platform_device mpsc1_device = {
305 .name = MPSC_CTLR_NAME,
306 .id = 1,
307 .num_resources = ARRAY_SIZE(mv64x60_mpsc1_resources),
308 .resource = mv64x60_mpsc1_resources,
309 .dev = {
310 .platform_data = &mv64x60_mpsc1_pdata,
311 },
312};
313#endif
314
6651a5c3 315#if defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE)
316static struct resource mv64x60_eth_shared_resources[] = {
317 [0] = {
318 .name = "ethernet shared base",
319 .start = MV643XX_ETH_SHARED_REGS,
320 .end = MV643XX_ETH_SHARED_REGS +
321 MV643XX_ETH_SHARED_REGS_SIZE - 1,
322 .flags = IORESOURCE_MEM,
323 },
324};
325
326static struct platform_device mv64x60_eth_shared_device = {
327 .name = MV643XX_ETH_SHARED_NAME,
328 .id = 0,
329 .num_resources = ARRAY_SIZE(mv64x60_eth_shared_resources),
330 .resource = mv64x60_eth_shared_resources,
331};
332
333#ifdef CONFIG_MV643XX_ETH_0
334static struct resource mv64x60_eth0_resources[] = {
335 [0] = {
336 .name = "eth0 irq",
337 .start = MV64x60_IRQ_ETH_0,
338 .end = MV64x60_IRQ_ETH_0,
339 .flags = IORESOURCE_IRQ,
340 },
341};
342
343static struct mv643xx_eth_platform_data eth0_pd = {
344 .port_number = 0,
345};
346
347static struct platform_device eth0_device = {
348 .name = MV643XX_ETH_NAME,
349 .id = 0,
350 .num_resources = ARRAY_SIZE(mv64x60_eth0_resources),
351 .resource = mv64x60_eth0_resources,
352 .dev = {
353 .platform_data = &eth0_pd,
354 },
355};
356#endif
357
358#ifdef CONFIG_MV643XX_ETH_1
359static struct resource mv64x60_eth1_resources[] = {
360 [0] = {
361 .name = "eth1 irq",
362 .start = MV64x60_IRQ_ETH_1,
363 .end = MV64x60_IRQ_ETH_1,
364 .flags = IORESOURCE_IRQ,
365 },
366};
367
368static struct mv643xx_eth_platform_data eth1_pd = {
369 .port_number = 1,
370};
371
372static struct platform_device eth1_device = {
373 .name = MV643XX_ETH_NAME,
374 .id = 1,
375 .num_resources = ARRAY_SIZE(mv64x60_eth1_resources),
376 .resource = mv64x60_eth1_resources,
377 .dev = {
378 .platform_data = &eth1_pd,
379 },
380};
381#endif
382
383#ifdef CONFIG_MV643XX_ETH_2
384static struct resource mv64x60_eth2_resources[] = {
385 [0] = {
386 .name = "eth2 irq",
387 .start = MV64x60_IRQ_ETH_2,
388 .end = MV64x60_IRQ_ETH_2,
389 .flags = IORESOURCE_IRQ,
390 },
391};
392
393static struct mv643xx_eth_platform_data eth2_pd = {
394 .port_number = 2,
395};
396
397static struct platform_device eth2_device = {
398 .name = MV643XX_ETH_NAME,
399 .id = 2,
400 .num_resources = ARRAY_SIZE(mv64x60_eth2_resources),
401 .resource = mv64x60_eth2_resources,
402 .dev = {
403 .platform_data = &eth2_pd,
404 },
405};
406#endif
407#endif
408
409#ifdef CONFIG_I2C_MV64XXX
410static struct mv64xxx_i2c_pdata mv64xxx_i2c_pdata = {
411 .freq_m = 8,
412 .freq_n = 3,
413 .timeout = 1000, /* Default timeout of 1 second */
414};
415
416static struct resource mv64xxx_i2c_resources[] = {
417 /* Do not change the order of the IORESOURCE_MEM resources */
418 [0] = {
419 .name = "mv64xxx i2c base",
420 .start = MV64XXX_I2C_OFFSET,
421 .end = MV64XXX_I2C_OFFSET + MV64XXX_I2C_REG_BLOCK_SIZE - 1,
422 .flags = IORESOURCE_MEM,
423 },
424 [1] = {
425 .name = "mv64xxx i2c irq",
426 .start = MV64x60_IRQ_I2C,
427 .end = MV64x60_IRQ_I2C,
428 .flags = IORESOURCE_IRQ,
429 },
430};
431
432static struct platform_device i2c_device = {
433 .name = MV64XXX_I2C_CTLR_NAME,
434 .id = 0,
435 .num_resources = ARRAY_SIZE(mv64xxx_i2c_resources),
436 .resource = mv64xxx_i2c_resources,
437 .dev = {
438 .platform_data = &mv64xxx_i2c_pdata,
439 },
440};
441#endif
442
443#ifdef CONFIG_WATCHDOG
444static struct mv64x60_wdt_pdata mv64x60_wdt_pdata = {
445 .timeout = 10, /* default watchdog expiry in seconds */
446 .bus_clk = 133, /* default bus clock in MHz */
447};
448
449static struct resource mv64x60_wdt_resources[] = {
450 [0] = {
451 .name = "mv64x60 wdt base",
452 .start = MV64x60_WDT_WDC,
453 .end = MV64x60_WDT_WDC + 8 - 1, /* two 32-bit registers */
454 .flags = IORESOURCE_MEM,
455 },
456};
457
458static struct platform_device wdt_device = {
459 .name = MV64x60_WDT_NAME,
460 .id = 0,
461 .num_resources = ARRAY_SIZE(mv64x60_wdt_resources),
462 .resource = mv64x60_wdt_resources,
463 .dev = {
464 .platform_data = &mv64x60_wdt_pdata,
465 },
466};
467#endif
468
469#if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
470static struct mv64xxx_pdata mv64xxx_pdata = {
471 .hs_reg_valid = 0,
472};
473
474static struct platform_device mv64xxx_device = { /* general mv64x60 stuff */
475 .name = MV64XXX_DEV_NAME,
476 .id = 0,
477 .dev = {
478 .platform_data = &mv64xxx_pdata,
479 },
480};
481#endif
482
483static struct platform_device *mv64x60_pd_devs[] __initdata = {
484#ifdef CONFIG_SERIAL_MPSC
485 &mpsc_shared_device,
486 &mpsc0_device,
487 &mpsc1_device,
488#endif
6651a5c3 489#if defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE)
490 &mv64x60_eth_shared_device,
491#endif
492#ifdef CONFIG_MV643XX_ETH_0
493 &eth0_device,
494#endif
495#ifdef CONFIG_MV643XX_ETH_1
496 &eth1_device,
497#endif
498#ifdef CONFIG_MV643XX_ETH_2
499 &eth2_device,
500#endif
501#ifdef CONFIG_I2C_MV64XXX
502 &i2c_device,
503#endif
504#ifdef CONFIG_MV64X60_WDT
505 &wdt_device,
506#endif
507#if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
508 &mv64xxx_device,
509#endif
510};
511
512/*
513 *****************************************************************************
514 *
515 * Bridge Initialization Routines
516 *
517 *****************************************************************************
518 */
519/*
520 * mv64x60_init()
521 *
a8de5ce9 522 * Initialize the bridge based on the settings passed in via 'si'. The bridge
523 * handle, 'bh', will be set so that it can be used to make subsequent
524 * calls to routines in this file.
525 */
526int __init
527mv64x60_init(struct mv64x60_handle *bh, struct mv64x60_setup_info *si)
528{
529 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2];
530
531 if (ppc_md.progress)
532 ppc_md.progress("mv64x60 initialization", 0x0);
533
534 spin_lock_init(&mv64x60_lock);
535 mv64x60_early_init(bh, si);
536
537 if (mv64x60_get_type(bh) || mv64x60_setup_for_chip(bh)) {
538 iounmap(bh->v_base);
539 bh->v_base = 0;
540 if (ppc_md.progress)
541 ppc_md.progress("mv64x60_init: Can't determine chip",0);
542 return -1;
543 }
544
545 bh->ci->disable_all_windows(bh, si);
546 mv64x60_get_mem_windows(bh, mem_windows);
547 mv64x60_config_cpu2mem_windows(bh, si, mem_windows);
548
549 if (bh->ci->config_io2mem_windows)
550 bh->ci->config_io2mem_windows(bh, si, mem_windows);
551 if (bh->ci->set_mpsc2regs_window)
552 bh->ci->set_mpsc2regs_window(bh, si->phys_reg_base);
553
554 if (si->pci_1.enable_bus) {
555 bh->io_base_b = (u32)ioremap(si->pci_1.pci_io.cpu_base,
556 si->pci_1.pci_io.size);
557 isa_io_base = bh->io_base_b;
558 }
559
560 if (si->pci_0.enable_bus) {
561 bh->io_base_a = (u32)ioremap(si->pci_0.pci_io.cpu_base,
562 si->pci_0.pci_io.size);
563 isa_io_base = bh->io_base_a;
564
565 mv64x60_alloc_hose(bh, MV64x60_PCI0_CONFIG_ADDR,
566 MV64x60_PCI0_CONFIG_DATA, &bh->hose_a);
567 mv64x60_config_resources(bh->hose_a, &si->pci_0, bh->io_base_a);
568 mv64x60_config_pci_params(bh->hose_a, &si->pci_0);
569
570 mv64x60_config_cpu2pci_windows(bh, &si->pci_0, 0);
571 mv64x60_config_pci2mem_windows(bh, bh->hose_a, &si->pci_0, 0,
572 mem_windows);
573 bh->ci->set_pci2regs_window(bh, bh->hose_a, 0,
574 si->phys_reg_base);
575 }
576
577 if (si->pci_1.enable_bus) {
578 mv64x60_alloc_hose(bh, MV64x60_PCI1_CONFIG_ADDR,
579 MV64x60_PCI1_CONFIG_DATA, &bh->hose_b);
580 mv64x60_config_resources(bh->hose_b, &si->pci_1, bh->io_base_b);
581 mv64x60_config_pci_params(bh->hose_b, &si->pci_1);
582
583 mv64x60_config_cpu2pci_windows(bh, &si->pci_1, 1);
584 mv64x60_config_pci2mem_windows(bh, bh->hose_b, &si->pci_1, 1,
585 mem_windows);
586 bh->ci->set_pci2regs_window(bh, bh->hose_b, 1,
587 si->phys_reg_base);
588 }
589
590 bh->ci->chip_specific_init(bh, si);
591 mv64x60_pd_fixup(bh, mv64x60_pd_devs, ARRAY_SIZE(mv64x60_pd_devs));
592
593 return 0;
594}
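/*
 * Illustrative sketch only (not part of the original file): roughly how a
 * board port might drive mv64x60_init() from its setup code.  The setup_info
 * field names are the ones used above; the register base, the board function
 * name, and the choice of enabled buses are made-up placeholders.
 */
#if 0
static struct mv64x60_handle bridge_handle;

static void __init hypothetical_board_bridge_init(void)
{
	struct mv64x60_setup_info si;

	memset(&si, 0, sizeof(si));
	si.phys_reg_base = 0xf1000000;	/* board-specific bridge reg base */
	si.pci_0.enable_bus = 1;	/* assume only PCI bus 0 is wired up */
	si.pci_1.enable_bus = 0;

	if (mv64x60_init(&bridge_handle, &si))
		printk(KERN_ERR "mv64x60 bridge initialization failed\n");
}
#endif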
595
596/*
597 * mv64x60_early_init()
598 *
599 * Do some bridge work that must take place before we start messing with
600 * the bridge for real.
601 */
602void __init
603mv64x60_early_init(struct mv64x60_handle *bh, struct mv64x60_setup_info *si)
604{
605 struct pci_controller hose_a, hose_b;
606
607 memset(bh, 0, sizeof(*bh));
608
609 bh->p_base = si->phys_reg_base;
610 bh->v_base = ioremap(bh->p_base, MV64x60_INTERNAL_SPACE_SIZE);
611
612 mv64x60_bridge_pbase = bh->p_base;
613 mv64x60_bridge_vbase = bh->v_base;
614
615 /* Assuming pci mode [reserved] bits 4:5 on 64260 are 0 */
616 bh->pci_mode_a = mv64x60_read(bh, MV64x60_PCI0_MODE) &
617 MV64x60_PCIMODE_MASK;
618 bh->pci_mode_b = mv64x60_read(bh, MV64x60_PCI1_MODE) &
619 MV64x60_PCIMODE_MASK;
620
621 /* Need temporary hose structs to call mv64x60_set_bus() */
622 memset(&hose_a, 0, sizeof(hose_a));
623 memset(&hose_b, 0, sizeof(hose_b));
624 setup_indirect_pci_nomap(&hose_a, bh->v_base + MV64x60_PCI0_CONFIG_ADDR,
625 bh->v_base + MV64x60_PCI0_CONFIG_DATA);
626 setup_indirect_pci_nomap(&hose_b, bh->v_base + MV64x60_PCI1_CONFIG_ADDR,
627 bh->v_base + MV64x60_PCI1_CONFIG_DATA);
628 bh->hose_a = &hose_a;
629 bh->hose_b = &hose_b;
630
631#if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
632 /* Save a copy of hose_a for sysfs functions -- hack */
633 memcpy(&sysfs_hose_a, &hose_a, sizeof(hose_a));
634#endif
635
636 mv64x60_set_bus(bh, 0, 0);
637 mv64x60_set_bus(bh, 1, 0);
638
639 bh->hose_a = NULL;
640 bh->hose_b = NULL;
641
642 /* Clear bit 0 of PCI addr decode control so PCI->CPU remap 1:1 */
643 mv64x60_clr_bits(bh, MV64x60_PCI0_PCI_DECODE_CNTL, 0x00000001);
644 mv64x60_clr_bits(bh, MV64x60_PCI1_PCI_DECODE_CNTL, 0x00000001);
645
646 /* Bit 12 MUST be 0; set bit 27--don't auto-update cpu remap regs */
647 mv64x60_clr_bits(bh, MV64x60_CPU_CONFIG, (1<<12));
648 mv64x60_set_bits(bh, MV64x60_CPU_CONFIG, (1<<27));
649
650 mv64x60_set_bits(bh, MV64x60_PCI0_TO_RETRY, 0xffff);
651 mv64x60_set_bits(bh, MV64x60_PCI1_TO_RETRY, 0xffff);
652}
653
654/*
655 *****************************************************************************
656 *
657 * Window Config Routines
658 *
659 *****************************************************************************
660 */
661/*
662 * mv64x60_get_32bit_window()
663 *
664 * Determine the base address and size of a 32-bit window on the bridge.
665 */
666void __init
667mv64x60_get_32bit_window(struct mv64x60_handle *bh, u32 window,
668 u32 *base, u32 *size)
669{
670 u32 val, base_reg, size_reg, base_bits, size_bits;
671 u32 (*get_from_field)(u32 val, u32 num_bits);
672
673 base_reg = bh->ci->window_tab_32bit[window].base_reg;
674
675 if (base_reg != 0) {
676 size_reg = bh->ci->window_tab_32bit[window].size_reg;
677 base_bits = bh->ci->window_tab_32bit[window].base_bits;
678 size_bits = bh->ci->window_tab_32bit[window].size_bits;
679 get_from_field= bh->ci->window_tab_32bit[window].get_from_field;
680
681 val = mv64x60_read(bh, base_reg);
682 *base = get_from_field(val, base_bits);
683
684 if (size_reg != 0) {
685 val = mv64x60_read(bh, size_reg);
686 val = get_from_field(val, size_bits);
687 *size = bh->ci->untranslate_size(*base, val, size_bits);
d01c08c9 688 } else
1da177e4 689 *size = 0;
d01c08c9 690 } else {
691 *base = 0;
692 *size = 0;
693 }
694
695 pr_debug("get 32bit window: %d, base: 0x%x, size: 0x%x\n",
696 window, *base, *size);
697}
698
699/*
700 * mv64x60_set_32bit_window()
701 *
702 * Set the base address and size of a 32-bit window on the bridge.
703 */
704void __init
705mv64x60_set_32bit_window(struct mv64x60_handle *bh, u32 window,
706 u32 base, u32 size, u32 other_bits)
707{
708 u32 val, base_reg, size_reg, base_bits, size_bits;
709 u32 (*map_to_field)(u32 val, u32 num_bits);
710
711 pr_debug("set 32bit window: %d, base: 0x%x, size: 0x%x, other: 0x%x\n",
712 window, base, size, other_bits);
713
714 base_reg = bh->ci->window_tab_32bit[window].base_reg;
715
716 if (base_reg != 0) {
717 size_reg = bh->ci->window_tab_32bit[window].size_reg;
718 base_bits = bh->ci->window_tab_32bit[window].base_bits;
719 size_bits = bh->ci->window_tab_32bit[window].size_bits;
720 map_to_field = bh->ci->window_tab_32bit[window].map_to_field;
721
722 val = map_to_field(base, base_bits) | other_bits;
723 mv64x60_write(bh, base_reg, val);
724
725 if (size_reg != 0) {
726 val = bh->ci->translate_size(base, size, size_bits);
727 val = map_to_field(val, size_bits);
728 mv64x60_write(bh, size_reg, val);
729 }
730
731 (void)mv64x60_read(bh, base_reg); /* Flush FIFO */
732 }
733}
734
735/*
736 * mv64x60_get_64bit_window()
737 *
738 * Determine the base address and size of a 64-bit window on the bridge.
739 */
740void __init
741mv64x60_get_64bit_window(struct mv64x60_handle *bh, u32 window,
742 u32 *base_hi, u32 *base_lo, u32 *size)
743{
744 u32 val, base_lo_reg, size_reg, base_lo_bits, size_bits;
745 u32 (*get_from_field)(u32 val, u32 num_bits);
746
747 base_lo_reg = bh->ci->window_tab_64bit[window].base_lo_reg;
748
749 if (base_lo_reg != 0) {
750 size_reg = bh->ci->window_tab_64bit[window].size_reg;
751 base_lo_bits = bh->ci->window_tab_64bit[window].base_lo_bits;
752 size_bits = bh->ci->window_tab_64bit[window].size_bits;
753 get_from_field= bh->ci->window_tab_64bit[window].get_from_field;
754
755 *base_hi = mv64x60_read(bh,
756 bh->ci->window_tab_64bit[window].base_hi_reg);
757
758 val = mv64x60_read(bh, base_lo_reg);
759 *base_lo = get_from_field(val, base_lo_bits);
760
761 if (size_reg != 0) {
762 val = mv64x60_read(bh, size_reg);
763 val = get_from_field(val, size_bits);
764 *size = bh->ci->untranslate_size(*base_lo, val,
765 size_bits);
d01c08c9 766 } else
1da177e4 767 *size = 0;
d01c08c9 768 } else {
769 *base_hi = 0;
770 *base_lo = 0;
771 *size = 0;
772 }
773
774 pr_debug("get 64bit window: %d, base hi: 0x%x, base lo: 0x%x, "
775 "size: 0x%x\n", window, *base_hi, *base_lo, *size);
776}
777
778/*
779 * mv64x60_set_64bit_window()
780 *
781 * Set the base address and size of a 64-bit window on the bridge.
782 */
783void __init
784mv64x60_set_64bit_window(struct mv64x60_handle *bh, u32 window,
785 u32 base_hi, u32 base_lo, u32 size, u32 other_bits)
786{
787 u32 val, base_lo_reg, size_reg, base_lo_bits, size_bits;
788 u32 (*map_to_field)(u32 val, u32 num_bits);
789
790 pr_debug("set 64bit window: %d, base hi: 0x%x, base lo: 0x%x, "
791 "size: 0x%x, other: 0x%x\n",
792 window, base_hi, base_lo, size, other_bits);
793
794 base_lo_reg = bh->ci->window_tab_64bit[window].base_lo_reg;
795
796 if (base_lo_reg != 0) {
797 size_reg = bh->ci->window_tab_64bit[window].size_reg;
798 base_lo_bits = bh->ci->window_tab_64bit[window].base_lo_bits;
799 size_bits = bh->ci->window_tab_64bit[window].size_bits;
800 map_to_field = bh->ci->window_tab_64bit[window].map_to_field;
801
802 mv64x60_write(bh, bh->ci->window_tab_64bit[window].base_hi_reg,
803 base_hi);
804
805 val = map_to_field(base_lo, base_lo_bits) | other_bits;
806 mv64x60_write(bh, base_lo_reg, val);
807
808 if (size_reg != 0) {
809 val = bh->ci->translate_size(base_lo, size, size_bits);
810 val = map_to_field(val, size_bits);
811 mv64x60_write(bh, size_reg, val);
812 }
813
814 (void)mv64x60_read(bh, base_lo_reg); /* Flush FIFO */
815 }
816}
817
818/*
819 * mv64x60_mask()
820 *
821 * Take the high-order 'num_bits' of 'val' & mask off low bits.
822 */
823u32 __init
824mv64x60_mask(u32 val, u32 num_bits)
825{
826 return val & (0xffffffff << (32 - num_bits));
827}
828
829/*
830 * mv64x60_shift_left()
831 *
832 * Take the low-order 'num_bits' of 'val', shift left to align at bit 31 (MSB).
833 */
834u32 __init
835mv64x60_shift_left(u32 val, u32 num_bits)
836{
837 return val << (32 - num_bits);
838}
839
840/*
841 * mv64x60_shift_right()
842 *
843 * Take the high-order 'num_bits' of 'val', shift right to align at bit 0 (LSB).
844 */
845u32 __init
846mv64x60_shift_right(u32 val, u32 num_bits)
847{
848 return val >> (32 - num_bits);
849}
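/*
 * Worked example (illustrative only): with num_bits = 20, the field width
 * used for the PCI BAR size registers below,
 *
 *	mv64x60_mask(0x12345678, 20)        == 0x12345000
 *	mv64x60_shift_right(0x12345678, 20) == 0x00012345
 *	mv64x60_shift_left(0x00012345, 20)  == 0x12345000
 *
 * i.e. mask() keeps only the bits representable in a 20-bit register field,
 * while shift_left()/shift_right() move a value between its in-register
 * position (aligned at bit 31) and its numeric position (aligned at bit 0).
 */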
850
851/*
852 *****************************************************************************
853 *
854 * Chip Identification Routines
855 *
856 *****************************************************************************
857 */
858/*
859 * mv64x60_get_type()
860 *
861 * Determine the type of bridge chip we have.
862 */
863int __init
864mv64x60_get_type(struct mv64x60_handle *bh)
865{
866 struct pci_controller hose;
867 u16 val;
868 u8 save_exclude;
869
870 memset(&hose, 0, sizeof(hose));
871 setup_indirect_pci_nomap(&hose, bh->v_base + MV64x60_PCI0_CONFIG_ADDR,
872 bh->v_base + MV64x60_PCI0_CONFIG_DATA);
873
874 save_exclude = mv64x60_pci_exclude_bridge;
875 mv64x60_pci_exclude_bridge = 0;
876 /* Sanity check of bridge's Vendor ID */
877 early_read_config_word(&hose, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, &val);
878
879 if (val != PCI_VENDOR_ID_MARVELL) {
880 mv64x60_pci_exclude_bridge = save_exclude;
881 return -1;
882 }
883
884 /* Get the revision of the chip */
885 early_read_config_word(&hose, 0, PCI_DEVFN(0, 0), PCI_CLASS_REVISION,
886 &val);
887 bh->rev = (u32)(val & 0xff);
888
889 /* Figure out the type of Marvell bridge it is */
890 early_read_config_word(&hose, 0, PCI_DEVFN(0, 0), PCI_DEVICE_ID, &val);
891 mv64x60_pci_exclude_bridge = save_exclude;
892
893 switch (val) {
894 case PCI_DEVICE_ID_MARVELL_GT64260:
895 switch (bh->rev) {
896 case GT64260_REV_A:
897 bh->type = MV64x60_TYPE_GT64260A;
898 break;
899
900 default:
901 printk(KERN_WARNING "Unsupported GT64260 rev %04x\n",
902 bh->rev);
903 /* Assume it's similar to a 'B' rev and fall through */
904 case GT64260_REV_B:
905 bh->type = MV64x60_TYPE_GT64260B;
906 break;
907 }
908 break;
909
910 case PCI_DEVICE_ID_MARVELL_MV64360:
911 /* Marvell won't tell me how to distinguish a 64361 & 64362 */
912 bh->type = MV64x60_TYPE_MV64360;
913 break;
914
915 case PCI_DEVICE_ID_MARVELL_MV64460:
916 bh->type = MV64x60_TYPE_MV64460;
917 break;
918
919 default:
920 printk(KERN_ERR "Unknown Marvell bridge type %04x\n", val);
921 return -1;
922 }
923
924 /* Hang onto bridge type & rev for PIC code */
925 mv64x60_bridge_type = bh->type;
926 mv64x60_bridge_rev = bh->rev;
927
928 return 0;
929}
930
931/*
932 * mv64x60_setup_for_chip()
933 *
934 * Set 'bh' to use the proper set of routines for the bridge chip that we have.
935 */
936int __init
937mv64x60_setup_for_chip(struct mv64x60_handle *bh)
938{
939 int rc = 0;
940
941 /* Set up chip-specific info based on the chip/bridge type */
942 switch(bh->type) {
943 case MV64x60_TYPE_GT64260A:
944 bh->ci = &gt64260a_ci;
945 break;
946
947 case MV64x60_TYPE_GT64260B:
948 bh->ci = &gt64260b_ci;
949 break;
950
951 case MV64x60_TYPE_MV64360:
952 bh->ci = &mv64360_ci;
953 break;
954
955 case MV64x60_TYPE_MV64460:
956 bh->ci = &mv64460_ci;
957 break;
958
959 case MV64x60_TYPE_INVALID:
960 default:
961 if (ppc_md.progress)
962 ppc_md.progress("mv64x60: Unsupported bridge", 0x0);
963 printk(KERN_ERR "mv64x60: Unsupported bridge\n");
964 rc = -1;
965 }
966
967 return rc;
968}
969
970/*
971 * mv64x60_get_bridge_vbase()
972 *
973 * Return the virtual address of the bridge's registers.
974 */
a7625d6e 975void __iomem *
976mv64x60_get_bridge_vbase(void)
977{
978 return mv64x60_bridge_vbase;
979}
980
981/*
982 * mv64x60_get_bridge_type()
983 *
984 * Return the type of bridge on the platform.
985 */
986u32
987mv64x60_get_bridge_type(void)
988{
989 return mv64x60_bridge_type;
990}
991
992/*
993 * mv64x60_get_bridge_rev()
994 *
995 * Return the revision of the bridge on the platform.
996 */
997u32
998mv64x60_get_bridge_rev(void)
999{
1000 return mv64x60_bridge_rev;
1001}
1002
1003/*
1004 *****************************************************************************
1005 *
1006 * System Memory Window Related Routines
1007 *
1008 *****************************************************************************
1009 */
1010/*
1011 * mv64x60_get_mem_size()
1012 *
1013 * Calculate the amount of memory that the memory controller is set up for.
1014 * This should only be used by board-specific code if there is no other
1015 * way to determine the amount of memory in the system.
1016 */
1017u32 __init
1018mv64x60_get_mem_size(u32 bridge_base, u32 chip_type)
1019{
1020 struct mv64x60_handle bh;
1021 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2];
1022 u32 rc = 0;
1023
1024 memset(&bh, 0, sizeof(bh));
1025
1026 bh.type = chip_type;
1027 bh.v_base = (void *)bridge_base;
1028
1029 if (!mv64x60_setup_for_chip(&bh)) {
1030 mv64x60_get_mem_windows(&bh, mem_windows);
1031 rc = mv64x60_calc_mem_size(&bh, mem_windows);
1032 }
1033
1034 return rc;
1035}
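/*
 * Illustrative sketch only: a board's memory-sizing hook might fall back to
 * mv64x60_get_mem_size() when firmware does not report the memory size.  The
 * register base, chip type, and function name below are made-up placeholders;
 * the register base must already be accessible (mapped) when this runs.
 */
#if 0
unsigned long __init hypothetical_board_find_end_of_memory(void)
{
	return mv64x60_get_mem_size(0xf1000000 /* mapped bridge regs */,
			MV64x60_TYPE_MV64360);
}
#endif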
1036
1037/*
1038 * mv64x60_get_mem_windows()
1039 *
1040 * Get the values in the memory controller & return in the 'mem_windows' array.
1041 */
1042void __init
1043mv64x60_get_mem_windows(struct mv64x60_handle *bh,
1044 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
1045{
1046 u32 i, win;
1047
1048 for (win=MV64x60_CPU2MEM_0_WIN,i=0;win<=MV64x60_CPU2MEM_3_WIN;win++,i++)
1049 if (bh->ci->is_enabled_32bit(bh, win))
1050 mv64x60_get_32bit_window(bh, win,
1051 &mem_windows[i][0], &mem_windows[i][1]);
1052 else {
1053 mem_windows[i][0] = 0;
1054 mem_windows[i][1] = 0;
1055 }
1056}
1057
1058/*
1059 * mv64x60_calc_mem_size()
1060 *
1061 * Using the memory controller register values in 'mem_windows', determine
1062 * how much memory it is set up for.
1063 */
1064u32 __init
1065mv64x60_calc_mem_size(struct mv64x60_handle *bh,
1066 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
1067{
1068 u32 i, total = 0;
1069
1070 for (i=0; i<MV64x60_CPU2MEM_WINDOWS; i++)
1071 total += mem_windows[i][1];
1072
1073 return total;
1074}
1075
1076/*
1077 *****************************************************************************
1078 *
1079 * CPU->System MEM, PCI Config Routines
1080 *
1081 *****************************************************************************
1082 */
1083/*
1084 * mv64x60_config_cpu2mem_windows()
1085 *
1086 * Configure CPU->Memory windows on the bridge.
1087 */
1088static u32 prot_tab[] __initdata = {
1089 MV64x60_CPU_PROT_0_WIN, MV64x60_CPU_PROT_1_WIN,
1090 MV64x60_CPU_PROT_2_WIN, MV64x60_CPU_PROT_3_WIN
1091};
1092
1093static u32 cpu_snoop_tab[] __initdata = {
1094 MV64x60_CPU_SNOOP_0_WIN, MV64x60_CPU_SNOOP_1_WIN,
1095 MV64x60_CPU_SNOOP_2_WIN, MV64x60_CPU_SNOOP_3_WIN
1096};
1097
1098void __init
1099mv64x60_config_cpu2mem_windows(struct mv64x60_handle *bh,
1100 struct mv64x60_setup_info *si,
1101 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
1102{
1103 u32 i, win;
1104
1105 /* Set CPU protection & snoop windows */
1106 for (win=MV64x60_CPU2MEM_0_WIN,i=0;win<=MV64x60_CPU2MEM_3_WIN;win++,i++)
1107 if (bh->ci->is_enabled_32bit(bh, win)) {
1108 mv64x60_set_32bit_window(bh, prot_tab[i],
1109 mem_windows[i][0], mem_windows[i][1],
1110 si->cpu_prot_options[i]);
1111 bh->ci->enable_window_32bit(bh, prot_tab[i]);
1112
1113 if (bh->ci->window_tab_32bit[cpu_snoop_tab[i]].
1114 base_reg != 0) {
1115 mv64x60_set_32bit_window(bh, cpu_snoop_tab[i],
1116 mem_windows[i][0], mem_windows[i][1],
1117 si->cpu_snoop_options[i]);
1118 bh->ci->enable_window_32bit(bh,
1119 cpu_snoop_tab[i]);
1120 }
1121
1122 }
1123}
1124
1125/*
1126 * mv64x60_config_cpu2pci_windows()
1127 *
1128 * Configure the CPU->PCI windows for one of the PCI buses.
1129 */
1130static u32 win_tab[2][4] __initdata = {
1131 { MV64x60_CPU2PCI0_IO_WIN, MV64x60_CPU2PCI0_MEM_0_WIN,
1132 MV64x60_CPU2PCI0_MEM_1_WIN, MV64x60_CPU2PCI0_MEM_2_WIN },
1133 { MV64x60_CPU2PCI1_IO_WIN, MV64x60_CPU2PCI1_MEM_0_WIN,
1134 MV64x60_CPU2PCI1_MEM_1_WIN, MV64x60_CPU2PCI1_MEM_2_WIN },
1135};
1136
1137static u32 remap_tab[2][4] __initdata = {
1138 { MV64x60_CPU2PCI0_IO_REMAP_WIN, MV64x60_CPU2PCI0_MEM_0_REMAP_WIN,
1139 MV64x60_CPU2PCI0_MEM_1_REMAP_WIN, MV64x60_CPU2PCI0_MEM_2_REMAP_WIN },
1140 { MV64x60_CPU2PCI1_IO_REMAP_WIN, MV64x60_CPU2PCI1_MEM_0_REMAP_WIN,
1141 MV64x60_CPU2PCI1_MEM_1_REMAP_WIN, MV64x60_CPU2PCI1_MEM_2_REMAP_WIN }
1142};
1143
1144void __init
1145mv64x60_config_cpu2pci_windows(struct mv64x60_handle *bh,
1146 struct mv64x60_pci_info *pi, u32 bus)
1147{
1148 int i;
1149
1150 if (pi->pci_io.size > 0) {
1151 mv64x60_set_32bit_window(bh, win_tab[bus][0],
1152 pi->pci_io.cpu_base, pi->pci_io.size, pi->pci_io.swap);
1153 mv64x60_set_32bit_window(bh, remap_tab[bus][0],
1154 pi->pci_io.pci_base_lo, 0, 0);
1155 bh->ci->enable_window_32bit(bh, win_tab[bus][0]);
d01c08c9 1156 } else /* Actually, the window should already be disabled */
1157 bh->ci->disable_window_32bit(bh, win_tab[bus][0]);
1158
1159 for (i=0; i<3; i++)
1160 if (pi->pci_mem[i].size > 0) {
1161 mv64x60_set_32bit_window(bh, win_tab[bus][i+1],
1162 pi->pci_mem[i].cpu_base, pi->pci_mem[i].size,
1163 pi->pci_mem[i].swap);
1164 mv64x60_set_64bit_window(bh, remap_tab[bus][i+1],
1165 pi->pci_mem[i].pci_base_hi,
1166 pi->pci_mem[i].pci_base_lo, 0, 0);
1167 bh->ci->enable_window_32bit(bh, win_tab[bus][i+1]);
d01c08c9 1168 } else /* Actually, the window should already be disabled */
1da177e4 1169 bh->ci->disable_window_32bit(bh, win_tab[bus][i+1]);
1170}
1171
1172/*
1173 *****************************************************************************
1174 *
1175 * PCI->System MEM Config Routines
1176 *
1177 *****************************************************************************
1178 */
1179/*
1180 * mv64x60_config_pci2mem_windows()
1181 *
1182 * Configure the PCI->Memory windows on the bridge.
1183 */
1184static u32 pci_acc_tab[2][4] __initdata = {
1185 { MV64x60_PCI02MEM_ACC_CNTL_0_WIN, MV64x60_PCI02MEM_ACC_CNTL_1_WIN,
1186 MV64x60_PCI02MEM_ACC_CNTL_2_WIN, MV64x60_PCI02MEM_ACC_CNTL_3_WIN },
1187 { MV64x60_PCI12MEM_ACC_CNTL_0_WIN, MV64x60_PCI12MEM_ACC_CNTL_1_WIN,
1188 MV64x60_PCI12MEM_ACC_CNTL_2_WIN, MV64x60_PCI12MEM_ACC_CNTL_3_WIN }
1189};
1190
1191static u32 pci_snoop_tab[2][4] __initdata = {
1192 { MV64x60_PCI02MEM_SNOOP_0_WIN, MV64x60_PCI02MEM_SNOOP_1_WIN,
1193 MV64x60_PCI02MEM_SNOOP_2_WIN, MV64x60_PCI02MEM_SNOOP_3_WIN },
1194 { MV64x60_PCI12MEM_SNOOP_0_WIN, MV64x60_PCI12MEM_SNOOP_1_WIN,
1195 MV64x60_PCI12MEM_SNOOP_2_WIN, MV64x60_PCI12MEM_SNOOP_3_WIN }
1196};
1197
1198static u32 pci_size_tab[2][4] __initdata = {
1199 { MV64x60_PCI0_MEM_0_SIZE, MV64x60_PCI0_MEM_1_SIZE,
1200 MV64x60_PCI0_MEM_2_SIZE, MV64x60_PCI0_MEM_3_SIZE },
1201 { MV64x60_PCI1_MEM_0_SIZE, MV64x60_PCI1_MEM_1_SIZE,
1202 MV64x60_PCI1_MEM_2_SIZE, MV64x60_PCI1_MEM_3_SIZE }
1203};
1204
1205void __init
1206mv64x60_config_pci2mem_windows(struct mv64x60_handle *bh,
1207 struct pci_controller *hose, struct mv64x60_pci_info *pi,
1208 u32 bus, u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
1209{
1210 u32 i, win;
1211
1212 /*
1213 * Set the access control, snoop, BAR size, and window base addresses.
1214 * The PCI->MEM window base addresses will exactly match the
1215 * CPU->MEM window base addresses.
1216 */
1217 for (win=MV64x60_CPU2MEM_0_WIN,i=0;win<=MV64x60_CPU2MEM_3_WIN;win++,i++)
1218 if (bh->ci->is_enabled_32bit(bh, win)) {
1219 mv64x60_set_64bit_window(bh,
1220 pci_acc_tab[bus][i], 0,
1221 mem_windows[i][0], mem_windows[i][1],
1222 pi->acc_cntl_options[i]);
1223 bh->ci->enable_window_64bit(bh, pci_acc_tab[bus][i]);
1224
1225 if (bh->ci->window_tab_64bit[
1226 pci_snoop_tab[bus][i]].base_lo_reg != 0) {
1227
1228 mv64x60_set_64bit_window(bh,
1229 pci_snoop_tab[bus][i], 0,
1230 mem_windows[i][0], mem_windows[i][1],
1231 pi->snoop_options[i]);
1232 bh->ci->enable_window_64bit(bh,
1233 pci_snoop_tab[bus][i]);
1234 }
1235
1236 bh->ci->set_pci2mem_window(hose, bus, i,
1237 mem_windows[i][0]);
1238 mv64x60_write(bh, pci_size_tab[bus][i],
1239 mv64x60_mask(mem_windows[i][1] - 1, 20));
1240
1241 /* Enable the window */
1242 mv64x60_clr_bits(bh, ((bus == 0) ?
1243 MV64x60_PCI0_BAR_ENABLE :
1244 MV64x60_PCI1_BAR_ENABLE), (1 << i));
1245 }
1246}
1247
1248/*
1249 *****************************************************************************
1250 *
1251 * Hose & Resource Alloc/Init Routines
1252 *
1253 *****************************************************************************
1254 */
1255/*
1256 * mv64x60_alloc_hose()
1257 *
1258 * Allocate a PCI hose structure for one of the bridge's PCI buses.
1259 */
1260void __init
1261mv64x60_alloc_hose(struct mv64x60_handle *bh, u32 cfg_addr, u32 cfg_data,
1262 struct pci_controller **hose)
1263{
1264 *hose = pcibios_alloc_controller();
1265 setup_indirect_pci_nomap(*hose, bh->v_base + cfg_addr,
1266 bh->v_base + cfg_data);
1267}
1268
1269/*
1270 * mv64x60_config_resources()
1271 *
1272 * Calculate the offsets, etc. for the hose structures to reflect all of
1273 * the address remapping that happens as you go from CPU->PCI and PCI->MEM.
1274 */
1275void __init
1276mv64x60_config_resources(struct pci_controller *hose,
1277 struct mv64x60_pci_info *pi, u32 io_base)
1278{
1279 int i;
1280 /* 2 hoses; 4 resources/hose; string <= 64 bytes */
1281 static char s[2][4][64];
1282
1283 if (pi->pci_io.size != 0) {
1284 sprintf(s[hose->index][0], "PCI hose %d I/O Space",
1285 hose->index);
1286 pci_init_resource(&hose->io_resource, io_base - isa_io_base,
1287 io_base - isa_io_base + pi->pci_io.size - 1,
1288 IORESOURCE_IO, s[hose->index][0]);
1289 hose->io_space.start = pi->pci_io.pci_base_lo;
1290 hose->io_space.end = pi->pci_io.pci_base_lo + pi->pci_io.size-1;
1291 hose->io_base_phys = pi->pci_io.cpu_base;
1292 hose->io_base_virt = (void *)isa_io_base;
1293 }
1294
1295 for (i=0; i<3; i++)
1296 if (pi->pci_mem[i].size != 0) {
1297 sprintf(s[hose->index][i+1], "PCI hose %d MEM Space %d",
1298 hose->index, i);
1299 pci_init_resource(&hose->mem_resources[i],
1300 pi->pci_mem[i].cpu_base,
1301 pi->pci_mem[i].cpu_base + pi->pci_mem[i].size-1,
1302 IORESOURCE_MEM, s[hose->index][i+1]);
1303 }
1304
1305 hose->mem_space.end = pi->pci_mem[0].pci_base_lo +
1306 pi->pci_mem[0].size - 1;
1307 hose->pci_mem_offset = pi->pci_mem[0].cpu_base -
1308 pi->pci_mem[0].pci_base_lo;
1309}
1310
1311/*
1312 * mv64x60_config_pci_params()
1313 *
1314 * Configure a hose's PCI config space parameters.
1315 */
1316void __init
1317mv64x60_config_pci_params(struct pci_controller *hose,
1318 struct mv64x60_pci_info *pi)
1319{
1320 u32 devfn;
1321 u16 u16_val;
1322 u8 save_exclude;
1323
1324 devfn = PCI_DEVFN(0,0);
1325
1326 save_exclude = mv64x60_pci_exclude_bridge;
1327 mv64x60_pci_exclude_bridge = 0;
1328
1329 /* Set class code to indicate host bridge */
1330 u16_val = PCI_CLASS_BRIDGE_HOST; /* 0x0600 (host bridge) */
1331 early_write_config_word(hose, 0, devfn, PCI_CLASS_DEVICE, u16_val);
1332
1333 /* Enable bridge to be PCI master & respond to PCI MEM cycles */
1334 early_read_config_word(hose, 0, devfn, PCI_COMMAND, &u16_val);
1335 u16_val &= ~(PCI_COMMAND_IO | PCI_COMMAND_INVALIDATE |
1336 PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK);
1337 u16_val |= pi->pci_cmd_bits | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
1338 early_write_config_word(hose, 0, devfn, PCI_COMMAND, u16_val);
1339
1340 /* Set latency timer, cache line size, clear BIST */
7dffb720 1341 u16_val = (pi->latency_timer << 8) | (L1_CACHE_BYTES >> 2);
1342 early_write_config_word(hose, 0, devfn, PCI_CACHE_LINE_SIZE, u16_val);
1343
1344 mv64x60_pci_exclude_bridge = save_exclude;
1345}
1346
1347/*
1348 *****************************************************************************
1349 *
1350 * PCI Related Routines
1351 *
1352 *****************************************************************************
1353 */
1354/*
1355 * mv64x60_set_bus()
1356 *
1357 * Set the bus number for the hose directly under the bridge.
1358 */
1359void __init
1360mv64x60_set_bus(struct mv64x60_handle *bh, u32 bus, u32 child_bus)
1361{
1362 struct pci_controller *hose;
1363 u32 pci_mode, p2p_cfg, pci_cfg_offset, val;
1364 u8 save_exclude;
1365
1366 if (bus == 0) {
1367 pci_mode = bh->pci_mode_a;
1368 p2p_cfg = MV64x60_PCI0_P2P_CONFIG;
1369 pci_cfg_offset = 0x64;
1370 hose = bh->hose_a;
d01c08c9 1371 } else {
1372 pci_mode = bh->pci_mode_b;
1373 p2p_cfg = MV64x60_PCI1_P2P_CONFIG;
1374 pci_cfg_offset = 0xe4;
1375 hose = bh->hose_b;
1376 }
1377
1378 child_bus &= 0xff;
1379 val = mv64x60_read(bh, p2p_cfg);
1380
1381 if (pci_mode == MV64x60_PCIMODE_CONVENTIONAL) {
1382 val &= 0xe0000000; /* Force dev num to 0, turn off P2P bridge */
1383 val |= (child_bus << 16) | 0xff;
1384 mv64x60_write(bh, p2p_cfg, val);
1385 (void)mv64x60_read(bh, p2p_cfg); /* Flush FIFO */
d01c08c9 1386 } else { /* PCI-X */
1387 /*
1388 * Need to use the current bus/dev number (that's in the
1389 * P2P CONFIG reg) to access the bridge's pci config space.
1390 */
1391 save_exclude = mv64x60_pci_exclude_bridge;
1392 mv64x60_pci_exclude_bridge = 0;
1393 early_write_config_dword(hose, (val & 0x00ff0000) >> 16,
1394 PCI_DEVFN(((val & 0x1f000000) >> 24), 0),
1395 pci_cfg_offset, child_bus << 8);
1396 mv64x60_pci_exclude_bridge = save_exclude;
1397 }
1398}
1399
1400/*
1401 * mv64x60_pci_exclude_device()
1402 *
1403 * This routine is used to make the bridge not appear when the
1404 * PCI subsystem is accessing PCI devices (in PCI config space).
1405 */
1406int
1407mv64x60_pci_exclude_device(u8 bus, u8 devfn)
1408{
1409 struct pci_controller *hose;
1410
1411 hose = pci_bus_to_hose(bus);
1412
1413 /* Skip slot 0 on both hoses */
1414 if ((mv64x60_pci_exclude_bridge == 1) && (PCI_SLOT(devfn) == 0) &&
1415 (hose->first_busno == bus))
1416
1417 return PCIBIOS_DEVICE_NOT_FOUND;
1418 else
1419 return PCIBIOS_SUCCESSFUL;
1420} /* mv64x60_pci_exclude_device() */
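/*
 * Illustrative sketch only: board setup code typically points the generic
 * PCI layer at this routine so that config cycles to the bridge itself are
 * skipped.  The hook shown is the usual arch/ppc machdep member; the board
 * function name is a made-up placeholder.
 */
#if 0
static void __init hypothetical_board_setup_arch(void)
{
	ppc_md.pci_exclude_device = mv64x60_pci_exclude_device;
}
#endif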
1421
1422/*
1423 *****************************************************************************
1424 *
1425 * Platform Device Routines
1426 *
1427 *****************************************************************************
1428 */
1429
1430/*
1431 * mv64x60_pd_fixup()
1432 *
1433 * Add the physical base address at which the bridge's registers are mapped
1434 * to each device's MEM resources so that drivers can ioremap() them.
1435 */
1436void __init
1437mv64x60_pd_fixup(struct mv64x60_handle *bh, struct platform_device *pd_devs[],
1438 u32 entries)
1439{
1440 struct resource *r;
1441 u32 i, j;
1442
1443 for (i=0; i<entries; i++) {
1444 j = 0;
1445
1446 while ((r = platform_get_resource(pd_devs[i],IORESOURCE_MEM,j))
1447 != NULL) {
1448
1449 r->start += bh->p_base;
1450 r->end += bh->p_base;
1451 j++;
1452 }
1453 }
1454}
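/*
 * Illustrative example (made-up base address): if bh->p_base were 0xf1000000,
 * the "mpsc routing base" resource defined above would be rewritten from the
 * bare MV64x60_MPSC_ROUTING_OFFSET range to an absolute physical range
 * starting at 0xf1000000 + MV64x60_MPSC_ROUTING_OFFSET, which is what the
 * MPSC driver then ioremap()s.
 */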
1455
1456/*
1457 * mv64x60_add_pds()
1458 *
1459 * Add the mv64x60 platform devices to the list of platform devices.
1460 */
1461static int __init
1462mv64x60_add_pds(void)
1463{
1464 return platform_add_devices(mv64x60_pd_devs,
1465 ARRAY_SIZE(mv64x60_pd_devs));
1466}
1467arch_initcall(mv64x60_add_pds);
1468
1469/*
1470 *****************************************************************************
1471 *
1472 * GT64260-Specific Routines
1473 *
1474 *****************************************************************************
1475 */
1476/*
1477 * gt64260_translate_size()
1478 *
1479 * On the GT64260, the size register is really the "top" address of the window.
1480 */
1481static u32 __init
1482gt64260_translate_size(u32 base, u32 size, u32 num_bits)
1483{
1484 return base + mv64x60_mask(size - 1, num_bits);
1485}
1486
1487/*
1488 * gt64260_untranslate_size()
1489 *
1490 * Translate the top address of a window into a window size.
1491 */
1492static u32 __init
1493gt64260_untranslate_size(u32 base, u32 size, u32 num_bits)
1494{
1495 if (size >= base)
1496 size = size - base + (1 << (32 - num_bits));
1497 else
1498 size = 0;
1499
1500 return size;
1501}
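/*
 * Worked example (illustrative only), assuming a 12-bit size field: for a
 * window at base 0x80000000 covering 0x10000000 bytes,
 *
 *	gt64260_translate_size(0x80000000, 0x10000000, 12)   == 0x8ff00000
 *	gt64260_untranslate_size(0x80000000, 0x8ff00000, 12) == 0x10000000
 *
 * i.e. the "size" register really holds the window's top address (to the
 * field's granularity), and untranslate_size() recovers the byte size.
 */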
1502
1503/*
1504 * gt64260_set_pci2mem_window()
1505 *
1506 * The PCI->MEM window registers actually live in PCI config space, so they
1507 * are set by programming the corresponding config space BARs.
1508 */
1509static u32 gt64260_reg_addrs[2][4] __initdata = {
1510 { 0x10, 0x14, 0x18, 0x1c }, { 0x90, 0x94, 0x98, 0x9c }
1511};
1512
1513static void __init
1514gt64260_set_pci2mem_window(struct pci_controller *hose, u32 bus, u32 window,
1515 u32 base)
1516{
1517 u8 save_exclude;
1518
1519 pr_debug("set pci->mem window: %d, hose: %d, base: 0x%x\n", window,
1520 hose->index, base);
1521
1522 save_exclude = mv64x60_pci_exclude_bridge;
1523 mv64x60_pci_exclude_bridge = 0;
1524 early_write_config_dword(hose, 0, PCI_DEVFN(0, 0),
1525 gt64260_reg_addrs[bus][window], mv64x60_mask(base, 20) | 0x8);
1526 mv64x60_pci_exclude_bridge = save_exclude;
1527}
1528
1529/*
1530 * gt64260_set_pci2regs_window()
1531 *
1532 * Set where the bridge's registers appear in PCI MEM space.
1533 */
1534static u32 gt64260_offset[2] __initdata = {0x20, 0xa0};
1535
1536static void __init
1537gt64260_set_pci2regs_window(struct mv64x60_handle *bh,
1538 struct pci_controller *hose, u32 bus, u32 base)
1539{
1540 u8 save_exclude;
1541
1542 pr_debug("set pci->internal regs hose: %d, base: 0x%x\n", hose->index,
1543 base);
1544
1545 save_exclude = mv64x60_pci_exclude_bridge;
1546 mv64x60_pci_exclude_bridge = 0;
1547 early_write_config_dword(hose, 0, PCI_DEVFN(0,0), gt64260_offset[bus],
1548 (base << 16));
1549 mv64x60_pci_exclude_bridge = save_exclude;
1550}
1551
1552/*
1553 * gt64260_is_enabled_32bit()
1554 *
1555 * On a GT64260, a window is enabled iff its top address is greater than or
1556 * equal to its base address.
1557 */
1558static u32 __init
1559gt64260_is_enabled_32bit(struct mv64x60_handle *bh, u32 window)
1560{
1561 u32 rc = 0;
1562
1563 if ((gt64260_32bit_windows[window].base_reg != 0) &&
1564 (gt64260_32bit_windows[window].size_reg != 0) &&
1565 ((mv64x60_read(bh, gt64260_32bit_windows[window].size_reg) &
1566 ((1 << gt64260_32bit_windows[window].size_bits) - 1)) >=
1567 (mv64x60_read(bh, gt64260_32bit_windows[window].base_reg) &
1568 ((1 << gt64260_32bit_windows[window].base_bits) - 1))))
1569
1570 rc = 1;
1571
1572 return rc;
1573}
1574
1575/*
1576 * gt64260_enable_window_32bit()
1577 *
1578 * On the GT64260, a window is enabled iff the top address is greater than or
1579 * equal to the base address of the window. Since the window has already been
1580 * configured by the time this routine is called, we have nothing to do here.
1581 */
1582static void __init
1583gt64260_enable_window_32bit(struct mv64x60_handle *bh, u32 window)
1584{
1585 pr_debug("enable 32bit window: %d\n", window);
1586}
1587
1588/*
1589 * gt64260_disable_window_32bit()
1590 *
1591 * On a GT64260, you disable a window by setting its top address to be less
1592 * than its base address.
1593 */
1594static void __init
1595gt64260_disable_window_32bit(struct mv64x60_handle *bh, u32 window)
1596{
1597 pr_debug("disable 32bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
1598 window, gt64260_32bit_windows[window].base_reg,
1599 gt64260_32bit_windows[window].size_reg);
1600
1601 if ((gt64260_32bit_windows[window].base_reg != 0) &&
1602 (gt64260_32bit_windows[window].size_reg != 0)) {
1603
1604 /* To disable, make bottom reg higher than top reg */
1605 mv64x60_write(bh, gt64260_32bit_windows[window].base_reg,0xfff);
1606 mv64x60_write(bh, gt64260_32bit_windows[window].size_reg, 0);
1607 }
1608}
1609
1610/*
1611 * gt64260_enable_window_64bit()
1612 *
1613 * On the GT64260, a window is enabled iff the top address is greater than or
1614 * equal to the base address of the window. Since the window has already been
1615 * configured by the time this routine is called, we have nothing to do here.
1616 */
1617static void __init
1618gt64260_enable_window_64bit(struct mv64x60_handle *bh, u32 window)
1619{
1620 pr_debug("enable 64bit window: %d\n", window);
1621}
1622
1623/*
1624 * gt64260_disable_window_64bit()
1625 *
1626 * On a GT64260, you disable a window by setting its top address to be less
1627 * than its base address.
1628 */
1629static void __init
1630gt64260_disable_window_64bit(struct mv64x60_handle *bh, u32 window)
1631{
1632 pr_debug("disable 64bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
1633 window, gt64260_64bit_windows[window].base_lo_reg,
1634 gt64260_64bit_windows[window].size_reg);
1635
1636 if ((gt64260_64bit_windows[window].base_lo_reg != 0) &&
1637 (gt64260_64bit_windows[window].size_reg != 0)) {
1638
1639 /* To disable, make bottom reg higher than top reg */
1640 mv64x60_write(bh, gt64260_64bit_windows[window].base_lo_reg,
1641 0xfff);
1642 mv64x60_write(bh, gt64260_64bit_windows[window].base_hi_reg, 0);
1643 mv64x60_write(bh, gt64260_64bit_windows[window].size_reg, 0);
1644 }
1645}
1646
1647/*
1648 * gt64260_disable_all_windows()
1649 *
1650 * The GT64260 has several windows that aren't represented in the table of
1651 * windows at the top of this file. This routine turns all of them off
1652 * except for the memory controller windows, of course.
1653 */
1654static void __init
1655gt64260_disable_all_windows(struct mv64x60_handle *bh,
1656 struct mv64x60_setup_info *si)
1657{
1658 u32 i, preserve;
1659
1660 /* Disable 32bit windows (don't disable cpu->mem windows) */
1661 for (i=MV64x60_CPU2DEV_0_WIN; i<MV64x60_32BIT_WIN_COUNT; i++) {
1662 if (i < 32)
1663 preserve = si->window_preserve_mask_32_lo & (1 << i);
1664 else
1665 preserve = si->window_preserve_mask_32_hi & (1<<(i-32));
1666
1667 if (!preserve)
1668 gt64260_disable_window_32bit(bh, i);
1669 }
1670
1671 /* Disable 64bit windows */
1672 for (i=0; i<MV64x60_64BIT_WIN_COUNT; i++)
1673 if (!(si->window_preserve_mask_64 & (1<<i)))
1674 gt64260_disable_window_64bit(bh, i);
1675
1676 /* Turn off cpu protection windows not in gt64260_32bit_windows[] */
1677 mv64x60_write(bh, GT64260_CPU_PROT_BASE_4, 0xfff);
1678 mv64x60_write(bh, GT64260_CPU_PROT_SIZE_4, 0);
1679 mv64x60_write(bh, GT64260_CPU_PROT_BASE_5, 0xfff);
1680 mv64x60_write(bh, GT64260_CPU_PROT_SIZE_5, 0);
1681 mv64x60_write(bh, GT64260_CPU_PROT_BASE_6, 0xfff);
1682 mv64x60_write(bh, GT64260_CPU_PROT_SIZE_6, 0);
1683 mv64x60_write(bh, GT64260_CPU_PROT_BASE_7, 0xfff);
1684 mv64x60_write(bh, GT64260_CPU_PROT_SIZE_7, 0);
1685
1686 /* Turn off PCI->MEM access cntl wins not in gt64260_64bit_windows[] */
1687 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_4_BASE_LO, 0xfff);
1688 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_4_BASE_HI, 0);
1689 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_4_SIZE, 0);
1690 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_5_BASE_LO, 0xfff);
1691 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_5_BASE_HI, 0);
1692 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_5_SIZE, 0);
1693 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_6_BASE_LO, 0xfff);
1694 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_6_BASE_HI, 0);
1695 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_6_SIZE, 0);
1696 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_7_BASE_LO, 0xfff);
1697 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_7_BASE_HI, 0);
1698 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_7_SIZE, 0);
1699
1700 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_4_BASE_LO, 0xfff);
1701 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_4_BASE_HI, 0);
1702 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_4_SIZE, 0);
1703 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_5_BASE_LO, 0xfff);
1704 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_5_BASE_HI, 0);
1705 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_5_SIZE, 0);
1706 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_6_BASE_LO, 0xfff);
1707 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_6_BASE_HI, 0);
1708 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_6_SIZE, 0);
1709 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_7_BASE_LO, 0xfff);
1710 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_7_BASE_HI, 0);
1711 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_7_SIZE, 0);
1712
1713 /* Disable all PCI-><whatever> windows */
1714 mv64x60_set_bits(bh, MV64x60_PCI0_BAR_ENABLE, 0x07fffdff);
1715 mv64x60_set_bits(bh, MV64x60_PCI1_BAR_ENABLE, 0x07fffdff);
1716
1717 /*
1718 * Some firmwares enable a bunch of intr sources
1719 * for the PCI INT output pins.
1720 */
1721 mv64x60_write(bh, GT64260_IC_CPU_INTR_MASK_LO, 0);
1722 mv64x60_write(bh, GT64260_IC_CPU_INTR_MASK_HI, 0);
1723 mv64x60_write(bh, GT64260_IC_PCI0_INTR_MASK_LO, 0);
1724 mv64x60_write(bh, GT64260_IC_PCI0_INTR_MASK_HI, 0);
1725 mv64x60_write(bh, GT64260_IC_PCI1_INTR_MASK_LO, 0);
1726 mv64x60_write(bh, GT64260_IC_PCI1_INTR_MASK_HI, 0);
1727 mv64x60_write(bh, GT64260_IC_CPU_INT_0_MASK, 0);
1728 mv64x60_write(bh, GT64260_IC_CPU_INT_1_MASK, 0);
1729 mv64x60_write(bh, GT64260_IC_CPU_INT_2_MASK, 0);
1730 mv64x60_write(bh, GT64260_IC_CPU_INT_3_MASK, 0);
1731}
1732
1733/*
1734 * gt64260a_chip_specific_init()
1735 *
a8de5ce9 1736 * Implement errata workarounds for the GT64260A.
1737 */
1738static void __init
1739gt64260a_chip_specific_init(struct mv64x60_handle *bh,
1740 struct mv64x60_setup_info *si)
1741{
1742#ifdef CONFIG_SERIAL_MPSC
1743 struct resource *r;
1744#endif
1745#if !defined(CONFIG_NOT_COHERENT_CACHE)
1746 u32 val;
1747 u8 save_exclude;
1748#endif
1749
1750 if (si->pci_0.enable_bus)
1751 mv64x60_set_bits(bh, MV64x60_PCI0_CMD,
1752 ((1<<4) | (1<<5) | (1<<9) | (1<<13)));
1753
1754 if (si->pci_1.enable_bus)
1755 mv64x60_set_bits(bh, MV64x60_PCI1_CMD,
1756 ((1<<4) | (1<<5) | (1<<9) | (1<<13)));
1757
1758 /*
1759 * Dave Wilhardt found that bit 4 in the PCI Command registers must
1760 * be set if you are using cache coherency.
1761 */
1762#if !defined(CONFIG_NOT_COHERENT_CACHE)
1763 /* Res #MEM-4 -- cpu read buffer to buffer 1 */
1764 if ((mv64x60_read(bh, MV64x60_CPU_MODE) & 0xf0) == 0x40)
1765 mv64x60_set_bits(bh, GT64260_SDRAM_CONFIG, (1<<26));
1766
1767 save_exclude = mv64x60_pci_exclude_bridge;
1768 mv64x60_pci_exclude_bridge = 0;
1769 if (si->pci_0.enable_bus) {
1770 early_read_config_dword(bh->hose_a, 0, PCI_DEVFN(0,0),
1771 PCI_COMMAND, &val);
1772 val |= PCI_COMMAND_INVALIDATE;
1773 early_write_config_dword(bh->hose_a, 0, PCI_DEVFN(0,0),
1774 PCI_COMMAND, val);
1775 }
1776
1777 if (si->pci_1.enable_bus) {
1778 early_read_config_dword(bh->hose_b, 0, PCI_DEVFN(0,0),
1779 PCI_COMMAND, &val);
1780 val |= PCI_COMMAND_INVALIDATE;
1781 early_write_config_dword(bh->hose_b, 0, PCI_DEVFN(0,0),
1782 PCI_COMMAND, val);
1783 }
1784 mv64x60_pci_exclude_bridge = save_exclude;
1785#endif
1786
1787 /* Disable buffer/descriptor snooping */
1788 mv64x60_clr_bits(bh, 0xf280, (1<< 6) | (1<<14) | (1<<22) | (1<<30));
1789 mv64x60_clr_bits(bh, 0xf2c0, (1<< 6) | (1<<14) | (1<<22) | (1<<30));
1790
1791#ifdef CONFIG_SERIAL_MPSC
1792 mv64x60_mpsc0_pdata.mirror_regs = 1;
1793 mv64x60_mpsc0_pdata.cache_mgmt = 1;
1794 mv64x60_mpsc1_pdata.mirror_regs = 1;
1795 mv64x60_mpsc1_pdata.cache_mgmt = 1;
1796
1797 if ((r = platform_get_resource(&mpsc1_device, IORESOURCE_IRQ, 0))
d01c08c9 1798 != NULL) {
1799 r->start = MV64x60_IRQ_SDMA_0;
1800 r->end = MV64x60_IRQ_SDMA_0;
1801 }
1802#endif
1803}
1804
1805/*
1806 * gt64260b_chip_specific_init()
1807 *
a8de5ce9 1808 * Implement errata workarounds for the GT64260B.
1809 */
1810static void __init
1811gt64260b_chip_specific_init(struct mv64x60_handle *bh,
1812 struct mv64x60_setup_info *si)
1813{
1814#ifdef CONFIG_SERIAL_MPSC
1815 struct resource *r;
1816#endif
1817#if !defined(CONFIG_NOT_COHERENT_CACHE)
1818 u32 val;
1819 u8 save_exclude;
1820#endif
1821
1822 if (si->pci_0.enable_bus)
1823 mv64x60_set_bits(bh, MV64x60_PCI0_CMD,
1824 ((1<<4) | (1<<5) | (1<<9) | (1<<13)));
1825
1826 if (si->pci_1.enable_bus)
1827 mv64x60_set_bits(bh, MV64x60_PCI1_CMD,
1828 ((1<<4) | (1<<5) | (1<<9) | (1<<13)));
1829
1830 /*
1831 * Dave Wilhardt found that bit 4 in the PCI Command registers must
1832 * be set if you are using cache coherency.
1833 */
1834#if !defined(CONFIG_NOT_COHERENT_CACHE)
1835 mv64x60_set_bits(bh, GT64260_CPU_WB_PRIORITY_BUFFER_DEPTH, 0xf);
1836
1837 /* Res #MEM-4 -- cpu read buffer to buffer 1 */
1838 if ((mv64x60_read(bh, MV64x60_CPU_MODE) & 0xf0) == 0x40)
1839 mv64x60_set_bits(bh, GT64260_SDRAM_CONFIG, (1<<26));
1840
1841 save_exclude = mv64x60_pci_exclude_bridge;
1842 mv64x60_pci_exclude_bridge = 0;
1843 if (si->pci_0.enable_bus) {
1844 early_read_config_dword(bh->hose_a, 0, PCI_DEVFN(0,0),
1845 PCI_COMMAND, &val);
1846 val |= PCI_COMMAND_INVALIDATE;
1847 early_write_config_dword(bh->hose_a, 0, PCI_DEVFN(0,0),
1848 PCI_COMMAND, val);
1849 }
1850
1851 if (si->pci_1.enable_bus) {
1852 early_read_config_dword(bh->hose_b, 0, PCI_DEVFN(0,0),
1853 PCI_COMMAND, &val);
1854 val |= PCI_COMMAND_INVALIDATE;
1855 early_write_config_dword(bh->hose_b, 0, PCI_DEVFN(0,0),
1856 PCI_COMMAND, val);
1857 }
1858 mv64x60_pci_exclude_bridge = save_exclude;
1859#endif
1860
1861 /* Disable buffer/descriptor snooping */
1862 mv64x60_clr_bits(bh, 0xf280, (1<< 6) | (1<<14) | (1<<22) | (1<<30));
1863 mv64x60_clr_bits(bh, 0xf2c0, (1<< 6) | (1<<14) | (1<<22) | (1<<30));
1864
1865#ifdef CONFIG_SERIAL_MPSC
1866 /*
1867 * The 64260B is not supposed to have the bug where the MPSC & ENET
1868 * can't access cache coherent regions. However, testing has shown
1869 * that the MPSC, at least, still has this bug.
1870 */
1871 mv64x60_mpsc0_pdata.cache_mgmt = 1;
1872 mv64x60_mpsc1_pdata.cache_mgmt = 1;
1873
1874 if ((r = platform_get_resource(&mpsc1_device, IORESOURCE_IRQ, 0))
d01c08c9 1875 != NULL) {
1876 r->start = MV64x60_IRQ_SDMA_0;
1877 r->end = MV64x60_IRQ_SDMA_0;
1878 }
1879#endif
1880}
1881
1882/*
1883 *****************************************************************************
1884 *
1885 * MV64360-Specific Routines
1886 *
1887 *****************************************************************************
1888 */
1889/*
1890 * mv64360_translate_size()
1891 *
1892 * On the MV64360, the size register is programmed similarly to the size you
1893 * get from a PCI config space BAR register: from LSB to MSB, a run of 1's
1894 * followed by a run of 0's, i.e. "size - 1", with the assumption that the
1895 * size is a power of 2.
1896 */
1897static u32 __init
1898mv64360_translate_size(u32 base_addr, u32 size, u32 num_bits)
1899{
1900 return mv64x60_mask(size - 1, num_bits);
1901}
1902
1903/*
1904 * mv64360_untranslate_size()
1905 *
1906 * Translate the size register value of a window into a window size.
1907 */
1908static u32 __init
1909mv64360_untranslate_size(u32 base_addr, u32 size, u32 num_bits)
1910{
1911 if (size > 0) {
1912 size >>= (32 - num_bits);
1913 size++;
1914 size <<= (32 - num_bits);
1915 }
1916
1917 return size;
1918}
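
/*
 * Worked example for the two size helpers above (an editorial sketch, assuming
 * mv64x60_mask() simply keeps the top num_bits bits of its argument): a 256 MB
 * window (size 0x10000000, num_bits 16) translates to a size register value of
 * 0x0fff0000, and mv64360_untranslate_size() reverses that back to 0x10000000.
 */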
1919
1920/*
1921 * mv64360_set_pci2mem_window()
1922 *
1923 * The PCI->MEM window registers actually live in PCI config space, so they
1924 * are set by programming the appropriate config space BARs.
1925 */
1926struct {
1927 u32 fcn;
1928 u32 base_hi_bar;
1929 u32 base_lo_bar;
1930} static mv64360_reg_addrs[2][4] __initdata = {
1931 {{ 0, 0x14, 0x10 }, { 0, 0x1c, 0x18 },
1932 { 1, 0x14, 0x10 }, { 1, 0x1c, 0x18 }},
1933 {{ 0, 0x94, 0x90 }, { 0, 0x9c, 0x98 },
1934 { 1, 0x94, 0x90 }, { 1, 0x9c, 0x98 }}
1935};
1936
1937static void __init
1938mv64360_set_pci2mem_window(struct pci_controller *hose, u32 bus, u32 window,
1939 u32 base)
1940{
1941 u8 save_exclude;
1942
1943 pr_debug("set pci->mem window: %d, hose: %d, base: 0x%x\n", window,
1944 hose->index, base);
1945
1946 save_exclude = mv64x60_pci_exclude_bridge;
1947 mv64x60_pci_exclude_bridge = 0;
1948 early_write_config_dword(hose, 0,
1949 PCI_DEVFN(0, mv64360_reg_addrs[bus][window].fcn),
1950 mv64360_reg_addrs[bus][window].base_hi_bar, 0);
1951 early_write_config_dword(hose, 0,
1952 PCI_DEVFN(0, mv64360_reg_addrs[bus][window].fcn),
1953 mv64360_reg_addrs[bus][window].base_lo_bar,
1954 mv64x60_mask(base,20) | 0xc);
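	/*
	 * Editorial note (an inference from standard PCI BAR encoding, not
	 * stated in the original source): the 0xc OR'd in above marks the BAR
	 * as a prefetchable, 64-bit memory BAR (bit 3 set, type field 10b).
	 */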
1955 mv64x60_pci_exclude_bridge = save_exclude;
1956}
1957
1958/*
1959 * mv64360_set_pci2regs_window()
1960 *
1961 * Set where the bridge's registers appear in PCI MEM space.
1962 */
1963static u32 mv64360_offset[2][2] __initdata = {{0x20, 0x24}, {0xa0, 0xa4}};
1964
1965static void __init
1966mv64360_set_pci2regs_window(struct mv64x60_handle *bh,
1967 struct pci_controller *hose, u32 bus, u32 base)
1968{
1969 u8 save_exclude;
1970
1971 pr_debug("set pci->internal regs hose: %d, base: 0x%x\n", hose->index,
1972 base);
1973
1974 save_exclude = mv64x60_pci_exclude_bridge;
1975 mv64x60_pci_exclude_bridge = 0;
1976 early_write_config_dword(hose, 0, PCI_DEVFN(0,0),
1977 mv64360_offset[bus][0], (base << 16));
1978 early_write_config_dword(hose, 0, PCI_DEVFN(0,0),
1979 mv64360_offset[bus][1], 0);
1980 mv64x60_pci_exclude_bridge = save_exclude;
1981}
1982
1983/*
1984 * mv64360_is_enabled_32bit()
1985 *
1986 * On a MV64360, a window is enabled by either clearing a bit in the
1987 * CPU BAR Enable reg or setting a bit in the window's base reg.
1988 * Note that this doesn't work for windows on the PCI slave side, but we don't
1989 * check those so it's okay.
1990 */
1991static u32 __init
1992mv64360_is_enabled_32bit(struct mv64x60_handle *bh, u32 window)
1993{
1994 u32 extra, rc = 0;
1995
1996 if (((mv64360_32bit_windows[window].base_reg != 0) &&
1997 (mv64360_32bit_windows[window].size_reg != 0)) ||
1998 (window == MV64x60_CPU2SRAM_WIN)) {
1999
2000 extra = mv64360_32bit_windows[window].extra;
2001
2002 switch (extra & MV64x60_EXTRA_MASK) {
2003 case MV64x60_EXTRA_CPUWIN_ENAB:
2004 rc = (mv64x60_read(bh, MV64360_CPU_BAR_ENABLE) &
2005 (1 << (extra & 0x1f))) == 0;
2006 break;
2007
2008 case MV64x60_EXTRA_CPUPROT_ENAB:
2009 rc = (mv64x60_read(bh,
2010 mv64360_32bit_windows[window].base_reg) &
2011 (1 << (extra & 0x1f))) != 0;
2012 break;
2013
2014 case MV64x60_EXTRA_ENET_ENAB:
2015 rc = (mv64x60_read(bh, MV64360_ENET2MEM_BAR_ENABLE) &
2016 (1 << (extra & 0x7))) == 0;
2017 break;
2018
2019 case MV64x60_EXTRA_MPSC_ENAB:
2020 rc = (mv64x60_read(bh, MV64360_MPSC2MEM_BAR_ENABLE) &
2021 (1 << (extra & 0x3))) == 0;
2022 break;
2023
2024 case MV64x60_EXTRA_IDMA_ENAB:
2025 rc = (mv64x60_read(bh, MV64360_IDMA2MEM_BAR_ENABLE) &
2026 (1 << (extra & 0x7))) == 0;
2027 break;
2028
2029 default:
2030 printk(KERN_ERR "mv64360_is_enabled: %s\n",
2031 "32bit table corrupted");
2032 }
2033 }
2034
2035 return rc;
2036}
2037
2038/*
2039 * mv64360_enable_window_32bit()
2040 *
2041 * On a MV64360, a window is enabled by either clearing a bit in the
2042 * CPU BAR Enable reg or setting a bit in the window's base reg.
2043 */
2044static void __init
2045mv64360_enable_window_32bit(struct mv64x60_handle *bh, u32 window)
2046{
2047 u32 extra;
2048
2049 pr_debug("enable 32bit window: %d\n", window);
2050
2051 if (((mv64360_32bit_windows[window].base_reg != 0) &&
2052 (mv64360_32bit_windows[window].size_reg != 0)) ||
2053 (window == MV64x60_CPU2SRAM_WIN)) {
2054
2055 extra = mv64360_32bit_windows[window].extra;
2056
2057 switch (extra & MV64x60_EXTRA_MASK) {
2058 case MV64x60_EXTRA_CPUWIN_ENAB:
2059 mv64x60_clr_bits(bh, MV64360_CPU_BAR_ENABLE,
2060 (1 << (extra & 0x1f)));
2061 break;
2062
2063 case MV64x60_EXTRA_CPUPROT_ENAB:
2064 mv64x60_set_bits(bh,
2065 mv64360_32bit_windows[window].base_reg,
2066 (1 << (extra & 0x1f)));
2067 break;
2068
2069 case MV64x60_EXTRA_ENET_ENAB:
2070 mv64x60_clr_bits(bh, MV64360_ENET2MEM_BAR_ENABLE,
2071 (1 << (extra & 0x7)));
2072 break;
2073
2074 case MV64x60_EXTRA_MPSC_ENAB:
2075 mv64x60_clr_bits(bh, MV64360_MPSC2MEM_BAR_ENABLE,
2076 (1 << (extra & 0x3)));
2077 break;
2078
2079 case MV64x60_EXTRA_IDMA_ENAB:
2080 mv64x60_clr_bits(bh, MV64360_IDMA2MEM_BAR_ENABLE,
2081 (1 << (extra & 0x7)));
2082 break;
2083
2084 default:
2085 printk(KERN_ERR "mv64360_enable: %s\n",
2086 "32bit table corrupted");
2087 }
2088 }
2089}
2090
2091/*
2092 * mv64360_disable_window_32bit()
2093 *
2094 * On a MV64360, a window is disabled by either setting a bit in the
2095 * CPU BAR Enable reg or clearing a bit in the window's base reg.
2096 */
2097static void __init
2098mv64360_disable_window_32bit(struct mv64x60_handle *bh, u32 window)
2099{
2100 u32 extra;
2101
2102 pr_debug("disable 32bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
2103 window, mv64360_32bit_windows[window].base_reg,
2104 mv64360_32bit_windows[window].size_reg);
2105
2106 if (((mv64360_32bit_windows[window].base_reg != 0) &&
2107 (mv64360_32bit_windows[window].size_reg != 0)) ||
2108 (window == MV64x60_CPU2SRAM_WIN)) {
2109
2110 extra = mv64360_32bit_windows[window].extra;
2111
2112 switch (extra & MV64x60_EXTRA_MASK) {
2113 case MV64x60_EXTRA_CPUWIN_ENAB:
2114 mv64x60_set_bits(bh, MV64360_CPU_BAR_ENABLE,
2115 (1 << (extra & 0x1f)));
2116 break;
2117
2118 case MV64x60_EXTRA_CPUPROT_ENAB:
2119 mv64x60_clr_bits(bh,
2120 mv64360_32bit_windows[window].base_reg,
2121 (1 << (extra & 0x1f)));
2122 break;
2123
2124 case MV64x60_EXTRA_ENET_ENAB:
2125 mv64x60_set_bits(bh, MV64360_ENET2MEM_BAR_ENABLE,
2126 (1 << (extra & 0x7)));
2127 break;
2128
2129 case MV64x60_EXTRA_MPSC_ENAB:
2130 mv64x60_set_bits(bh, MV64360_MPSC2MEM_BAR_ENABLE,
2131 (1 << (extra & 0x3)));
2132 break;
2133
2134 case MV64x60_EXTRA_IDMA_ENAB:
2135 mv64x60_set_bits(bh, MV64360_IDMA2MEM_BAR_ENABLE,
2136 (1 << (extra & 0x7)));
2137 break;
2138
2139 default:
2140 printk(KERN_ERR "mv64360_disable: %s\n",
2141 "32bit table corrupted");
2142 }
2143 }
2144}
2145
2146/*
2147 * mv64360_enable_window_64bit()
2148 *
2149 * On the MV64360, a 64-bit window is enabled by setting a bit in the window's
2150 * base reg.
2151 */
2152static void __init
2153mv64360_enable_window_64bit(struct mv64x60_handle *bh, u32 window)
2154{
2155 pr_debug("enable 64bit window: %d\n", window);
2156
2157 if ((mv64360_64bit_windows[window].base_lo_reg!= 0) &&
2158 (mv64360_64bit_windows[window].size_reg != 0)) {
2159
2160 if ((mv64360_64bit_windows[window].extra & MV64x60_EXTRA_MASK)
d01c08c9 2161 == MV64x60_EXTRA_PCIACC_ENAB)
2162 mv64x60_set_bits(bh,
2163 mv64360_64bit_windows[window].base_lo_reg,
2164 (1 << (mv64360_64bit_windows[window].extra &
2165 0x1f)));
2166 else
2167 printk(KERN_ERR "mv64360_enable: %s\n",
2168 "64bit table corrupted");
2169 }
2170}
2171
2172/*
2173 * mv64360_disable_window_64bit()
2174 *
2175 * On a MV64360, a 64-bit window is disabled by clearing a bit in the window's
2176 * base reg.
2177 */
2178static void __init
2179mv64360_disable_window_64bit(struct mv64x60_handle *bh, u32 window)
2180{
2181 pr_debug("disable 64bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
2182 window, mv64360_64bit_windows[window].base_lo_reg,
2183 mv64360_64bit_windows[window].size_reg);
2184
2185 if ((mv64360_64bit_windows[window].base_lo_reg != 0) &&
d01c08c9 2186 (mv64360_64bit_windows[window].size_reg != 0)) {
1da177e4 2187 if ((mv64360_64bit_windows[window].extra & MV64x60_EXTRA_MASK)
d01c08c9 2188 == MV64x60_EXTRA_PCIACC_ENAB)
2189 mv64x60_clr_bits(bh,
2190 mv64360_64bit_windows[window].base_lo_reg,
2191 (1 << (mv64360_64bit_windows[window].extra &
2192 0x1f)));
2193 else
2194 printk(KERN_ERR "mv64360_disable: %s\n",
2195 "64bit table corrupted");
2196 }
2197}
2198
2199/*
2200 * mv64360_disable_all_windows()
2201 *
2202 * The MV64360 has a few windows that aren't represented in the table of
2203 * windows at the top of this file. This routine turns all of them off
2204 * except for the memory controller windows, of course.
2205 */
2206static void __init
2207mv64360_disable_all_windows(struct mv64x60_handle *bh,
2208 struct mv64x60_setup_info *si)
2209{
2210 u32 preserve, i;
2211
2212 /* Disable 32bit windows (don't disable cpu->mem windows) */
2213 for (i=MV64x60_CPU2DEV_0_WIN; i<MV64x60_32BIT_WIN_COUNT; i++) {
2214 if (i < 32)
2215 preserve = si->window_preserve_mask_32_lo & (1 << i);
2216 else
2217 preserve = si->window_preserve_mask_32_hi & (1<<(i-32));
2218
2219 if (!preserve)
2220 mv64360_disable_window_32bit(bh, i);
2221 }
2222
2223 /* Disable 64bit windows */
2224 for (i=0; i<MV64x60_64BIT_WIN_COUNT; i++)
2225 if (!(si->window_preserve_mask_64 & (1<<i)))
2226 mv64360_disable_window_64bit(bh, i);
2227
2228 /* Turn off PCI->MEM access cntl wins not in mv64360_64bit_windows[] */
2229 mv64x60_clr_bits(bh, MV64x60_PCI0_ACC_CNTL_4_BASE_LO, 0);
2230 mv64x60_clr_bits(bh, MV64x60_PCI0_ACC_CNTL_5_BASE_LO, 0);
2231 mv64x60_clr_bits(bh, MV64x60_PCI1_ACC_CNTL_4_BASE_LO, 0);
2232 mv64x60_clr_bits(bh, MV64x60_PCI1_ACC_CNTL_5_BASE_LO, 0);
2233
2234 /* Disable all PCI-><whatever> windows */
2235 mv64x60_set_bits(bh, MV64x60_PCI0_BAR_ENABLE, 0x0000f9ff);
2236 mv64x60_set_bits(bh, MV64x60_PCI1_BAR_ENABLE, 0x0000f9ff);
2237}
2238
2239/*
2240 * mv64360_config_io2mem_windows()
2241 *
2242 * ENET, MPSC, and IDMA ctlrs on the MV64[34]60 have separate windows that
2243 * must be set up so that the respective ctlr can access system memory.
2244 */
2245static u32 enet_tab[MV64x60_CPU2MEM_WINDOWS] __initdata = {
2246 MV64x60_ENET2MEM_0_WIN, MV64x60_ENET2MEM_1_WIN,
2247 MV64x60_ENET2MEM_2_WIN, MV64x60_ENET2MEM_3_WIN,
2248};
2249
2250static u32 mpsc_tab[MV64x60_CPU2MEM_WINDOWS] __initdata = {
2251 MV64x60_MPSC2MEM_0_WIN, MV64x60_MPSC2MEM_1_WIN,
2252 MV64x60_MPSC2MEM_2_WIN, MV64x60_MPSC2MEM_3_WIN,
2253};
2254
2255static u32 idma_tab[MV64x60_CPU2MEM_WINDOWS] __initdata = {
2256 MV64x60_IDMA2MEM_0_WIN, MV64x60_IDMA2MEM_1_WIN,
2257 MV64x60_IDMA2MEM_2_WIN, MV64x60_IDMA2MEM_3_WIN,
2258};
2259
2260static u32 dram_selects[MV64x60_CPU2MEM_WINDOWS] __initdata =
2261 { 0xe, 0xd, 0xb, 0x7 };
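
/*
 * Editorial note: each dram_selects[] entry is shifted into bits 8-11 of the
 * window attributes below; the values look like active-low DRAM chip-select
 * decodes (one bit cleared per CS0..CS3 bank), but that interpretation is an
 * inference, not something stated in the original source.
 */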
2262
2263static void __init
2264mv64360_config_io2mem_windows(struct mv64x60_handle *bh,
2265 struct mv64x60_setup_info *si,
2266 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
2267{
2268 u32 i, win;
2269
2270	pr_debug("config_io2mem_windows: enet, mpsc, idma -> system memory\n");
2271
2272 mv64x60_write(bh, MV64360_ENET2MEM_ACC_PROT_0, 0);
2273 mv64x60_write(bh, MV64360_ENET2MEM_ACC_PROT_1, 0);
2274 mv64x60_write(bh, MV64360_ENET2MEM_ACC_PROT_2, 0);
2275
2276 mv64x60_write(bh, MV64360_MPSC2MEM_ACC_PROT_0, 0);
2277 mv64x60_write(bh, MV64360_MPSC2MEM_ACC_PROT_1, 0);
2278
2279 mv64x60_write(bh, MV64360_IDMA2MEM_ACC_PROT_0, 0);
2280 mv64x60_write(bh, MV64360_IDMA2MEM_ACC_PROT_1, 0);
2281 mv64x60_write(bh, MV64360_IDMA2MEM_ACC_PROT_2, 0);
2282 mv64x60_write(bh, MV64360_IDMA2MEM_ACC_PROT_3, 0);
2283
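	/*
	 * Editorial note: in the loop below each CPU->MEM window i owns a 2-bit
	 * field in the *_ACC_PROT_* registers, and 0x3 << (i << 1) presumably
	 * grants full read/write access for that region (inferred from the
	 * "r/w access" comments, not from the datasheet).
	 */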
2284 /* Assume that mem ctlr has no more windows than embedded I/O ctlr */
2285 for (win=MV64x60_CPU2MEM_0_WIN,i=0;win<=MV64x60_CPU2MEM_3_WIN;win++,i++)
2286 if (bh->ci->is_enabled_32bit(bh, win)) {
2287 mv64x60_set_32bit_window(bh, enet_tab[i],
2288 mem_windows[i][0], mem_windows[i][1],
2289 (dram_selects[i] << 8) |
2290 (si->enet_options[i] & 0x3000));
2291 bh->ci->enable_window_32bit(bh, enet_tab[i]);
2292
2293 /* Give enet r/w access to memory region */
2294 mv64x60_set_bits(bh, MV64360_ENET2MEM_ACC_PROT_0,
2295 (0x3 << (i << 1)));
2296 mv64x60_set_bits(bh, MV64360_ENET2MEM_ACC_PROT_1,
2297 (0x3 << (i << 1)));
2298 mv64x60_set_bits(bh, MV64360_ENET2MEM_ACC_PROT_2,
2299 (0x3 << (i << 1)));
2300
2301 mv64x60_set_32bit_window(bh, mpsc_tab[i],
2302 mem_windows[i][0], mem_windows[i][1],
2303 (dram_selects[i] << 8) |
2304 (si->mpsc_options[i] & 0x3000));
2305 bh->ci->enable_window_32bit(bh, mpsc_tab[i]);
2306
2307 /* Give mpsc r/w access to memory region */
2308 mv64x60_set_bits(bh, MV64360_MPSC2MEM_ACC_PROT_0,
2309 (0x3 << (i << 1)));
2310 mv64x60_set_bits(bh, MV64360_MPSC2MEM_ACC_PROT_1,
2311 (0x3 << (i << 1)));
2312
2313 mv64x60_set_32bit_window(bh, idma_tab[i],
2314 mem_windows[i][0], mem_windows[i][1],
2315 (dram_selects[i] << 8) |
2316 (si->idma_options[i] & 0x3000));
2317 bh->ci->enable_window_32bit(bh, idma_tab[i]);
2318
2319 /* Give idma r/w access to memory region */
2320 mv64x60_set_bits(bh, MV64360_IDMA2MEM_ACC_PROT_0,
2321 (0x3 << (i << 1)));
2322 mv64x60_set_bits(bh, MV64360_IDMA2MEM_ACC_PROT_1,
2323 (0x3 << (i << 1)));
2324 mv64x60_set_bits(bh, MV64360_IDMA2MEM_ACC_PROT_2,
2325 (0x3 << (i << 1)));
2326 mv64x60_set_bits(bh, MV64360_IDMA2MEM_ACC_PROT_3,
2327 (0x3 << (i << 1)));
2328 }
2329}
2330
2331/*
2332 * mv64360_set_mpsc2regs_window()
2333 *
2334 * MPSC has a window to the bridge's internal registers. Call this routine
2335 * to change that window so it doesn't conflict with the windows mapping the
2336 * mpsc to system memory.
2337 */
2338static void __init
2339mv64360_set_mpsc2regs_window(struct mv64x60_handle *bh, u32 base)
2340{
2341 pr_debug("set mpsc->internal regs, base: 0x%x\n", base);
1da177e4 2342 mv64x60_write(bh, MV64360_MPSC2REGS_BASE, base & 0xffff0000);
2343}
2344
2345/*
2346 * mv64360_chip_specific_init()
2347 *
a8de5ce9 2348 * Implement errata workarounds for the MV64360.
2349 */
2350static void __init
2351mv64360_chip_specific_init(struct mv64x60_handle *bh,
2352 struct mv64x60_setup_info *si)
2353{
2354#if !defined(CONFIG_NOT_COHERENT_CACHE)
2355 mv64x60_set_bits(bh, MV64360_D_UNIT_CONTROL_HIGH, (1<<24));
2356#endif
2357#ifdef CONFIG_SERIAL_MPSC
2358 mv64x60_mpsc0_pdata.brg_can_tune = 1;
2359 mv64x60_mpsc0_pdata.cache_mgmt = 1;
2360 mv64x60_mpsc1_pdata.brg_can_tune = 1;
2361 mv64x60_mpsc1_pdata.cache_mgmt = 1;
2362#endif
2363}
2364
2365/*
2366 * mv64460_chip_specific_init()
2367 *
a8de5ce9 2368 * Implement errata workarounds for the MV64460.
2369 */
2370static void __init
2371mv64460_chip_specific_init(struct mv64x60_handle *bh,
2372 struct mv64x60_setup_info *si)
2373{
2374#if !defined(CONFIG_NOT_COHERENT_CACHE)
2375 mv64x60_set_bits(bh, MV64360_D_UNIT_CONTROL_HIGH, (1<<24) | (1<<25));
2376 mv64x60_set_bits(bh, MV64460_D_UNIT_MMASK, (1<<1) | (1<<4));
2377#endif
2378#ifdef CONFIG_SERIAL_MPSC
2379 mv64x60_mpsc0_pdata.brg_can_tune = 1;
d01c08c9 2380 mv64x60_mpsc0_pdata.cache_mgmt = 1;
1da177e4 2381 mv64x60_mpsc1_pdata.brg_can_tune = 1;
d01c08c9 2382 mv64x60_mpsc1_pdata.cache_mgmt = 1;
1da177e4 2383#endif
1da177e4 2384}
2385
2386
2387#if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
2388/* Export the hotswap register via sysfs for ENUM# event monitoring */
2389#define VAL_LEN_MAX 11 /* 32-bit hex or dec stringified number + '\n' */
2390
461e6667 2391static DEFINE_MUTEX(mv64xxx_hs_lock);
2392
2393static ssize_t
2394mv64xxx_hs_reg_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
2395{
2396 u32 v;
2397 u8 save_exclude;
2398
2399 if (off > 0)
2400 return 0;
2401 if (count < VAL_LEN_MAX)
2402 return -EINVAL;
2403
461e6667 2404 if (mutex_lock_interruptible(&mv64xxx_hs_lock))
2405 return -ERESTARTSYS;
2406 save_exclude = mv64x60_pci_exclude_bridge;
2407 mv64x60_pci_exclude_bridge = 0;
2408 early_read_config_dword(&sysfs_hose_a, 0, PCI_DEVFN(0, 0),
2409 MV64360_PCICFG_CPCI_HOTSWAP, &v);
2410 mv64x60_pci_exclude_bridge = save_exclude;
461e6667 2411 mutex_unlock(&mv64xxx_hs_lock);
2412
2413 return sprintf(buf, "0x%08x\n", v);
2414}
2415
2416static ssize_t
2417mv64xxx_hs_reg_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
2418{
2419 u32 v;
2420 u8 save_exclude;
2421
2422 if (off > 0)
2423 return 0;
2424 if (count <= 0)
2425 return -EINVAL;
2426
2427 if (sscanf(buf, "%i", &v) == 1) {
461e6667 2428 if (mutex_lock_interruptible(&mv64xxx_hs_lock))
2429 return -ERESTARTSYS;
2430 save_exclude = mv64x60_pci_exclude_bridge;
2431 mv64x60_pci_exclude_bridge = 0;
2432 early_write_config_dword(&sysfs_hose_a, 0, PCI_DEVFN(0, 0),
2433 MV64360_PCICFG_CPCI_HOTSWAP, v);
2434 mv64x60_pci_exclude_bridge = save_exclude;
461e6667 2435 mutex_unlock(&mv64xxx_hs_lock);
2436 }
2437 else
2438 count = -EINVAL;
2439
2440 return count;
2441}
2442
2443static struct bin_attribute mv64xxx_hs_reg_attr = { /* Hotswap register */
2444 .attr = {
2445 .name = "hs_reg",
2446 .mode = S_IRUGO | S_IWUSR,
2447 },
2448 .size = VAL_LEN_MAX,
2449 .read = mv64xxx_hs_reg_read,
2450 .write = mv64xxx_hs_reg_write,
2451};
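
/*
 * Usage sketch (the sysfs path is illustrative only -- the actual directory
 * name depends on how mv64xxx_device is registered by the platform code):
 *
 *   cat /sys/devices/platform/mv64xxx/hs_reg          # read hotswap register
 *   echo 0x2 > /sys/devices/platform/mv64xxx/hs_reg   # write hotswap register
 */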
2452
2453/* Provide sysfs file indicating if this platform supports the hs_reg */
2454static ssize_t
2455mv64xxx_hs_reg_valid_show(struct device *dev, struct device_attribute *attr,
2456 char *buf)
2457{
2458 struct platform_device *pdev;
2459 struct mv64xxx_pdata *pdp;
2460 u32 v;
2461
2462 pdev = container_of(dev, struct platform_device, dev);
2463 pdp = (struct mv64xxx_pdata *)pdev->dev.platform_data;
2464
461e6667 2465 if (mutex_lock_interruptible(&mv64xxx_hs_lock))
2466 return -ERESTARTSYS;
2467 v = pdp->hs_reg_valid;
461e6667 2468 mutex_unlock(&mv64xxx_hs_lock);
2469
2470 return sprintf(buf, "%i\n", v);
2471}
2472static DEVICE_ATTR(hs_reg_valid, S_IRUGO, mv64xxx_hs_reg_valid_show, NULL);
2473
2474static int __init
2475mv64xxx_sysfs_init(void)
2476{
2477 sysfs_create_bin_file(&mv64xxx_device.dev.kobj, &mv64xxx_hs_reg_attr);
2478 sysfs_create_file(&mv64xxx_device.dev.kobj,&dev_attr_hs_reg_valid.attr);
2479 return 0;
2480}
2481subsys_initcall(mv64xxx_sysfs_init);
2482#endif