/*
 *	linux/arch/alpha/kernel/core_irongate.c
 *
 * Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com).
 *
 *	Copyright (C) 1999 Alpha Processor, Inc.,
 *		(David Daniel, Stig Telfer, Soohoon Lee)
 *
 * Code common to all IRONGATE core logic chips.
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_irongate.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "pci_impl.h"

/*
 * BIOS32-style PCI interface:
 */

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif

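/* Set in irongate_init_arch() to whichever chipset CSR holds the
   memory/ECC status bits for the bridge in use (AMD-751 vs. AMD-761). */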
igcsr32 *IronECC;

/*
 * Given a bus, device, and function number, compute the resulting
 * configuration space address accordingly.  It is therefore not safe
 * to have concurrent invocations to configuration space access
 * routines, but there really shouldn't be any need for this.
 *
 *	addr[31:24]		reserved
 *	addr[23:16]		bus number (8 bits = 256 possible buses)
 *	addr[15:11]		Device number (5 bits)
 *	addr[10: 8]		function number
 *	addr[ 7: 2]		register number
 *
 * For IRONGATE:
 *    if (bus = addr[23:16]) == 0
 *    then
 *	  type 0 config cycle:
 *	      addr_on_pci[31:11] = id selection for device = addr[15:11]
 *	      addr_on_pci[10: 2] = addr[10: 2] ???
 *	      addr_on_pci[ 1: 0] = 00
 *    else
 *	  type 1 config cycle (pass on with no decoding):
 *	      addr_on_pci[31:24] = 0
 *	      addr_on_pci[23: 2] = addr[23: 2]
 *	      addr_on_pci[ 1: 0] = 01
 *    fi
 *
 * Notes:
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 *
 *	The register selects a DWORD (32 bit) register offset.  Hence it
 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *	bits.
 */

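/*
 * For example (illustrative): bus 1, device 4, function 0, register 0x10
 * gives device_fn = 0x20, so addr = (1 << 16) | (0x20 << 8) | 0x10 = 0x12010,
 * OR'd with IRONGATE_CONF; since bus != 0 it goes out as a type 1 cycle.
 */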
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	unsigned long addr;
	u8 bus = pbus->number;

	DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
		 "pci_addr=0x%p, type1=0x%p)\n",
		 bus, device_fn, where, pci_addr, type1));

	*type1 = (bus != 0);

	addr = (bus << 16) | (device_fn << 8) | where;
	addr |= IRONGATE_CONF;

	*pci_addr = addr;
	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}

static int
irongate_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int
irongate_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		      int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

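	/* Each store below is followed by mb() and a dummy read back so
	   that the write actually reaches configuration space before we
	   return. */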
	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops irongate_pci_ops =
{
	.read =		irongate_read_config,
	.write =	irongate_write_config,
};

int
irongate_pci_clr_err(void)
{
	unsigned int nmi_ctl = 0;
	unsigned int IRONGATE_jd;

again:
	IRONGATE_jd = IRONGATE0->stat_cmd;
	printk("Iron stat_cmd %x\n", IRONGATE_jd);
	IRONGATE0->stat_cmd = IRONGATE_jd; /* write again clears error bits */
	mb();
	IRONGATE_jd = IRONGATE0->stat_cmd;  /* re-read to force write */

	IRONGATE_jd = *IronECC;
	printk("Iron ECC %x\n", IRONGATE_jd);
	*IronECC = IRONGATE_jd; /* write again clears error bits */
	mb();
	IRONGATE_jd = *IronECC;  /* re-read to force write */

	/* Clear ALI NMI */
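	/* (On the ALI south bridge, as on other PC-style chipsets, bits
	   2 and 3 of system control port 0x61 gate the parity and IOCHK
	   NMI sources; pulsing them high and back low acknowledges a
	   latched NMI.) */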
	nmi_ctl = inb(0x61);
	nmi_ctl |= 0x0c;
	outb(nmi_ctl, 0x61);
	nmi_ctl &= ~0x0c;
	outb(nmi_ctl, 0x61);

	IRONGATE_jd = *IronECC;
	if (IRONGATE_jd & 0x300) goto again;

	return 0;
}

#define IRONGATE_3GB 0xc0000000UL

/* On Albacore (aka UP1500) with 4GB of RAM we have to reserve some
   memory for PCI.  At this point we just reserve memory above 3GB.  Most
   of this memory will be freed after PCI setup is done. */
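/* pci_mem below is the first 16MB boundary above the top of memory
   (capped at the 3GB mark); the range from there up to memtop is what
   gets reserved. */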
static void __init
albacore_init_arch(void)
{
	unsigned long memtop = max_low_pfn << PAGE_SHIFT;
	unsigned long pci_mem = (memtop + 0x1000000UL) & ~0xffffffUL;
	struct percpu_struct *cpu;
	int pal_rev, pal_var;

	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
	pal_rev = cpu->pal_revision & 0xffff;
	pal_var = (cpu->pal_revision >> 16) & 0xff;

	/* Consoles earlier than A5.6-18 (OSF PALcode v1.62-2) set up
	   the CPU incorrectly (leave speculative stores enabled),
	   which causes memory corruption under certain conditions.
	   Issue a warning for such consoles. */
	if (alpha_using_srm &&
	    (pal_rev < 0x13e || (pal_rev == 0x13e && pal_var < 2)))
		printk(KERN_WARNING "WARNING! Upgrade to SRM A5.6-19 "
		       "or later\n");

	if (pci_mem > IRONGATE_3GB)
		pci_mem = IRONGATE_3GB;
	IRONGATE0->pci_mem = pci_mem;
	alpha_mv.min_mem_address = pci_mem;
	if (memtop > pci_mem) {
#ifdef CONFIG_BLK_DEV_INITRD
		extern unsigned long initrd_start, initrd_end;
		extern void *move_initrd(unsigned long);

		/* Move the initrd out of the way. */
		if (initrd_end && __pa(initrd_end) > pci_mem) {
			unsigned long size;

			size = initrd_end - initrd_start;
			free_bootmem_node(NODE_DATA(0), __pa(initrd_start),
					  PAGE_ALIGN(size));
			if (!move_initrd(pci_mem))
				printk("irongate_init_arch: initrd too big "
				       "(%ldK)\ndisabling initrd\n",
				       size / 1024);
		}
#endif
		reserve_bootmem_node(NODE_DATA(0), pci_mem, memtop -
				     pci_mem, BOOTMEM_DEFAULT);
		printk("irongate_init_arch: temporarily reserving "
		       "region %08lx-%08lx for PCI\n", pci_mem, memtop - 1);
	}
}

static void __init
irongate_setup_agp(void)
{
	/* Disable the GART window.  AGPGART doesn't work due to
	   still-unresolved memory coherency issues... */
	IRONGATE0->agpva = IRONGATE0->agpva & ~0xf;
	alpha_agpgart_size = 0;
}

void __init
irongate_init_arch(void)
{
	struct pci_controller *hose;
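	/* A device ID above 0x7006 (the original AMD-751) indicates the
	   AMD-761 used on the Albacore/UP1500 board. */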
	int amd761 = (IRONGATE0->dev_vendor >> 16) > 0x7006; /* Albacore? */

	IronECC = amd761 ? &IRONGATE0->bacsr54_eccms761 : &IRONGATE0->dramms;

	irongate_pci_clr_err();

	if (amd761)
		albacore_init_arch();

	irongate_setup_agp();

	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	/* This is for userland consumption.  For some reason, the 40-bit
	   PIO bias that we use in the kernel through KSEG didn't work for
	   the page table based user mappings.  So make sure we get the
	   43-bit PIO bias. */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base
	  = (IRONGATE_MEM & 0xffffffffffUL) | 0x80000000000UL;
	hose->dense_io_base
	  = (IRONGATE_IO & 0xffffffffffUL) | 0x80000000000UL;

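	/* No scatter-gather windows; DMA is direct-mapped 1:1 over the
	   low 4GB of the bus. */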
	hose->sg_isa = hose->sg_pci = NULL;
	__direct_map_base = 0;
	__direct_map_size = 0xffffffff;
}

/*
 * IO map and AGP support
 */
#include <linux/vmalloc.h>
#include <linux/agp_backend.h>
#include <linux/agpgart.h>
#include <linux/export.h>
#include <asm/pgalloc.h>

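/*
 * These macros assume a two-level GATT: bits [31:22] of an aperture bus
 * address select one GATT page (1024 four-byte entries covering 4MB),
 * and bits [21:12] select the entry within that page.
 */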
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr))

#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (gatt_pages[GET_PAGE_DIR_IDX(addr)])

void __iomem *
irongate_ioremap(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long baddr, last;
	u32 *mmio_regs, *gatt_pages, *cur_gatt, pte;
	unsigned long gart_bus_addr;

	if (!alpha_agpgart_size)
		return (void __iomem *)(addr + IRONGATE_MEM);

	gart_bus_addr = (unsigned long)IRONGATE0->bar0 &
			PCI_BASE_ADDRESS_MEM_MASK;

	/*
	 * Check whether the request falls within the AGP aperture...
	 */
	do {
		/*
		 * Check the AGP area
		 */
		if (addr >= gart_bus_addr && addr + size - 1 <
		    gart_bus_addr + alpha_agpgart_size)
			break;

		/*
		 * Not found - assume legacy ioremap
		 */
		return (void __iomem *)(addr + IRONGATE_MEM);
	} while(0);

	mmio_regs = (u32 *)(((unsigned long)IRONGATE0->bar1 &
			PCI_BASE_ADDRESS_MEM_MASK) + IRONGATE_MEM);

	gatt_pages = (u32 *)(phys_to_virt(mmio_regs[1]));	/* FIXME */

	/*
	 * Adjust the limits (mappings must be page aligned)
	 */
	if (addr & ~PAGE_MASK) {
		printk("AGP ioremap failed... addr not page aligned (0x%lx)\n",
		       addr);
		return (void __iomem *)(addr + IRONGATE_MEM);
	}
	last = addr + size - 1;
	size = PAGE_ALIGN(last) - addr;

#if 0
	printk("irongate_ioremap(0x%lx, 0x%lx)\n", addr, size);
	printk("irongate_ioremap:  gart_bus_addr  0x%lx\n", gart_bus_addr);
	printk("irongate_ioremap:  gart_aper_size 0x%lx\n", gart_aper_size);
	printk("irongate_ioremap:  mmio_regs      %p\n", mmio_regs);
	printk("irongate_ioremap:  gatt_pages     %p\n", gatt_pages);

	for(baddr = addr; baddr <= last; baddr += PAGE_SIZE)
	{
		cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
		pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;
		printk("irongate_ioremap: cur_gatt %p pte 0x%x\n",
		       cur_gatt, pte);
	}
#endif

	/*
	 * Map it
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area) return NULL;

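	/* For each aperture page, fetch its GATT entry (the low bit,
	   masked off here, serves as the valid flag) and map the backing
	   physical page into the new vmalloc area. */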
	for(baddr = addr, vaddr = (unsigned long)area->addr;
	    baddr <= last;
	    baddr += PAGE_SIZE, vaddr += PAGE_SIZE)
	{
		cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
		pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;

		if (__alpha_remap_area_pages(vaddr,
					     pte, PAGE_SIZE, 0)) {
			printk("AGP ioremap: FAILED to map...\n");
			vfree(area->addr);
			return NULL;
		}
	}

	flush_tlb_all();

	vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
#if 0
	printk("irongate_ioremap(0x%lx, 0x%lx) returning 0x%lx\n",
	       addr, size, vaddr);
#endif
	return (void __iomem *)vaddr;
}
EXPORT_SYMBOL(irongate_ioremap);

void
irongate_iounmap(volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (((long)addr >> 41) == -2)
		return;	/* kseg map, nothing to do */
	if (addr)
		return vfree((void *)(PAGE_MASK & addr));
}
EXPORT_SYMBOL(irongate_iounmap);