Commit | Line | Data |
---|---|---|
5a0015d6 | 1 | /* |
f30c2269 | 2 | * arch/xtensa/kernel/pci.c |
5a0015d6 CZ |
3 | * |
4 | * PCI bios-type initialisation for PCI machines | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify it | |
7 | * under the terms of the GNU General Public License as published by the | |
8 | * Free Software Foundation; either version 2 of the License, or (at your | |
9 | * option) any later version. | |
10 | * | |
11 | * Copyright (C) 2001-2005 Tensilica Inc. | |
12 | * | |
13 | * Based largely on work from Cort (ppc/kernel/pci.c) | |
14 | * IO functions copied from sparc. | |
15 | * | |
16 | * Chris Zankel <chris@zankel.net> | |
17 | * | |
18 | */ | |
19 | ||
5a0015d6 CZ |
20 | #include <linux/kernel.h> |
21 | #include <linux/pci.h> | |
22 | #include <linux/delay.h> | |
23 | #include <linux/string.h> | |
24 | #include <linux/init.h> | |
25 | #include <linux/sched.h> | |
26 | #include <linux/errno.h> | |
27 | #include <linux/bootmem.h> | |
28 | ||
29 | #include <asm/pci-bridge.h> | |
30 | #include <asm/platform.h> | |
31 | ||
32 | #undef DEBUG | |
33 | ||
34 | #ifdef DEBUG | |
35 | #define DBG(x...) printk(x) | |
36 | #else | |
37 | #define DBG(x...) | |
38 | #endif | |
39 | ||
40 | /* PCI Controller */ | |
41 | ||
42 | ||
43 | /* | |
44 | * pcibios_alloc_controller | |
45 | * pcibios_enable_device | |
46 | * pcibios_fixups | |
47 | * pcibios_align_resource | |
48 | * pcibios_fixup_bus | |
5a0015d6 CZ |
49 | * pci_bus_add_device |
50 | * pci_mmap_page_range | |
51 | */ | |
52 | ||
/*
 * Singly-linked list of all registered PCI controllers.  New controllers
 * are appended at *pci_ctrl_tail by pcibios_alloc_controller().
 */
struct pci_controller* pci_ctrl_head;
struct pci_controller** pci_ctrl_tail = &pci_ctrl_head;

/* Total number of PCI buses assigned after scanning all controllers. */
static int pci_bus_count;
57 | ||
5a0015d6 CZ |
58 | /* |
59 | * We need to avoid collisions with `mirrored' VGA ports | |
60 | * and other strange ISA hardware, so we always want the | |
61 | * addresses to be allocated in the 0x000-0x0ff region | |
62 | * modulo 0x400. | |
63 | * | |
64 | * Why? Because some silly external IO cards only decode | |
65 | * the low 10 bits of the IO address. The 0x00-0xff region | |
66 | * is reserved for motherboard devices that decode all 16 | |
67 | * bits, so it's ok to allocate at, say, 0x2800-0x28ff, | |
68 | * but we want to try to avoid allocating at 0x2900-0x2bff | |
69 | * which might have be mirrored at 0x0100-0x03ff.. | |
70 | */ | |
b26b2d49 | 71 | resource_size_t |
3b7a17fc DB |
72 | pcibios_align_resource(void *data, const struct resource *res, |
73 | resource_size_t size, resource_size_t align) | |
5a0015d6 CZ |
74 | { |
75 | struct pci_dev *dev = data; | |
b26b2d49 | 76 | resource_size_t start = res->start; |
5a0015d6 CZ |
77 | |
78 | if (res->flags & IORESOURCE_IO) { | |
5a0015d6 | 79 | if (size > 0x100) { |
fd95ee73 MF |
80 | pr_err("PCI: I/O Region %s/%d too large (%u bytes)\n", |
81 | pci_name(dev), dev->resource - res, | |
82 | size); | |
5a0015d6 CZ |
83 | } |
84 | ||
b26b2d49 | 85 | if (start & 0x300) |
5a0015d6 | 86 | start = (start + 0x3ff) & ~0x3ff; |
5a0015d6 | 87 | } |
b26b2d49 DB |
88 | |
89 | return start; | |
5a0015d6 CZ |
90 | } |
91 | ||
92 | int | |
93 | pcibios_enable_resources(struct pci_dev *dev, int mask) | |
94 | { | |
95 | u16 cmd, old_cmd; | |
96 | int idx; | |
97 | struct resource *r; | |
98 | ||
99 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | |
100 | old_cmd = cmd; | |
101 | for(idx=0; idx<6; idx++) { | |
102 | r = &dev->resource[idx]; | |
103 | if (!r->start && r->end) { | |
104 | printk (KERN_ERR "PCI: Device %s not available because " | |
9ec55a9b | 105 | "of resource collisions\n", pci_name(dev)); |
5a0015d6 CZ |
106 | return -EINVAL; |
107 | } | |
108 | if (r->flags & IORESOURCE_IO) | |
109 | cmd |= PCI_COMMAND_IO; | |
110 | if (r->flags & IORESOURCE_MEM) | |
111 | cmd |= PCI_COMMAND_MEMORY; | |
112 | } | |
113 | if (dev->resource[PCI_ROM_RESOURCE].start) | |
114 | cmd |= PCI_COMMAND_MEMORY; | |
115 | if (cmd != old_cmd) { | |
116 | printk("PCI: Enabling device %s (%04x -> %04x)\n", | |
9ec55a9b | 117 | pci_name(dev), old_cmd, cmd); |
5a0015d6 CZ |
118 | pci_write_config_word(dev, PCI_COMMAND, cmd); |
119 | } | |
120 | return 0; | |
121 | } | |
122 | ||
123 | struct pci_controller * __init pcibios_alloc_controller(void) | |
124 | { | |
125 | struct pci_controller *pci_ctrl; | |
126 | ||
127 | pci_ctrl = (struct pci_controller *)alloc_bootmem(sizeof(*pci_ctrl)); | |
128 | memset(pci_ctrl, 0, sizeof(struct pci_controller)); | |
129 | ||
130 | *pci_ctrl_tail = pci_ctrl; | |
131 | pci_ctrl_tail = &pci_ctrl->next; | |
132 | ||
133 | return pci_ctrl; | |
134 | } | |
135 | ||
7ec303a7 BH |
136 | static void __init pci_controller_apertures(struct pci_controller *pci_ctrl, |
137 | struct list_head *resources) | |
138 | { | |
139 | struct resource *res; | |
140 | unsigned long io_offset; | |
141 | int i; | |
142 | ||
143 | io_offset = (unsigned long)pci_ctrl->io_space.base; | |
144 | res = &pci_ctrl->io_resource; | |
145 | if (!res->flags) { | |
146 | if (io_offset) | |
147 | printk (KERN_ERR "I/O resource not set for host" | |
148 | " bridge %d\n", pci_ctrl->index); | |
149 | res->start = 0; | |
150 | res->end = IO_SPACE_LIMIT; | |
151 | res->flags = IORESOURCE_IO; | |
152 | } | |
153 | res->start += io_offset; | |
154 | res->end += io_offset; | |
4ba2aef3 | 155 | pci_add_resource_offset(resources, res, io_offset); |
7ec303a7 BH |
156 | |
157 | for (i = 0; i < 3; i++) { | |
158 | res = &pci_ctrl->mem_resources[i]; | |
159 | if (!res->flags) { | |
160 | if (i > 0) | |
161 | continue; | |
162 | printk(KERN_ERR "Memory resource not set for " | |
163 | "host bridge %d\n", pci_ctrl->index); | |
164 | res->start = 0; | |
165 | res->end = ~0U; | |
166 | res->flags = IORESOURCE_MEM; | |
167 | } | |
168 | pci_add_resource(resources, res); | |
169 | } | |
170 | } | |
171 | ||
5a0015d6 CZ |
172 | static int __init pcibios_init(void) |
173 | { | |
174 | struct pci_controller *pci_ctrl; | |
7ec303a7 | 175 | struct list_head resources; |
5a0015d6 | 176 | struct pci_bus *bus; |
b97ea289 | 177 | int next_busno = 0, ret; |
5a0015d6 CZ |
178 | |
179 | printk("PCI: Probing PCI hardware\n"); | |
180 | ||
181 | /* Scan all of the recorded PCI controllers. */ | |
182 | for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) { | |
183 | pci_ctrl->last_busno = 0xff; | |
7ec303a7 BH |
184 | INIT_LIST_HEAD(&resources); |
185 | pci_controller_apertures(pci_ctrl, &resources); | |
186 | bus = pci_scan_root_bus(NULL, pci_ctrl->first_busno, | |
187 | pci_ctrl->ops, pci_ctrl, &resources); | |
b97ea289 YW |
188 | if (!bus) |
189 | continue; | |
190 | ||
5a0015d6 | 191 | pci_ctrl->bus = bus; |
b918c62e | 192 | pci_ctrl->last_busno = bus->busn_res.end; |
5a0015d6 CZ |
193 | if (next_busno <= pci_ctrl->last_busno) |
194 | next_busno = pci_ctrl->last_busno+1; | |
195 | } | |
196 | pci_bus_count = next_busno; | |
b97ea289 YW |
197 | ret = platform_pcibios_fixup(); |
198 | if (ret) | |
199 | return ret; | |
5a0015d6 | 200 | |
b97ea289 YW |
201 | for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) { |
202 | if (pci_ctrl->bus) | |
203 | pci_bus_add_devices(pci_ctrl->bus); | |
204 | } | |
205 | ||
206 | return 0; | |
5a0015d6 CZ |
207 | } |
208 | ||
209 | subsys_initcall(pcibios_init); | |
210 | ||
/*
 * Per-bus fixup hook called by the PCI core after each bus is scanned.
 * No per-bus fixups are required on this platform.
 */
void pcibios_fixup_bus(struct pci_bus *bus)
{
}
214 | ||
/*
 * Arch hook invoked by pci_set_master(); the generic code has already
 * set the bus-master bit, and no extra setup is needed here.
 */
void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
219 | ||
5a0015d6 CZ |
220 | int pcibios_enable_device(struct pci_dev *dev, int mask) |
221 | { | |
222 | u16 cmd, old_cmd; | |
223 | int idx; | |
224 | struct resource *r; | |
225 | ||
226 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | |
227 | old_cmd = cmd; | |
228 | for (idx=0; idx<6; idx++) { | |
229 | r = &dev->resource[idx]; | |
230 | if (!r->start && r->end) { | |
231 | printk(KERN_ERR "PCI: Device %s not available because " | |
9ec55a9b | 232 | "of resource collisions\n", pci_name(dev)); |
5a0015d6 CZ |
233 | return -EINVAL; |
234 | } | |
235 | if (r->flags & IORESOURCE_IO) | |
236 | cmd |= PCI_COMMAND_IO; | |
237 | if (r->flags & IORESOURCE_MEM) | |
238 | cmd |= PCI_COMMAND_MEMORY; | |
239 | } | |
240 | if (cmd != old_cmd) { | |
241 | printk("PCI: Enabling device %s (%04x -> %04x)\n", | |
9ec55a9b | 242 | pci_name(dev), old_cmd, cmd); |
5a0015d6 CZ |
243 | pci_write_config_word(dev, PCI_COMMAND, cmd); |
244 | } | |
245 | ||
246 | return 0; | |
247 | } | |
248 | ||
#ifdef CONFIG_PROC_FS

/*
 * Return the index of the PCI controller that owns device dev,
 * for /proc/bus/pci reporting.
 */
int
pci_controller_num(struct pci_dev *dev)
{
	struct pci_controller *pci_ctrl = dev->sysdata;

	return pci_ctrl->index;
}

#endif /* CONFIG_PROC_FS */
263 | ||
/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap.  They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static __inline__ int
__pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
		       enum pci_mmap_state mmap_state)
{
	struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
	/* User-requested offset in bytes (bus-relative for I/O space). */
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long io_offset = 0;
	int i, res_bit;

	if (pci_ctrl == 0)
		return -EINVAL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
		res_bit = IORESOURCE_MEM;
	} else {
		/*
		 * NOTE(review): io_space.base appears to be the bus->CPU
		 * offset applied to I/O resources in
		 * pci_controller_apertures() - confirm against the
		 * platform's controller setup.
		 */
		io_offset = (unsigned long)pci_ctrl->io_space.base;
		offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/*
		 * In the range of this resource?  The start is page-masked
		 * so a resource beginning mid-page can still be mapped.
		 */
		if (offset < (rp->start & PAGE_MASK) || offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			offset += pci_ctrl->io_space.start - io_offset;
		vma->vm_pgoff = offset >> PAGE_SHIFT;
		return 0;
	}

	return -EINVAL;
}
331 | ||
5a0015d6 CZ |
332 | /* |
333 | * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci | |
334 | * device mapping. | |
335 | */ | |
336 | static __inline__ void | |
337 | __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma, | |
338 | enum pci_mmap_state mmap_state, int write_combine) | |
339 | { | |
340 | int prot = pgprot_val(vma->vm_page_prot); | |
341 | ||
342 | /* Set to write-through */ | |
2e6ee5ec | 343 | prot = (prot & _PAGE_CA_MASK) | _PAGE_CA_WT; |
5a0015d6 CZ |
344 | #if 0 |
345 | if (!write_combine) | |
346 | prot |= _PAGE_WRITETHRU; | |
347 | #endif | |
348 | vma->vm_page_prot = __pgprot(prot); | |
349 | } | |
350 | ||
351 | /* | |
352 | * Perform the actual remap of the pages for a PCI device mapping, as | |
353 | * appropriate for this architecture. The region in the process to map | |
354 | * is described by vm_start and vm_end members of VMA, the base physical | |
355 | * address is found in vm_pgoff. | |
356 | * The pci device structure is provided so that architectures may make mapping | |
357 | * decisions on a per-device or per-bus basis. | |
358 | * | |
359 | * Returns a negative error code on failure, zero on success. | |
360 | */ | |
361 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |
362 | enum pci_mmap_state mmap_state, | |
363 | int write_combine) | |
364 | { | |
365 | int ret; | |
366 | ||
367 | ret = __pci_mmap_make_offset(dev, vma, mmap_state); | |
368 | if (ret < 0) | |
369 | return ret; | |
370 | ||
5a0015d6 CZ |
371 | __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine); |
372 | ||
288a60cf CZ |
373 | ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, |
374 | vma->vm_end - vma->vm_start,vma->vm_page_prot); | |
5a0015d6 CZ |
375 | |
376 | return ret; | |
377 | } |