Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
a7fdd90b | 2 | * Common prep/chrp pci routines. -- Cort |
1da177e4 LT |
3 | */ |
4 | ||
1da177e4 LT |
5 | #include <linux/kernel.h> |
6 | #include <linux/pci.h> | |
7 | #include <linux/delay.h> | |
8 | #include <linux/string.h> | |
9 | #include <linux/init.h> | |
10 | #include <linux/capability.h> | |
11 | #include <linux/sched.h> | |
12 | #include <linux/errno.h> | |
13 | #include <linux/bootmem.h> | |
14 | ||
15 | #include <asm/processor.h> | |
16 | #include <asm/io.h> | |
17 | #include <asm/prom.h> | |
18 | #include <asm/sections.h> | |
19 | #include <asm/pci-bridge.h> | |
20 | #include <asm/byteorder.h> | |
21 | #include <asm/irq.h> | |
22 | #include <asm/uaccess.h> | |
b60fc8bb | 23 | #include <asm/machdep.h> |
1da177e4 LT |
24 | |
25 | #undef DEBUG | |
26 | ||
27 | #ifdef DEBUG | |
28 | #define DBG(x...) printk(x) | |
29 | #else | |
30 | #define DBG(x...) | |
31 | #endif | |
32 | ||
33 | unsigned long isa_io_base = 0; | |
34 | unsigned long isa_mem_base = 0; | |
35 | unsigned long pci_dram_offset = 0; | |
36 | int pcibios_assign_bus_offset = 1; | |
37 | ||
38 | void pcibios_make_OF_bus_map(void); | |
39 | ||
40 | static int pci_relocate_bridge_resource(struct pci_bus *bus, int i); | |
41 | static int probe_resource(struct pci_bus *parent, struct resource *pr, | |
42 | struct resource *res, struct resource **conflict); | |
43 | static void update_bridge_base(struct pci_bus *bus, int i); | |
44 | static void pcibios_fixup_resources(struct pci_dev* dev); | |
45 | static void fixup_broken_pcnet32(struct pci_dev* dev); | |
46 | static int reparent_resources(struct resource *parent, struct resource *res); | |
1da177e4 | 47 | static void fixup_cpc710_pci64(struct pci_dev* dev); |
1da177e4 | 48 | |
a7fdd90b | 49 | /* By default, we don't re-assign bus numbers. |
1da177e4 | 50 | */ |
399fe2bd | 51 | int pci_assign_all_buses; |
1da177e4 LT |
52 | |
53 | struct pci_controller* hose_head; | |
54 | struct pci_controller** hose_tail = &hose_head; | |
55 | ||
56 | static int pci_bus_count; | |
57 | ||
1da177e4 LT |
58 | static void |
59 | fixup_broken_pcnet32(struct pci_dev* dev) | |
60 | { | |
61 | if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) { | |
62 | dev->vendor = PCI_VENDOR_ID_AMD; | |
63 | pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD); | |
1da177e4 LT |
64 | } |
65 | } | |
66 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32); | |
67 | ||
68 | static void | |
69 | fixup_cpc710_pci64(struct pci_dev* dev) | |
70 | { | |
71 | /* Hide the PCI64 BARs from the kernel as their content doesn't | |
72 | * fit well in the resource management | |
73 | */ | |
74 | dev->resource[0].start = dev->resource[0].end = 0; | |
75 | dev->resource[0].flags = 0; | |
76 | dev->resource[1].start = dev->resource[1].end = 0; | |
77 | dev->resource[1].flags = 0; | |
78 | } | |
79 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64); | |
80 | ||
/*
 * Per-device header fixup, run for every device (PCI_ANY_ID below).
 * Marks completely unprogrammed BARs (end == 0xffffffff) as
 * IORESOURCE_UNSET so they get assigned later, and translates the
 * remaining BARs from bus addresses to CPU addresses by adding the
 * hose's memory or I/O offset.  Finishes with the machine-specific
 * fixup hook, if any.
 */
static void
pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
	int i;
	unsigned long offset;

	if (!hose) {
		printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		if (!res->flags)
			continue;
		if (res->end == 0xffffffff) {
			DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
			    pci_name(dev), i,
			    (unsigned long long)res->start,
			    (unsigned long long)res->end);
			/* Keep only the size (end - start); a new address
			 * is assigned later in pcibios_assign_resources(). */
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}
		offset = 0;
		if (res->flags & IORESOURCE_MEM) {
			offset = hose->pci_mem_offset;
		} else if (res->flags & IORESOURCE_IO) {
			offset = (unsigned long) hose->io_base_virt
				- isa_io_base;
		}
		if (offset != 0) {
			/* Shift the BAR into the CPU's view of the bus. */
			res->start += offset;
			res->end += offset;
#ifdef DEBUG
			printk("Fixup res %d (%lx) of dev %s: %lx -> %lx\n",
			       i, res->flags, pci_name(dev),
			       res->start - offset, res->start);
#endif
		}
	}

	/* Call machine specific resource fixup */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
129 | ||
130 | void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, | |
131 | struct resource *res) | |
132 | { | |
133 | unsigned long offset = 0; | |
134 | struct pci_controller *hose = dev->sysdata; | |
135 | ||
136 | if (hose && res->flags & IORESOURCE_IO) | |
137 | offset = (unsigned long)hose->io_base_virt - isa_io_base; | |
138 | else if (hose && res->flags & IORESOURCE_MEM) | |
139 | offset = hose->pci_mem_offset; | |
140 | region->start = res->start - offset; | |
141 | region->end = res->end - offset; | |
142 | } | |
143 | EXPORT_SYMBOL(pcibios_resource_to_bus); | |
144 | ||
43c34735 DB |
145 | void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, |
146 | struct pci_bus_region *region) | |
147 | { | |
148 | unsigned long offset = 0; | |
149 | struct pci_controller *hose = dev->sysdata; | |
150 | ||
151 | if (hose && res->flags & IORESOURCE_IO) | |
152 | offset = (unsigned long)hose->io_base_virt - isa_io_base; | |
153 | else if (hose && res->flags & IORESOURCE_MEM) | |
154 | offset = hose->pci_mem_offset; | |
155 | res->start = region->start + offset; | |
156 | res->end = region->end + offset; | |
157 | } | |
158 | EXPORT_SYMBOL(pcibios_bus_to_resource); | |
159 | ||
1da177e4 LT |
160 | /* |
161 | * We need to avoid collisions with `mirrored' VGA ports | |
162 | * and other strange ISA hardware, so we always want the | |
163 | * addresses to be allocated in the 0x000-0x0ff region | |
164 | * modulo 0x400. | |
165 | * | |
166 | * Why? Because some silly external IO cards only decode | |
167 | * the low 10 bits of the IO address. The 0x00-0xff region | |
168 | * is reserved for motherboard devices that decode all 16 | |
169 | * bits, so it's ok to allocate at, say, 0x2800-0x28ff, | |
170 | * but we want to try to avoid allocating at 0x2900-0x2bff | |
171 | * which might have be mirrored at 0x0100-0x03ff.. | |
172 | */ | |
e31dd6e4 GKH |
173 | void pcibios_align_resource(void *data, struct resource *res, |
174 | resource_size_t size, resource_size_t align) | |
1da177e4 LT |
175 | { |
176 | struct pci_dev *dev = data; | |
177 | ||
178 | if (res->flags & IORESOURCE_IO) { | |
e31dd6e4 | 179 | resource_size_t start = res->start; |
1da177e4 LT |
180 | |
181 | if (size > 0x100) { | |
182 | printk(KERN_ERR "PCI: I/O Region %s/%d too large" | |
685143ac GKH |
183 | " (%lld bytes)\n", pci_name(dev), |
184 | dev->resource - res, (unsigned long long)size); | |
1da177e4 LT |
185 | } |
186 | ||
187 | if (start & 0x300) { | |
188 | start = (start + 0x3ff) & ~0x3ff; | |
189 | res->start = start; | |
190 | } | |
191 | } | |
192 | } | |
193 | EXPORT_SYMBOL(pcibios_align_resource); | |
194 | ||
195 | /* | |
196 | * Handle resources of PCI devices. If the world were perfect, we could | |
197 | * just allocate all the resource regions and do nothing more. It isn't. | |
198 | * On the other hand, we cannot just re-allocate all devices, as it would | |
199 | * require us to know lots of host bridge internals. So we attempt to | |
200 | * keep as much of the original configuration as possible, but tweak it | |
201 | * when it's found to be wrong. | |
202 | * | |
203 | * Known BIOS problems we have to work around: | |
204 | * - I/O or memory regions not configured | |
205 | * - regions configured, but not enabled in the command register | |
206 | * - bogus I/O addresses above 64K used | |
207 | * - expansion ROMs left enabled (this may sound harmless, but given | |
208 | * the fact the PCI specs explicitly allow address decoders to be | |
209 | * shared between expansion ROMs and other resource regions, it's | |
210 | * at least dangerous) | |
211 | * | |
212 | * Our solution: | |
213 | * (1) Allocate resources for all buses behind PCI-to-PCI bridges. | |
214 | * This gives us fixed barriers on where we can allocate. | |
215 | * (2) Allocate resources for all enabled devices. If there is | |
216 | * a collision, just mark the resource as unallocated. Also | |
217 | * disable expansion ROMs during this step. | |
218 | * (3) Try to allocate resources for disabled devices. If the | |
219 | * resources were assigned correctly, everything goes well, | |
220 | * if they weren't, they won't disturb allocation of other | |
221 | * resources. | |
222 | * (4) Assign new addresses to resources which were either | |
223 | * not configured at all or misconfigured. If explicitly | |
224 | * requested by the user, configure expansion ROM address | |
225 | * as well. | |
226 | */ | |
227 | ||
/*
 * Claim the (up to four) bridge windows of every bus in bus_list, and
 * recursively of all child buses, in the generic resource tree.  On a
 * conflict, first try to move the conflicting entries underneath the
 * bridge window (reparent_resources); failing that, relocate or drop
 * the window.
 */
static void __init
pcibios_allocate_bus_resources(struct list_head *bus_list)
{
	struct pci_bus *bus;
	int i;
	struct resource *res, *pr;

	/* Depth-First Search on bus tree */
	list_for_each_entry(bus, bus_list, node) {
		for (i = 0; i < 4; ++i) {
			if ((res = bus->resource[i]) == NULL || !res->flags
			    || res->start > res->end)
				continue;
			if (bus->parent == NULL)
				/* Host bridge: claim directly from the
				 * root I/O or memory resource. */
				pr = (res->flags & IORESOURCE_IO)?
					&ioport_resource: &iomem_resource;
			else {
				pr = pci_find_parent_resource(bus->self, res);
				if (pr == res) {
					/* this happens when the generic PCI
					 * code (wrongly) decides that this
					 * bridge is transparent -- paulus
					 */
					continue;
				}
			}

			DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
			    (unsigned long long)res->start,
			    (unsigned long long)res->end, res->flags, pr);
			if (pr) {
				if (request_resource(pr, res) == 0)
					continue;
				/*
				 * Must be a conflict with an existing entry.
				 * Move that entry (or entries) under the
				 * bridge resource and try again.
				 */
				if (reparent_resources(pr, res) == 0)
					continue;
			}
			printk(KERN_ERR "PCI: Cannot allocate resource region "
			       "%d of PCI bridge %d\n", i, bus->number);
			/* Last resort: move the window; if even that
			 * fails, forget the window entirely. */
			if (pci_relocate_bridge_resource(bus, i))
				bus->resource[i] = NULL;
		}
		pcibios_allocate_bus_resources(&bus->children);
	}
}
277 | ||
/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 * Returns 0 on success, -1 if a conflicting child is not completely
 * contained in res (so the move cannot be done) or no conflicting
 * children were found at all.
 */
static int __init
reparent_resources(struct resource *parent, struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	/* Children are kept sorted by address; find the contiguous run
	 * of children overlapping res.  firstpp points at the link to
	 * the first such child, pp ends up at the link following the
	 * last one. */
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	/* Splice res into parent's child list in place of the run, and
	 * hang the run off res->child. */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
		    p->name, (unsigned long long)p->start,
		    (unsigned long long)p->end, res->name);
	}
	return 0;
}
313 | ||
/*
 * A bridge has been allocated a range which is outside the range
 * of its parent bridge, so it needs to be moved.
 * Searches downward from the top of a suitable parent window for a
 * gap of the same size, then programs the bridge with the new range.
 * Returns 0 on success, -1 if no new home could be found.
 */
static int __init
pci_relocate_bridge_resource(struct pci_bus *bus, int i)
{
	struct resource *res, *pr, *conflict;
	unsigned long try, size;
	int j;
	struct pci_bus *parent = bus->parent;

	if (parent == NULL) {
		/* shouldn't ever happen */
		printk(KERN_ERR "PCI: can't move host bridge resource\n");
		return -1;
	}
	res = bus->resource[i];
	if (res == NULL)
		return -1;
	pr = NULL;
	/* Pick the parent window to allocate from: must match in
	 * IO-vs-MEM; an exact prefetch match is preferred, but a
	 * non-prefetchable window may host a prefetchable range. */
	for (j = 0; j < 4; j++) {
		struct resource *r = parent->resource[j];
		if (!r)
			continue;
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
			pr = r;
			break;
		}
		if (res->flags & IORESOURCE_PREFETCH)
			pr = r;
	}
	if (pr == NULL)
		return -1;
	size = res->end - res->start;
	if (pr->start > pr->end || size > pr->end - pr->start)
		return -1;
	/* Scan downward from the top of the window, jumping below each
	 * conflicting range until a free slot is found. */
	try = pr->end;
	for (;;) {
		res->start = try - size;
		res->end = try;
		if (probe_resource(bus->parent, pr, res, &conflict) == 0)
			break;
		if (conflict->start <= pr->start + size)
			return -1;
		try = conflict->start - 1;
	}
	if (request_resource(pr, res)) {
		DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
		    (unsigned long long)res->start,
		    (unsigned long long)res->end);
		return -1;	/* "can't happen" */
	}
	update_bridge_base(bus, i);
	printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
	       bus->number, i, (unsigned long long)res->start,
	       (unsigned long long)res->end);
	return 0;
}
375 | ||
/*
 * Check whether res would collide with anything that lives (or will
 * live) under the parent window pr: existing children of pr, bridge
 * windows of sibling buses, and BARs of sibling devices.  Returns 1
 * and sets *conflict on collision, 0 if the range is free.
 */
static int __init
probe_resource(struct pci_bus *parent, struct resource *pr,
		struct resource *res, struct resource **conflict)
{
	struct pci_bus *bus;
	struct pci_dev *dev;
	struct resource *r;
	int i;

	/* Resources already registered under pr. */
	for (r = pr->child; r != NULL; r = r->sibling) {
		if (r->end >= res->start && res->end >= r->start) {
			*conflict = r;
			return 1;
		}
	}
	/* Bridge windows of child buses that would map into pr. */
	list_for_each_entry(bus, &parent->children, node) {
		for (i = 0; i < 4; ++i) {
			if ((r = bus->resource[i]) == NULL)
				continue;
			if (!r->flags || r->start > r->end || r == res)
				continue;
			if (pci_find_parent_resource(bus->self, r) != pr)
				continue;
			if (r->end >= res->start && res->end >= r->start) {
				*conflict = r;
				return 1;
			}
		}
	}
	/* BARs of devices directly on the parent bus. */
	list_for_each_entry(dev, &parent->devices, bus_list) {
		for (i = 0; i < 6; ++i) {
			r = &dev->resource[i];
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;
			if (pci_find_parent_resource(dev, r) != pr)
				continue;
			if (r->end >= res->start && res->end >= r->start) {
				*conflict = r;
				return 1;
			}
		}
	}
	return 0;
}
420 | ||
/*
 * Write the (possibly relocated) window i of bus back into the config
 * space of the upstream bridge: I/O base/limit (16- or 32-bit form),
 * memory base/limit, or prefetchable memory base/limit, chosen from
 * res->flags.  I/O and memory decoding is disabled around the update
 * and the original command register restored afterwards.
 */
static void __init
update_bridge_base(struct pci_bus *bus, int i)
{
	struct resource *res = bus->resource[i];
	u8 io_base_lo, io_limit_lo;
	u16 mem_base, mem_limit;
	u16 cmd;
	unsigned long start, end, off;
	struct pci_dev *dev = bus->self;
	struct pci_controller *hose = dev->sysdata;

	if (!hose) {
		printk("update_bridge_base: no hose?\n");
		return;
	}
	/* Disable decoding while the window registers are inconsistent. */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	pci_write_config_word(dev, PCI_COMMAND,
			      cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
	if (res->flags & IORESOURCE_IO) {
		/* Translate back from CPU to bus I/O addresses. */
		off = (unsigned long) hose->io_base_virt - isa_io_base;
		start = res->start - off;
		end = res->end - off;
		io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
		io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
		if (end > 0xffff) {
			/* Needs the 32-bit I/O window form. */
			pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
					      start >> 16);
			pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
					      end >> 16);
			io_base_lo |= PCI_IO_RANGE_TYPE_32;
		} else
			io_base_lo |= PCI_IO_RANGE_TYPE_16;
		pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
		pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);

	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
		   == IORESOURCE_MEM) {
		off = hose->pci_mem_offset;
		mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
		mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
		pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
		pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);

	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
		   == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
		off = hose->pci_mem_offset;
		mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
		mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
		pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
		pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);

	} else {
		DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n",
		    pci_name(dev), i, res->flags);
	}
	/* Re-enable decoding with the original command bits. */
	pci_write_config_word(dev, PCI_COMMAND, cmd);
}
478 | ||
/*
 * Try to claim BAR idx of dev in the resource tree under its parent
 * window.  If no parent can be found or the request conflicts, mark
 * the BAR IORESOURCE_UNSET (keeping only its size) so that a fresh
 * address is assigned later by pcibios_assign_resources().
 */
static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
	    pci_name(dev), idx, (unsigned long long)r->start,
	    (unsigned long long)r->end, r->flags);
	pr = pci_find_parent_resource(dev, r);
	if (!pr || request_resource(pr, r) < 0) {
		printk(KERN_ERR "PCI: Cannot allocate resource region %d"
		       " of device %s\n", idx, pci_name(dev));
		if (pr)
			DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
			    pr, (unsigned long long)pr->start,
			    (unsigned long long)pr->end, pr->flags);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}
500 | ||
/*
 * Claim the BARs of all devices, in two passes: pass 0 handles BARs
 * whose decoding is enabled in the command register, pass 1 the
 * disabled ones (so enabled regions win on conflicts).  Pass 0 also
 * switches off any enabled expansion ROMs, as their decoders may be
 * shared with other regions.
 */
static void __init
pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx < 6; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			/* pass 0 <-> enabled, pass 1 <-> disabled */
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags & IORESOURCE_ROM_ENABLE) {
			/* Turn the ROM off, leave the resource region, but keep it unregistered. */
			u32 reg;
			DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
			r->flags &= ~IORESOURCE_ROM_ENABLE;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			pci_write_config_dword(dev, dev->rom_base_reg,
					       reg & ~PCI_ROM_ADDRESS_ENABLE);
		}
	}
}
538 | ||
/*
 * Assign fresh addresses to every BAR previously marked
 * IORESOURCE_UNSET (unprogrammed or conflicting), unless the
 * machine-specific enable hook vetoes the device.  Host bridges and
 * classless devices are left untouched; ROMs are deliberately not
 * assigned.
 */
static void __init
pcibios_assign_resources(void)
{
	struct pci_dev *dev = NULL;
	int idx;
	struct resource *r;

	for_each_pci_dev(dev) {
		int class = dev->class >> 8;

		/* Don't touch classless devices and host bridges */
		if (!class || class == PCI_CLASS_BRIDGE_HOST)
			continue;

		for (idx = 0; idx < 6; idx++) {
			r = &dev->resource[idx];

			/*
			 * We shall assign a new address to this resource,
			 * either because the BIOS (sic) forgot to do so
			 * or because we have decided the old address was
			 * unusable for some reason.
			 */
			if ((r->flags & IORESOURCE_UNSET) && r->end &&
			    (!ppc_md.pcibios_enable_device_hook ||
			     !ppc_md.pcibios_enable_device_hook(dev, 1))) {
				r->flags &= ~IORESOURCE_UNSET;
				pci_assign_resource(dev, idx);
			}
		}

#if 0 /* don't assign ROMs */
		r = &dev->resource[PCI_ROM_RESOURCE];
		r->end -= r->start;
		r->start = 0;
		if (r->end)
			pci_assign_resource(dev, PCI_ROM_RESOURCE);
#endif
	}
}
579 | ||
580 | ||
/*
 * Enable I/O and/or memory decoding for the resources of dev selected
 * by mask (bit per BAR).  Fails with -EINVAL if any selected BAR is
 * still IORESOURCE_UNSET (i.e. could not be allocated).  Returns 0 on
 * success.
 */
int
pcibios_enable_resources(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx=0; idx<6; idx++) {
		/* Only set up the requested stuff */
		if (!(mask & (1<<idx)))
			continue;

		r = &dev->resource[idx];
		if (r->flags & IORESOURCE_UNSET) {
			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	/* An assigned ROM needs memory decoding too. */
	if (dev->resource[PCI_ROM_RESOURCE].start)
		cmd |= PCI_COMMAND_MEMORY;
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}
613 | ||
614 | static int next_controller_index; | |
615 | ||
/*
 * Allocate and zero a new pci_controller (hose) from bootmem, append
 * it to the global hose list and give it the next controller index.
 * Boot-time only; the allocation is never freed.
 */
struct pci_controller * __init
pcibios_alloc_controller(void)
{
	struct pci_controller *hose;

	hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose));
	memset(hose, 0, sizeof(struct pci_controller));

	/* Link onto the tail of the singly-linked hose list. */
	*hose_tail = hose;
	hose_tail = &hose->next;

	hose->index = next_controller_index++;

	return hose;
}
631 | ||
/* Stub: this configuration keeps no Open Firmware to kernel bus-number
 * map.  Kept so pcibios_init() can call it unconditionally. */
void pcibios_make_OF_bus_map(void)
{
}
1da177e4 | 635 | |
1da177e4 LT |
/*
 * Main PCI boot-time initialisation (subsys_initcall): scan every
 * recorded controller, optionally renumbering buses, build the OF bus
 * map if needed, route interrupts, run machine fixups, then allocate
 * and assign all resources.
 */
static int __init
pcibios_init(void)
{
	struct pci_controller *hose;
	struct pci_bus *bus;
	int next_busno;

	printk(KERN_INFO "PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers. */
	for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
		if (pci_assign_all_buses)
			hose->first_busno = next_busno;
		hose->last_busno = 0xff;
		bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
		hose->last_busno = bus->subordinate;
		if (pci_assign_all_buses || next_busno <= hose->last_busno)
			next_busno = hose->last_busno + pcibios_assign_bus_offset;
	}
	pci_bus_count = next_busno;

	/* OpenFirmware based machines need a map of OF bus
	 * numbers vs. kernel bus numbers since we may have to
	 * remap them.
	 */
	if (pci_assign_all_buses && have_of)
		pcibios_make_OF_bus_map();

	/* Do machine dependent PCI interrupt routing */
	if (ppc_md.pci_swizzle && ppc_md.pci_map_irq)
		pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq);

	/* Call machine dependent fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	/* Allocate and assign resources */
	pcibios_allocate_bus_resources(&pci_root_buses);
	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);
	pcibios_assign_resources();

	/* Call machine dependent post-init code */
	if (ppc_md.pcibios_after_init)
		ppc_md.pcibios_after_init();

	return 0;
}

subsys_initcall(pcibios_init);
686 | ||
/*
 * Default interrupt-pin swizzle: walk from dev up the chain of
 * PCI-PCI bridges to the bus directly below the host bridge, applying
 * the standard bridge swizzle to *pinp at each hop.  Returns the slot
 * (idsel) of the last bridge, which the platform uses to look up the
 * interrupt routing.
 */
unsigned char __init
common_swizzle(struct pci_dev *dev, unsigned char *pinp)
{
	struct pci_controller *hose = dev->sysdata;

	if (dev->bus->number != hose->first_busno) {
		u8 pin = *pinp;
		do {
			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
			/* Move up the chain of bridges. */
			dev = dev->bus->self;
		} while (dev->bus->self);
		*pinp = pin;

		/* The slot is the idsel of the last bridge. */
	}
	return PCI_SLOT(dev->devfn);
}
705 | ||
/* Identity hook: no per-resource address fixup is needed here, so the
 * proposed start address is returned unchanged. */
unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
			     unsigned long start, unsigned long size)
{
	return start;
}
711 | ||
/*
 * Per-bus fixup called by the generic scan code.  For a host bridge,
 * populate the root bus resources from the hose (filling in sensible
 * defaults if firmware left them unset).  For a subordinate bridge,
 * read the bridge windows from config space and translate them from
 * bus addresses to CPU addresses.  Ends with the machine-specific bus
 * fixup hook, if any.
 */
void __init pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
	unsigned long io_offset;
	struct resource *res;
	int i;

	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
	if (bus->parent == NULL) {
		/* This is a host bridge - fill in its resources */
		hose->bus = bus;

		bus->resource[0] = res = &hose->io_resource;
		if (!res->flags) {
			if (io_offset)
				printk(KERN_ERR "I/O resource not set for host"
				       " bridge %d\n", hose->index);
			/* Default: the whole I/O space. */
			res->start = 0;
			res->end = IO_SPACE_LIMIT;
			res->flags = IORESOURCE_IO;
		}
		res->start += io_offset;
		res->end += io_offset;

		for (i = 0; i < 3; ++i) {
			res = &hose->mem_resources[i];
			if (!res->flags) {
				if (i > 0)
					continue;
				printk(KERN_ERR "Memory resource not set for "
				       "host bridge %d\n", hose->index);
				/* Default: everything above the offset. */
				res->start = hose->pci_mem_offset;
				res->end = ~0U;
				res->flags = IORESOURCE_MEM;
			}
			bus->resource[i+1] = res;
		}
	} else {
		/* This is a subordinate bridge */
		pci_read_bridge_bases(bus);

		for (i = 0; i < 4; ++i) {
			if ((res = bus->resource[i]) == NULL)
				continue;
			if (!res->flags)
				continue;
			/* Translate windows from bus to CPU view. */
			if (io_offset && (res->flags & IORESOURCE_IO)) {
				res->start += io_offset;
				res->end += io_offset;
			} else if (hose->pci_mem_offset
				   && (res->flags & IORESOURCE_MEM)) {
				res->start += hose->pci_mem_offset;
				res->end += hose->pci_mem_offset;
			}
		}
	}

	if (ppc_md.pcibios_fixup_bus)
		ppc_md.pcibios_fixup_bus(bus);
}
772 | ||
/* Command-line "pci=" option hook: nothing is recognised here, so the
 * string is handed back unconsumed for generic code to parse. */
char __init *pcibios_setup(char *str)
{
	return str;
}
777 | ||
/* the next one is stolen from the alpha port... */
/* Record the routed irq in the device's PCI_INTERRUPT_LINE register. */
void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
	/* XXX FIXME - update OF device tree node interrupt property */
}
785 | ||
/*
 * Arch hook for pci_enable_device(): consult the machine-specific
 * veto hook, then turn on I/O and/or memory decoding according to the
 * device's BARs.  Fails with -EINVAL if the hook refuses the device
 * or any BAR is still IORESOURCE_UNSET.  Note: unlike
 * pcibios_enable_resources(), this ignores the mask argument and
 * checks all six BARs.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	if (ppc_md.pcibios_enable_device_hook)
		if (ppc_md.pcibios_enable_device_hook(dev, 0))
			return -EINVAL;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx=0; idx<6; idx++) {
		r = &dev->resource[idx];
		if (r->flags & IORESOURCE_UNSET) {
			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n",
		       pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}
816 | ||
817 | struct pci_controller* | |
818 | pci_bus_to_hose(int bus) | |
819 | { | |
820 | struct pci_controller* hose = hose_head; | |
821 | ||
822 | for (; hose; hose = hose->next) | |
823 | if (bus >= hose->first_busno && bus <= hose->last_busno) | |
824 | return hose; | |
825 | return NULL; | |
826 | } | |
827 | ||
92a11f9e | 828 | void __iomem * |
1da177e4 LT |
829 | pci_bus_io_base(unsigned int bus) |
830 | { | |
831 | struct pci_controller *hose; | |
832 | ||
833 | hose = pci_bus_to_hose(bus); | |
834 | if (!hose) | |
835 | return NULL; | |
836 | return hose->io_base_virt; | |
837 | } | |
838 | ||
839 | unsigned long | |
840 | pci_bus_io_base_phys(unsigned int bus) | |
841 | { | |
842 | struct pci_controller *hose; | |
843 | ||
844 | hose = pci_bus_to_hose(bus); | |
845 | if (!hose) | |
846 | return 0; | |
847 | return hose->io_base_phys; | |
848 | } | |
849 | ||
850 | unsigned long | |
851 | pci_bus_mem_base_phys(unsigned int bus) | |
852 | { | |
853 | struct pci_controller *hose; | |
854 | ||
855 | hose = pci_bus_to_hose(bus); | |
856 | if (!hose) | |
857 | return 0; | |
858 | return hose->pci_mem_offset; | |
859 | } | |
860 | ||
861 | unsigned long | |
862 | pci_resource_to_bus(struct pci_dev *pdev, struct resource *res) | |
863 | { | |
864 | /* Hack alert again ! See comments in chrp_pci.c | |
865 | */ | |
866 | struct pci_controller* hose = | |
867 | (struct pci_controller *)pdev->sysdata; | |
868 | if (hose && res->flags & IORESOURCE_MEM) | |
869 | return res->start - hose->pci_mem_offset; | |
870 | /* We may want to do something with IOs here... */ | |
871 | return res->start; | |
872 | } | |
873 | ||
874 | ||
/*
 * Validate and translate a user-requested mmap offset for a PCI
 * device: add the hose's I/O (or, disabled below, memory) offset,
 * check that the resulting address falls inside one of the device's
 * resources of the requested type, and for I/O convert to the final
 * physical address.  Returns the matching resource, or NULL if the
 * offset does not correspond to any resource.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == 0)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = hose->io_base_virt - ___IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
926 | ||
927 | /* | |
928 | * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci | |
929 | * device mapping. | |
930 | */ | |
931 | static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, | |
932 | pgprot_t protection, | |
933 | enum pci_mmap_state mmap_state, | |
934 | int write_combine) | |
935 | { | |
936 | unsigned long prot = pgprot_val(protection); | |
937 | ||
938 | /* Write combine is always 0 on non-memory space mappings. On | |
939 | * memory space, if the user didn't pass 1, we check for a | |
940 | * "prefetchable" resource. This is a bit hackish, but we use | |
941 | * this to workaround the inability of /sysfs to provide a write | |
942 | * combine bit | |
943 | */ | |
944 | if (mmap_state != pci_mmap_mem) | |
945 | write_combine = 0; | |
946 | else if (write_combine == 0) { | |
947 | if (rp->flags & IORESOURCE_PREFETCH) | |
948 | write_combine = 1; | |
949 | } | |
950 | ||
951 | /* XXX would be nice to have a way to ask for write-through */ | |
952 | prot |= _PAGE_NO_CACHE; | |
953 | if (write_combine) | |
954 | prot &= ~_PAGE_GUARDED; | |
955 | else | |
956 | prot |= _PAGE_GUARDED; | |
957 | ||
e31dd6e4 GKH |
958 | printk("PCI map for %s:%llx, prot: %lx\n", pci_name(dev), |
959 | (unsigned long long)rp->start, prot); | |
1da177e4 LT |
960 | |
961 | return __pgprot(prot); | |
962 | } | |
963 | ||
964 | /* | |
965 | * This one is used by /dev/mem and fbdev who have no clue about the | |
966 | * PCI device, it tries to find the PCI device first and calls the | |
967 | * above routine | |
968 | */ | |
/*
 * Compute page protection for a physical-address mapping (used by
 * /dev/mem and fbdev, which have no pci_dev at hand — see comment above).
 *
 * RAM pages keep the caller's protection unchanged.  Everything else is
 * mapped no-cache + guarded, except that if the pfn falls inside a PCI
 * memory resource marked prefetchable, the guarded bit is dropped.
 *
 * @file:       unused
 * @pfn:        page frame number being mapped
 * @size:       unused
 * @protection: protection bits to start from
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t protection)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	unsigned long prot = pgprot_val(protection);
	unsigned long offset = pfn << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

	/* Scan every device's memory resources for one containing offset.
	 * for_each_pci_dev() takes/drops references as it iterates; when we
	 * break out early, pdev still holds a reference that must be put
	 * below — hence the pci_dev_put() inside the if (found) block.
	 */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot &= ~_PAGE_GUARDED;
		pci_dev_put(pdev);
	}

	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);

	return __pgprot(prot);
}
1013 | ||
1014 | ||
1015 | /* | |
1016 | * Perform the actual remap of the pages for a PCI device mapping, as | |
1017 | * appropriate for this architecture. The region in the process to map | |
1018 | * is described by vm_start and vm_end members of VMA, the base physical | |
1019 | * address is found in vm_pgoff. | |
1020 | * The pci device structure is provided so that architectures may make mapping | |
1021 | * decisions on a per-device or per-bus basis. | |
1022 | * | |
1023 | * Returns a negative error code on failure, zero on success. | |
1024 | */ | |
1025 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |
1026 | enum pci_mmap_state mmap_state, | |
1027 | int write_combine) | |
1028 | { | |
396a1a58 | 1029 | resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT; |
1da177e4 LT |
1030 | struct resource *rp; |
1031 | int ret; | |
1032 | ||
1033 | rp = __pci_mmap_make_offset(dev, &offset, mmap_state); | |
1034 | if (rp == NULL) | |
1035 | return -EINVAL; | |
1036 | ||
1037 | vma->vm_pgoff = offset >> PAGE_SHIFT; | |
1da177e4 LT |
1038 | vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, |
1039 | vma->vm_page_prot, | |
1040 | mmap_state, write_combine); | |
1041 | ||
1042 | ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | |
1043 | vma->vm_end - vma->vm_start, vma->vm_page_prot); | |
1044 | ||
1045 | return ret; | |
1046 | } | |
1047 | ||
1048 | /* Obsolete functions. Should be removed once the symbios driver | |
1049 | * is fixed | |
1050 | */ | |
1051 | unsigned long | |
1052 | phys_to_bus(unsigned long pa) | |
1053 | { | |
1054 | struct pci_controller *hose; | |
1055 | int i; | |
1056 | ||
1057 | for (hose = hose_head; hose; hose = hose->next) { | |
1058 | for (i = 0; i < 3; ++i) { | |
1059 | if (pa >= hose->mem_resources[i].start | |
1060 | && pa <= hose->mem_resources[i].end) { | |
1061 | /* | |
1062 | * XXX the hose->pci_mem_offset really | |
1063 | * only applies to mem_resources[0]. | |
1064 | * We need a way to store an offset for | |
1065 | * the others. -- paulus | |
1066 | */ | |
1067 | if (i == 0) | |
1068 | pa -= hose->pci_mem_offset; | |
1069 | return pa; | |
1070 | } | |
1071 | } | |
1072 | } | |
1073 | /* hmmm, didn't find it */ | |
1074 | return 0; | |
1075 | } | |
1076 | ||
1077 | unsigned long | |
1078 | pci_phys_to_bus(unsigned long pa, int busnr) | |
1079 | { | |
1080 | struct pci_controller* hose = pci_bus_to_hose(busnr); | |
1081 | if (!hose) | |
1082 | return pa; | |
1083 | return pa - hose->pci_mem_offset; | |
1084 | } | |
1085 | ||
1086 | unsigned long | |
1087 | pci_bus_to_phys(unsigned int ba, int busnr) | |
1088 | { | |
1089 | struct pci_controller* hose = pci_bus_to_hose(busnr); | |
1090 | if (!hose) | |
1091 | return ba; | |
1092 | return ba + hose->pci_mem_offset; | |
1093 | } | |
1094 | ||
1095 | /* Provide information on locations of various I/O regions in physical | |
1096 | * memory. Do this on a per-card basis so that we choose the right | |
1097 | * root bridge. | |
1098 | * Note that the returned IO or memory base is a physical address | |
1099 | */ | |
1100 | ||
1101 | long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) | |
1102 | { | |
1103 | struct pci_controller* hose; | |
1104 | long result = -EOPNOTSUPP; | |
1105 | ||
1da177e4 LT |
1106 | hose = pci_bus_to_hose(bus); |
1107 | if (!hose) | |
1108 | return -ENODEV; | |
1109 | ||
1110 | switch (which) { | |
1111 | case IOBASE_BRIDGE_NUMBER: | |
1112 | return (long)hose->first_busno; | |
1113 | case IOBASE_MEMORY: | |
1114 | return (long)hose->pci_mem_offset; | |
1115 | case IOBASE_IO: | |
1116 | return (long)hose->io_base_phys; | |
1117 | case IOBASE_ISA_IO: | |
1118 | return (long)isa_io_base; | |
1119 | case IOBASE_ISA_MEM: | |
1120 | return (long)isa_mem_base; | |
1121 | } | |
1122 | ||
1123 | return result; | |
1124 | } | |
1125 | ||
2311b1f2 ME |
1126 | void pci_resource_to_user(const struct pci_dev *dev, int bar, |
1127 | const struct resource *rsrc, | |
e31dd6e4 | 1128 | resource_size_t *start, resource_size_t *end) |
2311b1f2 ME |
1129 | { |
1130 | struct pci_controller *hose = pci_bus_to_hose(dev->bus->number); | |
396a1a58 | 1131 | resource_size_t offset = 0; |
2311b1f2 ME |
1132 | |
1133 | if (hose == NULL) | |
1134 | return; | |
1135 | ||
1136 | if (rsrc->flags & IORESOURCE_IO) | |
396a1a58 BH |
1137 | offset = (unsigned long)hose->io_base_virt - _IO_BASE; |
1138 | ||
1139 | /* We pass a fully fixed up address to userland for MMIO instead of | |
1140 | * a BAR value because X is lame and expects to be able to use that | |
1141 | * to pass to /dev/mem ! | |
1142 | * | |
1143 | * That means that we'll have potentially 64 bits values where some | |
1144 | * userland apps only expect 32 (like X itself since it thinks only | |
1145 | * Sparc has 64 bits MMIO) but if we don't do that, we break it on | |
1146 | * 32 bits CHRPs :-( | |
1147 | * | |
1148 | * Hopefully, the sysfs insterface is immune to that gunk. Once X | |
1149 | * has been fixed (and the fix spread enough), we can re-enable the | |
1150 | * 2 lines below and pass down a BAR value to userland. In that case | |
1151 | * we'll also have to re-enable the matching code in | |
1152 | * __pci_mmap_make_offset(). | |
1153 | * | |
1154 | * BenH. | |
1155 | */ | |
1156 | #if 0 | |
1157 | else if (rsrc->flags & IORESOURCE_MEM) | |
1158 | offset = hose->pci_mem_offset; | |
1159 | #endif | |
2311b1f2 | 1160 | |
396a1a58 BH |
1161 | *start = rsrc->start - offset; |
1162 | *end = rsrc->end - offset; | |
2311b1f2 ME |
1163 | } |
1164 | ||
396a1a58 BH |
1165 | void __init pci_init_resource(struct resource *res, resource_size_t start, |
1166 | resource_size_t end, int flags, char *name) | |
1da177e4 LT |
1167 | { |
1168 | res->start = start; | |
1169 | res->end = end; | |
1170 | res->flags = flags; | |
1171 | res->name = name; | |
1172 | res->parent = NULL; | |
1173 | res->sibling = NULL; | |
1174 | res->child = NULL; | |
1175 | } | |
1176 | ||
1177 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) | |
1178 | { | |
1179 | unsigned long start = pci_resource_start(dev, bar); | |
1180 | unsigned long len = pci_resource_len(dev, bar); | |
1181 | unsigned long flags = pci_resource_flags(dev, bar); | |
1182 | ||
1183 | if (!len) | |
1184 | return NULL; | |
1185 | if (max && len > max) | |
1186 | len = max; | |
1187 | if (flags & IORESOURCE_IO) | |
1188 | return ioport_map(start, len); | |
1189 | if (flags & IORESOURCE_MEM) | |
1190 | /* Not checking IORESOURCE_CACHEABLE because PPC does | |
1191 | * not currently distinguish between ioremap and | |
1192 | * ioremap_nocache. | |
1193 | */ | |
1194 | return ioremap(start, len); | |
1195 | /* What? */ | |
1196 | return NULL; | |
1197 | } | |
1198 | ||
/*
 * Counterpart to pci_iomap().  Intentionally empty on this platform:
 * the mappings handed out above are not individually torn down here.
 */
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
1205 | ||
/*
 * Convert a physical I/O address into a port number (offset from
 * _IO_BASE) by searching each hose's I/O window.
 *
 * NOTE(review): the not-found sentinel is (unsigned int)-1 even though
 * the return type is unsigned long — callers presumably compare against
 * the 32-bit value; confirm before "fixing" the cast.
 */
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller* hose = hose_head;

	for (; hose; hose = hose->next) {
		unsigned int size = hose->io_resource.end -
			hose->io_resource.start + 1;
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			return base + (address - hose->io_base_phys);
		}
	}
	return (unsigned int)-1;
}
EXPORT_SYMBOL(pci_address_to_pio);
1da177e4 LT |
1223 | |
1224 | /* | |
1225 | * Null PCI config access functions, for the case when we can't | |
1226 | * find a hose. | |
1227 | */ | |
/*
 * Generates a null config accessor (always PCIBIOS_DEVICE_NOT_FOUND)
 * for the old per-device API.
 * NOTE(review): no expansion of this macro is visible in this file's
 * null-ops section — possibly dead; confirm against the rest of the
 * file before removing.
 */
#define NULL_PCI_OP(rw, size, type) \
static int \
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
{ \
	return PCIBIOS_DEVICE_NOT_FOUND; \
}
1234 | ||
/* Null bus read accessor: used when no hose is found; always fails. */
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1241 | ||
/* Null bus write accessor: used when no hose is found; always fails. */
static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1248 | ||
1249 | static struct pci_ops null_pci_ops = | |
1250 | { | |
1251 | null_read_config, | |
1252 | null_write_config | |
1253 | }; | |
1254 | ||
1255 | /* | |
1256 | * These functions are used early on before PCI scanning is done | |
1257 | * and all of the pci_dev and pci_bus structures have been created. | |
1258 | */ | |
1259 | static struct pci_bus * | |
1260 | fake_pci_bus(struct pci_controller *hose, int busnr) | |
1261 | { | |
1262 | static struct pci_bus bus; | |
1263 | ||
1264 | if (hose == 0) { | |
1265 | hose = pci_bus_to_hose(busnr); | |
1266 | if (hose == 0) | |
1267 | printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr); | |
1268 | } | |
1269 | bus.number = busnr; | |
1270 | bus.sysdata = hose; | |
1271 | bus.ops = hose? hose->ops: &null_pci_ops; | |
1272 | return &bus; | |
1273 | } | |
1274 | ||
/*
 * Generates early_{read,write}_config_{byte,word,dword}(): config-space
 * accessors usable before PCI scanning, built by wrapping the generic
 * pci_bus_*_config_*() helpers around a bus from fake_pci_bus().
 */
#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)