/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

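/* Hypervisor TSB updates are batched per CPU: physical page addresses are
 * collected in a pglist and handed to pci_sun4v_iommu_map() in chunks of at
 * most PGLIST_NENTS pages at a time.
 */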
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

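/* Coherent allocations: grab pages on the device's NUMA node, reserve a
 * contiguous range of IOTSB entries, then batch-map the pages with both
 * read and write attributes.
 */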
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

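/* Streaming mapping of a single page.  The read attribute is always set;
 * write is added unless the transfer direction is DMA_TO_DEVICE.
 */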
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		while (npages) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static struct dma_map_ops sun4v_dma_ops = {
	.alloc_coherent		= dma_4v_alloc_coherent,
	.free_coherent		= dma_4v_free_coherent,
	.map_page		= dma_4v_map_page,
	.unmap_page		= dma_4v_unmap_page,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
	.sync_single_for_cpu	= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4v_sync_sg_for_cpu,
};

static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
					 struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

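/* Import IOTSB entries already established by the firmware.  Valid entries
 * whose backing page is still listed as available memory are stale and get
 * demapped; the rest are preserved by marking them busy in the arena.
 */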
static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
						      struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Error, kzalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

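/* MSI/MSI-X support is layered on the common sparc64 MSI queue code; this
 * file only provides the sun4v hypervisor back-ends collected below in
 * pci_sun4v_msiq_ops.
 */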
#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 * 	bits 39:32 is the bus/device/fn of the msg target-id
	 * 	bits 18:16 is the message routing code
	 * 	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 * 	00 - INTA
	 * 	01 - INTB
	 * 	10 - INTC
	 * 	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
					struct of_device *op, u32 devhandle)
{
	struct device_node *dp = op->node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

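/* The first probe negotiates the PCI hypervisor API group and installs
 * sun4v_dma_ops as the global dma_ops; the per-CPU page lists used by the
 * IOMMU mapping batches are also allocated once here.
 */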
static int __devinit pci_sun4v_probe(struct of_device *op,
				     const struct of_device_id *match)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->node;

	if (!hvapi_negotiated++) {
		err = sun4v_hvapi_register(HV_GRP_PCI,
					   vpci_major,
					   &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static struct of_device_id __initdata pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct of_platform_driver pci_sun4v_driver = {
	.name		= DRIVER_NAME,
	.match_table	= pci_sun4v_match,
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return of_register_driver(&pci_sun4v_driver, &of_bus_type);
}

subsys_initcall(pci_sun4v_init);