dma-mapping: use unsigned long for dma_attrs
arch/sparc/kernel/pci_sun4v.c
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
#include <linux/iommu-common.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

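/* A per-CPU scratch list of physical page addresses lets us hand up to
 * PGLIST_NENTS IOTSB mappings to the hypervisor in a single
 * pci_sun4v_iommu_map() call instead of one hypercall per page.
 */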
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled. */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

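/* Flush the pending batch if the next allocation does not extend the
 * current run of contiguous IOTSB entries; ~0UL means "no run yet".
 */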
static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

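/* Coherent allocations: grab real pages, carve a range out of the
 * software IOTSB allocator, then batch-map the pages read/write
 * through the hypervisor.
 */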
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

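/* The hypervisor may demap fewer pages than requested in a single
 * call, so keep retrying from where it stopped until the whole range
 * is gone.
 */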
static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
			       unsigned long npages)
{
	u32 devhandle = *(u32 *)demap_arg;
	unsigned long num, flags;

	local_irq_save(flags);
	do {
		num = pci_sun4v_iommu_demap(devhandle,
					    HV_PCI_TSBID(0, entry),
					    npages);

		entry += num;
		npages -= num;
	} while (npages != 0);
	local_irq_restore(flags);
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
	dma_4v_iommu_demap(&devhandle, entry, npages);
	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

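/* Streaming map of a single page range: allocate IOTSB entries, then
 * batch-map each IO_PAGE_SIZE chunk with protections derived from the
 * DMA direction (write access only when the device may write).
 */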
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
	return DMA_ERROR_CODE;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;
	entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
	dma_4v_iommu_demap(&devhandle, entry, npages);
	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

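/* Scatterlist mapping with segment merging: consecutive entries whose
 * DMA addresses come out contiguous are coalesced into one segment,
 * subject to the device's max segment size and segment boundary mask.
 */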
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	local_irq_restore(flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags, entry;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	local_irq_save(flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages;
		struct iommu_map_table *tbl = &iommu->tbl;
		unsigned long shift = IO_PAGE_SHIFT;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		entry = ((dma_handle - tbl->table_map_base) >> shift);
		dma_4v_iommu_demap(&devhandle, entry, npages);
		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	local_irq_restore(flags);
}

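/* These ops back the generic DMA API on sun4v.  Illustrative example
 * (not from this file): a driver call such as
 *	dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, 0)
 * reaches dma_4v_map_page() with the attrs word passed through
 * unchanged, which is why every hook above takes unsigned long attrs.
 */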
static struct dma_map_ops sun4v_dma_ops = {
	.alloc			= dma_4v_alloc_coherent,
	.free			= dma_4v_free_coherent,
	.map_page		= dma_4v_map_page,
	.unmap_page		= dma_4v_unmap_page,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

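/* OBP may have left live IOMMU translations behind (e.g. for console
 * devices).  Walk every TSB entry: demap translations whose target
 * page is in the kernel's available physical memory, and reserve the
 * rest in the allocator bitmap so they are never handed out.
 */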
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu_map_table *iommu)
{
	struct iommu_pool *pool;
	unsigned long i, pool_nr, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
		pool = &(iommu->pools[pool_nr]);
		for (i = pool->start; i <= pool->end; i++) {
			unsigned long ret, io_attrs, ra;

			ret = pci_sun4v_iommu_getmap(devhandle,
						     HV_PCI_TSBID(0, i),
						     &io_attrs, &ra);
			if (ret == HV_EOK) {
				if (page_in_phys_avail(ra)) {
					pci_sun4v_iommu_demap(devhandle,
							      HV_PCI_TSBID(0,
							      i), 1);
				} else {
					cnt++;
					__set_bit(i, iommu->map);
				}
			}
		}
	}
	return cnt;
}

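/* The "virtual-dma" OBP property supplies the DVMA base and size;
 * fall back to a 2GB window at 0x80000000 when it is absent.
 */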
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->tbl.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);
	sz = probe_existing_entries(pbm, &iommu->tbl);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

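/* MSI support: sun4v delivers MSI/MSI-X as entries in per-queue event
 * rings (MSIQs) managed through hypervisor calls.  The ops below plug
 * into the shared sparc64 MSI core via sparc64_pbm_msi_init().
 */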
#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;		/* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry. */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

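/* All MSI queues for a PBM live in one physically contiguous block:
 * queue i starts at byte offset i * q_size.  Each queue is registered
 * with the hypervisor and the configuration read back to verify it.
 */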
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
			      struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

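/* The first probe negotiates the PCI hypervisor API group and installs
 * sun4v_dma_ops; the per-CPU batch pglists are allocated once, on
 * demand, before any PBM is set up.
 */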
static int pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		err = sun4v_hvapi_register(HV_GRP_PCI,
					   vpci_major,
					   &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);