/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static enum zpci_ioat_dtype zpci_ioat_dt = ZPCI_IOTA_RTTO;

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

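/*
 * The CPU view of the IOMMU translation is a multi-level table: a
 * region table whose entries point to segment tables, whose entries
 * in turn point to page tables holding the page frame addresses.
 * Tables are populated lazily by the first mapping that touches them;
 * freshly allocated entries start out invalid and protected.
 */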
static unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

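/*
 * Return the segment table a region-table entry points to, allocating
 * and hooking up a new one if the entry is still invalid.  A new
 * entry is validated and its protection bit cleared so the walk can
 * descend through it.
 */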
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry)) {
		sto = get_rt_sto(*entry);
	} else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry)) {
		pto = get_st_pto(*entry);
	} else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;

		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

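/*
 * Walk (and, where needed, build) the table hierarchy for one I/O
 * address and return a pointer to its page-table entry.  The helpers
 * calc_rtx(), calc_sx() and calc_px() (asm/pci_dma.h) extract the
 * region, segment and page index from the address: the low PAGE_SHIFT
 * bits select the byte within the page, the fields above them select
 * px, sx and rtx in turn.
 */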
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

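/*
 * Update a single page-table entry: either invalidate it (when
 * ZPCI_PTE_INVALID is set in flags) or point it at page_addr and
 * validate it, applying write protection when ZPCI_TABLE_PROTECTED
 * is requested.
 */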
static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
				 dma_addr_t dma_addr, int flags)
{
	unsigned long *entry;

	entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
	if (!entry) {
		WARN_ON_ONCE(1);
		return;
	}

	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
		return;
	}

	set_pt_pfaa(entry, page_addr);
	validate_pt_entry(entry);

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

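/*
 * Map or unmap a whole page range under dma_table_lock and, where the
 * architecture requires it, flush the device TLB with RPCIT.  The
 * refresh may be skipped only when previously invalid entries are
 * being validated; see the comment in the body.
 */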
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		dev_err(&zdev->pdev->dev, "Missing DMA table\n");
		rc = -EINVAL;
		goto no_refresh;
	}

	for (i = 0; i < nr_pages; i++) {
		dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * rpcit is not required to establish new translations when
	 * previously invalid translation-table entries are validated,
	 * however it is required when altering previously valid entries.
	 *
	 * TODO: also check that the old entries are indeed invalid, and
	 * not just for one page but for the whole range; for now we only
	 * WARN_ON in that case, which needs to be redone for lazy unmap.
	 */
	if (!zdev->tlb_refresh &&
	    ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		goto no_refresh;

	rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
			 nr_pages * PAGE_SIZE);

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

static void dma_cleanup_tables(struct zpci_dev *zdev)
{
	unsigned long *table;
	int rtx;

	if (!zdev || !zdev->dma_table)
		return;

	table = zdev->dma_table;
	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
	zdev->dma_table = NULL;
}

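/*
 * Find a free range of IOMMU pages in the allocation bitmap.
 * iommu_area_alloc() takes the boundary in bitmap units (pages here,
 * since shift is 0), so the hard-coded 0x1000000 keeps an allocation
 * from crossing a 0x1000000-page boundary (64 GiB with 4K pages).
 */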
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
				       int size)
{
	unsigned long boundary_size = 0x1000000;

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, 0, boundary_size, 0);
}

static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
	if (offset == -1)
		offset = __dma_alloc_iommu(zdev, 0, size);

	if (offset != -1) {
		zdev->next_bit = offset + size;
		if (zdev->next_bit >= zdev->iommu_pages)
			zdev->next_bit = 0;
	}
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return offset;
}

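/*
 * Release a range of IOMMU pages.  Pushing next_bit past a freed
 * range that lies at or beyond it appears intended to delay re-use
 * of just-unmapped I/O addresses until the allocator wraps around.
 */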
static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	if (offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);

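/*
 * Map a page range for DMA: reserve an I/O address range from the
 * bitmap, install the translations, and for DMA_NONE/DMA_TO_DEVICE
 * write-protect the entries so the device cannot store into them.
 */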
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;

	WARN_ON_ONCE(offset > PAGE_SIZE);

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1)
		goto out_err;

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma) {
		dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
			dma_addr, size, zdev->end_dma);
		goto out_free;
	}

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	if (!dma_update_trans(zdev, pa, dma_addr, size, flags))
		return dma_addr + offset;

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	dev_err(dev, "Failed to map addr: %lx\n", pa);
	return DMA_ERROR_CODE;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
	unsigned long iommu_page_index;
	int npages;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
		dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);

	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}

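/*
 * Coherent allocations reuse the streaming map path with
 * DMA_BIDIRECTIONAL; since the allocation is page aligned, the offset
 * passed to s390_dma_map_pages() below is always zero.
 */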
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    struct dma_attrs *attrs)
{
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
				 size, DMA_BIDIRECTIONAL, NULL);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  struct dma_attrs *attrs)
{
	s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
			     DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long) pa, get_order(size));
}

static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int mapped_elements = 0;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		struct page *page = sg_page(s);

		s->dma_address = s390_dma_map_pages(dev, page, s->offset,
						    s->length, dir, NULL);
		if (dma_mapping_error(dev, s->dma_address))
			goto unmap;

		s->dma_length = s->length;
		mapped_elements++;
	}
out:
	return mapped_elements;

unmap:
	for_each_sg(sg, s, mapped_elements, i) {
		if (s->dma_address)
			s390_dma_unmap_pages(dev, s->dma_address,
					     s->dma_length, dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
	mapped_elements = 0;
	goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
				     dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

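/*
 * Per-device setup: allocate the root translation table and an
 * allocation bitmap sized so that the I/O address space covers all of
 * usable memory (one bitmap bit per IOMMU page), then register the
 * table with the hardware via zpci_register_ioat().
 */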
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	unsigned int bitmap_order;
	int rc;

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out_clean;
	}

	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	bitmap_order = get_order(zdev->iommu_pages / 8);
	pr_info("iommu_size: 0x%lx iommu_pages: 0x%lx bitmap_order: %i\n",
		zdev->iommu_size, zdev->iommu_pages, bitmap_order);

	zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						       bitmap_order);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto out_free_table;
	}

	rc = zpci_register_ioat(zdev, 0,
				zdev->start_dma + PAGE_OFFSET,
				zdev->start_dma + zdev->iommu_size - 1,
				(u64) zdev->dma_table);
	if (rc)
		goto out_free_bitmap;
	return 0;

out_free_bitmap:
	free_pages((unsigned long) zdev->iommu_bitmap, bitmap_order);
	zdev->iommu_bitmap = NULL;
out_free_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out_clean:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev);
	free_pages((unsigned long) zdev->iommu_bitmap,
		   get_order(zdev->iommu_pages / 8));
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
						   ZPCI_TABLE_SIZE,
						   ZPCI_TABLE_ALIGN, 0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
						 ZPCI_PT_SIZE,
						 ZPCI_PT_ALIGN, 0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

struct dma_map_ops s390_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);
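
/*
 * Usage sketch (not part of this file): a PCI driver never calls
 * s390_dma_ops directly; it goes through the generic DMA API, which
 * dispatches to the ops above.  The names demo_buf and handle below
 * are illustrative only.
 *
 *	void *demo_buf;
 *	dma_addr_t handle;
 *
 *	demo_buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &handle,
 *				      GFP_KERNEL);
 *	if (!demo_buf)
 *		return -ENOMEM;
 *	... program handle into the device ...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, demo_buf, handle);
 */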