/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics which means that passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

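/*
 * Illustrative sketch only (not part of the driver logic): because the core
 * API has copy semantics as described above, a caller may describe the view
 * it wants in a short-lived local and pass it to the _ggtt_/_view flavoured
 * functions; nothing needs to outlive the call. The helper name below is
 * hypothetical.
 */
static inline struct i915_ggtt_view example_rotated_view(void)
{
	/* A stack-local view is enough; core functions copy what they need. */
	struct i915_ggtt_view view = {
		.type = I915_GGTT_VIEW_ROTATED,
	};

	return view;
}
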
static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, base);
}

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

const struct i915_ggtt_view i915_ggtt_view_normal = {
	.type = I915_GGTT_VIEW_NORMAL,
};
const struct i915_ggtt_view i915_ggtt_view_rotated = {
	.type = I915_GGTT_VIEW_ROTATED,
};

static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
	has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;

	if (intel_vgpu_active(dev))
		has_full_ppgtt = false; /* emulation is too hard */

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (enable_ppgtt == 0 || !has_aliasing_ppgtt))
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

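/*
 * For reference (summary of the checks above): the sanitized value selects
 * the PPGTT flavour used by the rest of the driver: 0 = PPGTT disabled,
 * 1 = aliasing PPGTT, 2 = full PPGTT (32b address space), 3 = full 48bit
 * PPGTT.
 */
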
static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags = 0;

	/* Currently applicable only to VLV */
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->obj->base.size,
			     true);
}

static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  bool valid)
{
	gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

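/*
 * Worked example (illustrative only): for a 4K page at DMA address
 * 0x12345000 that should be LLC cached, gen8_pte_encode() above returns
 * 0x12345000 | _PAGE_PRESENT | _PAGE_RW | PPAT_CACHED_INDEX, i.e. a
 * present, writable entry whose PPAT index selects the cached attribute.
 * A minimal usage sketch, assuming the caller already holds a kmap'ed
 * page table (pt_vaddr) and a DMA address (daddr):
 *
 *	pt_vaddr[gen8_pte_index(start)] =
 *		gen8_pte_encode(daddr, I915_CACHE_LLC, true);
 */
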
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 flags)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static int __setup_page_dma(struct drm_device *dev,
			    struct i915_page_dma *p, gfp_t flags)
{
	struct device *device = &dev->pdev->dev;

	p->page = alloc_page(flags);
	if (!p->page)
		return -ENOMEM;

	p->daddr = dma_map_page(device,
				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);

	if (dma_mapping_error(device, p->daddr)) {
		__free_page(p->page);
		return -EINVAL;
	}

	return 0;
}

static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
	return __setup_page_dma(dev, p, GFP_KERNEL);
}

static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
	if (WARN_ON(!p->page))
		return;

	dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
	__free_page(p->page);
	memset(p, 0, sizeof(*p));
}

static void *kmap_page_dma(struct i915_page_dma *p)
{
	return kmap_atomic(p->page);
}

/* We use the flushing unmap only with ppgtt structures:
 * page directories, page tables and scratch pages.
 */
static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
{
	/* There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter so play safe for now.
	 */
	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
		drm_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

#define kmap_px(px) kmap_page_dma(px_base(px))
#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))

#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))

static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
			  const uint64_t val)
{
	int i;
	uint64_t * const vaddr = kmap_page_dma(p);

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_page_dma(dev, vaddr);
}

static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
			     const uint32_t val32)
{
	uint64_t v = val32;

	v = v << 32 | val32;

	fill_page_dma(dev, p, v);
}

static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
{
	struct i915_page_scratch *sp;
	int ret;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (sp == NULL)
		return ERR_PTR(-ENOMEM);

	ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
	if (ret) {
		kfree(sp);
		return ERR_PTR(ret);
	}

	set_pages_uc(px_page(sp), 1);

	return sp;
}

static void free_scratch_page(struct drm_device *dev,
			      struct i915_page_scratch *sp)
{
	set_pages_wb(px_page(sp), 1);

	cleanup_px(dev, sp);
	kfree(sp);
}

static struct i915_page_table *alloc_pt(struct drm_device *dev)
{
	struct i915_page_table *pt;
	const size_t count = INTEL_INFO(dev)->gen >= 8 ?
		GEN8_PTES : GEN6_PTES;
	int ret = -ENOMEM;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return ERR_PTR(-ENOMEM);

	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
				GFP_KERNEL);

	if (!pt->used_ptes)
		goto fail_bitmap;

	ret = setup_px(dev, pt);
	if (ret)
		goto fail_page_m;

	return pt;

fail_page_m:
	kfree(pt->used_ptes);
fail_bitmap:
	kfree(pt);

	return ERR_PTR(ret);
}

static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
{
	cleanup_px(dev, pt);
	kfree(pt->used_ptes);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen8_pte_t scratch_pte;

	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
				      I915_CACHE_LLC, true);

	fill_px(vm->dev, pt, scratch_pte);
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen6_pte_t scratch_pte;

	WARN_ON(px_dma(vm->scratch_page) == 0);

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);

	fill32_px(vm->dev, pt, scratch_pte);
}

static struct i915_page_directory *alloc_pd(struct drm_device *dev)
{
	struct i915_page_directory *pd;
	int ret = -ENOMEM;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
				sizeof(*pd->used_pdes), GFP_KERNEL);
	if (!pd->used_pdes)
		goto fail_bitmap;

	ret = setup_px(dev, pd);
	if (ret)
		goto fail_page_m;

	return pd;

fail_page_m:
	kfree(pd->used_pdes);
fail_bitmap:
	kfree(pd);

	return ERR_PTR(ret);
}

static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
{
	if (px_page(pd)) {
		cleanup_px(dev, pd);
		kfree(pd->used_pdes);
		kfree(pd);
	}
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	gen8_pde_t scratch_pde;

	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);

	fill_px(vm->dev, pd, scratch_pde);
}

static int __pdp_init(struct drm_device *dev,
		      struct i915_page_directory_pointer *pdp)
{
	size_t pdpes = I915_PDPES_PER_PDP(dev);

	pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
				  sizeof(unsigned long),
				  GFP_KERNEL);
	if (!pdp->used_pdpes)
		return -ENOMEM;

	pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
				      GFP_KERNEL);
	if (!pdp->page_directory) {
		kfree(pdp->used_pdpes);
		/* the PDP might be the statically allocated top level. Keep it
		 * as clean as possible */
		pdp->used_pdpes = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->used_pdpes);
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static struct
i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!USES_FULL_48BIT_PPGTT(dev));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(dev, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(dev, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct drm_device *dev,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);
	if (USES_FULL_48BIT_PPGTT(dev)) {
		cleanup_px(dev, pdp);
		kfree(pdp);
	}
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm->dev, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	gen8_ppgtt_pml4e_t scratch_pml4e;

	scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
					  I915_CACHE_LLC);

	fill_px(vm->dev, pml4, scratch_pml4e);
}

static void
gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
			  struct i915_page_directory_pointer *pdp,
			  struct i915_page_directory *pd,
			  int index)
{
	gen8_ppgtt_pdpe_t *page_directorypo;

	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		return;

	page_directorypo = kmap_px(pdp);
	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_px(ppgtt, page_directorypo);
}

static void
gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
				  struct i915_pml4 *pml4,
				  struct i915_page_directory_pointer *pdp,
				  int index)
{
	gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);

	WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_px(ppgtt, pagemap);
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	BUG_ON(entry >= 4);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
	intel_ring_emit(engine, upper_32_bits(addr));
	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
	intel_ring_emit(engine, lower_32_bits(addr));
	intel_ring_advance(engine);

	return 0;
}

static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
				 struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

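/*
 * Note added for clarity: in legacy 32b mode a context switch reloads all
 * four PDP register pairs (one 1GB page directory pointer each, highest
 * entry first), whereas in full 48bit mode only PDP0 is written and it
 * points at the single PML4 of the address space.
 */
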
static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
				       struct i915_page_directory_pointer *pdp,
				       uint64_t start,
				       uint64_t length,
				       gen8_pte_t scratch_pte)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	gen8_pte_t *pt_vaddr;
	unsigned pdpe = gen8_pdpe_index(start);
	unsigned pde = gen8_pde_index(start);
	unsigned pte = gen8_pte_index(start);
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned last_pte, i;

	if (WARN_ON(!pdp))
		return;

	while (num_entries) {
		struct i915_page_directory *pd;
		struct i915_page_table *pt;

		if (WARN_ON(!pdp->page_directory[pdpe]))
			break;

		pd = pdp->page_directory[pdpe];

		if (WARN_ON(!pd->page_table[pde]))
			break;

		pt = pd->page_table[pde];

		if (WARN_ON(!px_page(pt)))
			break;

		last_pte = pte + num_entries;
		if (last_pte > GEN8_PTES)
			last_pte = GEN8_PTES;

		pt_vaddr = kmap_px(pt);

		for (i = pte; i < last_pte; i++) {
			pt_vaddr[i] = scratch_pte;
			num_entries--;
		}

		kunmap_px(ppgtt, pt_vaddr);

		pte = 0;
		if (++pde == I915_PDES) {
			if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
				break;
			pde = 0;
		}
	}
}

static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
						 I915_CACHE_LLC, use_scratch);

	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
					   scratch_pte);
	} else {
		uint64_t pml4e;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
			gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
						   scratch_pte);
		}
	}
}

static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
			      struct i915_page_directory_pointer *pdp,
			      struct sg_page_iter *sg_iter,
			      uint64_t start,
			      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	gen8_pte_t *pt_vaddr;
	unsigned pdpe = gen8_pdpe_index(start);
	unsigned pde = gen8_pde_index(start);
	unsigned pte = gen8_pte_index(start);

	pt_vaddr = NULL;

	while (__sg_page_iter_next(sg_iter)) {
		if (pt_vaddr == NULL) {
			struct i915_page_directory *pd = pdp->page_directory[pdpe];
			struct i915_page_table *pt = pd->page_table[pde];
			pt_vaddr = kmap_px(pt);
		}

		pt_vaddr[pte] =
			gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
					cache_level, true);
		if (++pte == GEN8_PTES) {
			kunmap_px(ppgtt, pt_vaddr);
			pt_vaddr = NULL;
			if (++pde == I915_PDES) {
				if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
					break;
				pde = 0;
			}
			pte = 0;
		}
	}

	if (pt_vaddr)
		kunmap_px(ppgtt, pt_vaddr);
}

static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level,
				      u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sg_page_iter sg_iter;

	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);

	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
					      cache_level);
	} else {
		struct i915_page_directory_pointer *pdp;
		uint64_t pml4e;
		uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;

		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
			gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
						      start, cache_level);
		}
	}
}

static void gen8_free_page_tables(struct drm_device *dev,
				  struct i915_page_directory *pd)
{
	int i;

	if (!px_page(pd))
		return;

	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
		if (WARN_ON(!pd->page_table[i]))
			continue;

		free_pt(dev, pd->page_table[i]);
		pd->page_table[i] = NULL;
	}
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;
	int ret;

	vm->scratch_page = alloc_scratch_page(dev);
	if (IS_ERR(vm->scratch_page))
		return PTR_ERR(vm->scratch_page);

	vm->scratch_pt = alloc_pt(dev);
	if (IS_ERR(vm->scratch_pt)) {
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
	}

	vm->scratch_pd = alloc_pd(dev);
	if (IS_ERR(vm->scratch_pd)) {
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
	}

	if (USES_FULL_48BIT_PPGTT(dev)) {
		vm->scratch_pdp = alloc_pdp(dev);
		if (IS_ERR(vm->scratch_pdp)) {
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (USES_FULL_48BIT_PPGTT(dev))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;

free_pd:
	free_pd(dev, vm->scratch_pd);
free_pt:
	free_pt(dev, vm->scratch_pt);
free_scratch_page:
	free_scratch_page(dev, vm->scratch_page);

	return ret;
}

static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	enum vgt_g2v_type msg;
	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
	int i;

	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		u64 daddr = px_dma(&ppgtt->pml4);

		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
			u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

static void gen8_free_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;

	if (USES_FULL_48BIT_PPGTT(dev))
		free_pdp(dev, vm->scratch_pdp);
	free_pd(dev, vm->scratch_pd);
	free_pt(dev, vm->scratch_pt);
	free_scratch_page(dev, vm->scratch_page);
}

static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
				    struct i915_page_directory_pointer *pdp)
{
	int i;

	for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
		if (WARN_ON(!pdp->page_directory[i]))
			continue;

		gen8_free_page_tables(dev, pdp->page_directory[i]);
		free_pd(dev, pdp->page_directory[i]);
	}

	free_pdp(dev, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
		if (WARN_ON(!ppgtt->pml4.pdps[i]))
			continue;

		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->dev))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
	else
		gen8_ppgtt_cleanup_4lvl(ppgtt);

	gen8_free_scratch(vm);
}

/**
 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
 * @vm: Master vm structure.
 * @pd: Page directory for this address range.
 * @start: Starting virtual address to begin allocations.
 * @length: Size of the allocations.
 * @new_pts: Bitmap set by function with new allocations. Likely used by the
 *	caller to free on error.
 *
 * Allocate the required number of page tables. Extremely similar to
 * gen8_ppgtt_alloc_page_directories(). The main difference is here we are
 * limited by the page directory boundary (instead of the page directory
 * pointer). That boundary is 1GB virtual. Therefore, unlike
 * gen8_ppgtt_alloc_page_directories(), it is possible, and likely that the
 * caller will need to use multiple calls of this function to achieve the
 * appropriate allocation.
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
				     struct i915_page_directory *pd,
				     uint64_t start,
				     uint64_t length,
				     unsigned long *new_pts)
{
	struct drm_device *dev = vm->dev;
	struct i915_page_table *pt;
	uint32_t pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		/* Don't reallocate page tables */
		if (test_bit(pde, pd->used_pdes)) {
			/* Scratch is never allocated this way */
			WARN_ON(pt == vm->scratch_pt);
			continue;
		}

		pt = alloc_pt(dev);
		if (IS_ERR(pt))
			goto unwind_out;

		gen8_initialize_pt(vm, pt);
		pd->page_table[pde] = pt;
		__set_bit(pde, new_pts);
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
	}

	return 0;

unwind_out:
	for_each_set_bit(pde, new_pts, I915_PDES)
		free_pt(dev, pd->page_table[pde]);

	return -ENOMEM;
}

/**
 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
 * @vm: Master vm structure.
 * @pdp: Page directory pointer for this address range.
 * @start: Starting virtual address to begin allocations.
 * @length: Size of the allocations.
 * @new_pds: Bitmap set by function with new allocations. Likely used by the
 *	caller to free on error.
 *
 * Allocate the required number of page directories starting at the pde index of
 * @start, and ending at the pde index @start + @length. This function will skip
 * over already allocated page directories within the range, and only allocate
 * new ones, setting the appropriate pointer within the pdp as well as the
 * correct position in the bitmap @new_pds.
 *
 * The function will only allocate the pages within the range for a given page
 * directory pointer. In other words, if @start + @length straddles a virtually
 * addressed PDP boundary (512GB for 4k pages), there will be more allocations
 * required by the caller. This is not currently possible, and the BUG in the
 * code will prevent it.
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
				  struct i915_page_directory_pointer *pdp,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pds)
{
	struct drm_device *dev = vm->dev;
	struct i915_page_directory *pd;
	uint32_t pdpe;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);

	WARN_ON(!bitmap_empty(new_pds, pdpes));

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (test_bit(pdpe, pdp->used_pdpes))
			continue;

		pd = alloc_pd(dev);
		if (IS_ERR(pd))
			goto unwind_out;

		gen8_initialize_pd(vm, pd);
		pdp->page_directory[pdpe] = pd;
		__set_bit(pdpe, new_pds);
		trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
	}

	return 0;

unwind_out:
	for_each_set_bit(pdpe, new_pds, pdpes)
		free_pd(dev, pdp->page_directory[pdpe]);

	return -ENOMEM;
}

/**
 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
 * @vm: Master vm structure.
 * @pml4: Page map level 4 for this address range.
 * @start: Starting virtual address to begin allocations.
 * @length: Size of the allocations.
 * @new_pdps: Bitmap set by function with new allocations. Likely used by the
 *	caller to free on error.
 *
 * Allocate the required number of page directory pointers. Extremely similar to
 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
 * The main difference is here we are limited by the pml4 boundary (instead of
 * the page directory pointer).
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
				  struct i915_pml4 *pml4,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pdps)
{
	struct drm_device *dev = vm->dev;
	struct i915_page_directory_pointer *pdp;
	uint32_t pml4e;

	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (!test_bit(pml4e, pml4->used_pml4es)) {
			pdp = alloc_pdp(dev);
			if (IS_ERR(pdp))
				goto unwind_out;

			gen8_initialize_pdp(vm, pdp);
			pml4->pdps[pml4e] = pdp;
			__set_bit(pml4e, new_pdps);
			trace_i915_page_directory_pointer_entry_alloc(vm,
								      pml4e,
								      start,
								      GEN8_PML4E_SHIFT);
		}
	}

	return 0;

unwind_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		free_pdp(dev, pml4->pdps[pml4e]);

	return -ENOMEM;
}

static void
free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
{
	kfree(new_pts);
	kfree(new_pds);
}

/* Fills in the page directory bitmap, and the array of page tables bitmap. Both
 * of these are based on the number of PDPEs in the system.
 */
static
int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
					 unsigned long **new_pts,
					 uint32_t pdpes)
{
	unsigned long *pds;
	unsigned long *pts;

	pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
	if (!pds)
		return -ENOMEM;

	pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
		      GFP_TEMPORARY);
	if (!pts)
		goto err_out;

	*new_pds = pds;
	*new_pts = pts;

	return 0;

err_out:
	free_gen8_temp_bitmaps(pds, pts);
	return -ENOMEM;
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
}

static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp,
				    uint64_t start,
				    uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned long *new_page_dirs, *new_page_tables;
	struct drm_device *dev = vm->dev;
	struct i915_page_directory *pd;
	const uint64_t orig_start = start;
	const uint64_t orig_length = length;
	uint32_t pdpe;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
	int ret;

	/* Wrap is never okay since we can only represent 48b, and we don't
	 * actually use the other side of the canonical address space.
	 */
	if (WARN_ON(start + length < start))
		return -ENODEV;

	if (WARN_ON(start + length > vm->total))
		return -ENODEV;

	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Do the allocations first so we can easily bail out */
	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
						new_page_dirs);
	if (ret) {
		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
		return ret;
	}

	/* For every page directory referenced, allocate page tables */
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
						new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
		if (ret)
			goto err_out;
	}

	start = orig_start;
	length = orig_length;

	/* Allocations have completed successfully, so set the bitmaps, and do
	 * the mappings. */
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		gen8_pde_t *const page_directory = kmap_px(pd);
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		/* Every pd should be allocated, we just did that above. */
		WARN_ON(!pd);

		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			/* Same reasoning as pd */
			WARN_ON(!pt);
			WARN_ON(!pd_len);
			WARN_ON(!gen8_pte_count(pd_start, pd_len));

			/* Set our used ptes within the page table */
			bitmap_set(pt->used_ptes,
				   gen8_pte_index(pd_start),
				   gen8_pte_count(pd_start, pd_len));

			/* Our pde is now pointing to the pagetable, pt */
			__set_bit(pde, pd->used_pdes);

			/* Map the PDE to the page table */
			page_directory[pde] = gen8_pde_encode(px_dma(pt),
							      I915_CACHE_LLC);
			trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
							gen8_pte_index(start),
							gen8_pte_count(start, length),
							GEN8_PTES);

			/* NB: We haven't yet mapped ptes to pages. At this
			 * point we're still relying on insert_entries() */
		}

		kunmap_px(ppgtt, page_directory);
		__set_bit(pdpe, pdp->used_pdpes);
		gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
	}

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
	mark_tlbs_dirty(ppgtt);
	return 0;

err_out:
	while (pdpe--) {
		unsigned long temp;

		for_each_set_bit(temp, new_page_tables + pdpe *
				BITS_TO_LONGS(I915_PDES), I915_PDES)
			free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
	}

	for_each_set_bit(pdpe, new_page_dirs, pdpes)
		free_pd(dev, pdp->page_directory[pdpe]);

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
	mark_tlbs_dirty(ppgtt);
	return ret;
}

static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
				    struct i915_pml4 *pml4,
				    uint64_t start,
				    uint64_t length)
{
	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory_pointer *pdp;
	uint64_t pml4e;
	int ret = 0;

	/* Do the pml4 allocations first, so we don't need to track the newly
	 * allocated tables below the pdp */
	bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);

	/* The pagedirectory and pagetable allocations are done in the shared 3
	 * and 4 level code. Just allocate the pdps.
	 */
	ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
						new_pdps);
	if (ret)
		return ret;

	WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
	     "The allocation has spanned more than 512GB. "
	     "It is highly likely this is incorrect.");

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		WARN_ON(!pdp);

		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
		if (ret)
			goto err_out;

		gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
	}

	bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
		  GEN8_PML4ES_PER_PML4);

	return 0;

err_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);

	return ret;
}

static int gen8_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start, uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (USES_FULL_48BIT_PPGTT(vm->dev))
		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
	else
		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}

static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
			  uint64_t start, uint64_t length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_page_directory *pd;
	uint32_t pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		if (!test_bit(pdpe, pdp->used_pdpes))
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			uint32_t pte;
			gen8_pte_t *pt_vaddr;

			if (!test_bit(pde, pd->used_pdes))
				continue;

			pt_vaddr = kmap_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				uint64_t va =
					(pdpe << GEN8_PDPE_SHIFT) |
					(pde << GEN8_PDE_SHIFT) |
					(pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, " SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			/* don't use kunmap_px, it could trigger
			 * an unnecessary flush.
			 */
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	uint64_t start = ppgtt->base.start;
	uint64_t length = ppgtt->base.total;
	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
						 I915_CACHE_LLC, true);

	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
	} else {
		uint64_t pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
			if (!test_bit(pml4e, pml4->used_pml4es))
				continue;

			seq_printf(m, "    PML4E #%llu\n", pml4e);
			gen8_dump_pdp(pdp, start, length, scratch_pte, m);
		}
	}
}

static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
	unsigned long *new_page_dirs, *new_page_tables;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
	int ret;

	/* We allocate temp bitmap for page tables for no gain
	 * but as this is for init only, let's keep things simple
	 */
	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Allocate for all pdps regardless of how the ppgtt
	 * was defined.
	 */
	ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
						0, 1ULL << 32,
						new_page_dirs);
	if (!ret)
		*ppgtt->pdp.used_pdpes = *new_page_dirs;

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);

	return ret;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 *
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	int ret;

	ret = gen8_init_scratch(&ppgtt->base);
	if (ret)
		return ret;

	ppgtt->base.start = 0;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.allocate_va_range = gen8_alloc_va_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->debug_dump = gen8_dump_ppgtt;

	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
		if (ret)
			goto free_scratch;

		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

		ppgtt->base.total = 1ULL << 48;
		ppgtt->switch_mm = gen8_48b_mm_switch;
	} else {
		ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
		if (ret)
			goto free_scratch;

		ppgtt->base.total = 1ULL << 32;
		ppgtt->switch_mm = gen8_legacy_mm_switch;
		trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
							      0, 0,
							      GEN8_PML4E_SHIFT);

		if (intel_vgpu_active(ppgtt->base.dev)) {
			ret = gen8_preallocate_top_level_pdps(ppgtt);
			if (ret)
				goto free_scratch;
		}
	}

	if (intel_vgpu_active(ppgtt->base.dev))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	return 0;

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
}

static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_table *unused;
	gen6_pte_t scratch_pte;
	uint32_t pd_entry;
	uint32_t pte, pde, temp;
	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
		u32 expected;
		gen6_pte_t *pt_vaddr;
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
		pd_entry = readl(ppgtt->pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);

		for (pte = 0; pte < GEN6_PTES; pte += 4) {
			unsigned long va =
				(pde * PAGE_SIZE * GEN6_PTES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, " SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_px(ppgtt, pt_vaddr);
	}
}

/* Write pde (index) from the page directory @pd to the page table @pt */
static void gen6_write_pde(struct i915_page_directory *pd,
			   const int pde, struct i915_page_table *pt)
{
	/* Caller needs to make sure the write completes if necessary */
	struct i915_hw_ppgtt *ppgtt =
		container_of(pd, struct i915_hw_ppgtt, pd);
	u32 pd_entry;

	pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
	pd_entry |= GEN6_PDE_VALID;

	writel(pd_entry, ppgtt->pd_addr + pde);
}

/* Write all the page tables found in the ppgtt structure to incrementing page
 * directories. */
static void gen6_write_page_range(struct drm_i915_private *dev_priv,
				  struct i915_page_directory *pd,
				  uint32_t start, uint32_t length)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_page_table *pt;
	uint32_t pde, temp;

	gen6_for_each_pde(pt, pd, start, length, temp, pde)
		gen6_write_pde(pd, pde, pt);

	/* Make sure write is complete before other code can use this page
	 * table. Also required for WC mapped PTEs */
	readl(ggtt->gsm);
}

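/*
 * Illustrative sketch only: after (re)populating page tables for
 * [start, start + length), a caller flushes the PDE writes with something
 * like the fragment below. The real callers live elsewhere in the driver
 * and are not part of this excerpt:
 *
 *	gen6_write_page_range(dev_priv, &ppgtt->pd, start, length);
 */
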
b4a74e3a 1649static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
3e302542 1650{
44159ddb 1651 BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
b4a74e3a 1652
44159ddb 1653 return (ppgtt->pd.base.ggtt_offset / 64) << 16;
b4a74e3a
BW
1654}
1655
90252e5c 1656static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1657 struct drm_i915_gem_request *req)
90252e5c 1658{
4a570db5 1659 struct intel_engine_cs *engine = req->engine;
90252e5c
BW
1660 int ret;
1661
90252e5c 1662 /* NB: TLBs must be flushed and invalidated before a switch */
e2f80391 1663 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
90252e5c
BW
1664 if (ret)
1665 return ret;
1666
5fb9de1a 1667 ret = intel_ring_begin(req, 6);
90252e5c
BW
1668 if (ret)
1669 return ret;
1670
e2f80391
TU
1671 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
1672 intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
1673 intel_ring_emit(engine, PP_DIR_DCLV_2G);
1674 intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
1675 intel_ring_emit(engine, get_pd_offset(ppgtt));
1676 intel_ring_emit(engine, MI_NOOP);
1677 intel_ring_advance(engine);
90252e5c
BW
1678
1679 return 0;
1680}
1681
71ba2d64 1682static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1683 struct drm_i915_gem_request *req)
71ba2d64 1684{
4a570db5 1685 struct intel_engine_cs *engine = req->engine;
71ba2d64
YZ
1686 struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
1687
e2f80391
TU
1688 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1689 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
71ba2d64
YZ
1690 return 0;
1691}
1692
48a10389 1693static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1694 struct drm_i915_gem_request *req)
48a10389 1695{
4a570db5 1696 struct intel_engine_cs *engine = req->engine;
48a10389
BW
1697 int ret;
1698
48a10389 1699 /* NB: TLBs must be flushed and invalidated before a switch */
e2f80391 1700 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
48a10389
BW
1701 if (ret)
1702 return ret;
1703
5fb9de1a 1704 ret = intel_ring_begin(req, 6);
48a10389
BW
1705 if (ret)
1706 return ret;
1707
e2f80391
TU
1708 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
1709 intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
1710 intel_ring_emit(engine, PP_DIR_DCLV_2G);
1711 intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
1712 intel_ring_emit(engine, get_pd_offset(ppgtt));
1713 intel_ring_emit(engine, MI_NOOP);
1714 intel_ring_advance(engine);
48a10389 1715
90252e5c 1716 /* XXX: RCS is the only one to auto invalidate the TLBs? */
e2f80391
TU
1717 if (engine->id != RCS) {
1718 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
90252e5c
BW
1719 if (ret)
1720 return ret;
1721 }
1722
48a10389
BW
1723 return 0;
1724}
1725
eeb9488e 1726static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1727 struct drm_i915_gem_request *req)
eeb9488e 1728{
4a570db5 1729 struct intel_engine_cs *engine = req->engine;
eeb9488e
BW
1730 struct drm_device *dev = ppgtt->base.dev;
1731 struct drm_i915_private *dev_priv = dev->dev_private;
1732
48a10389 1733
e2f80391
TU
1734 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1735 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
eeb9488e 1736
e2f80391 1737 POSTING_READ(RING_PP_DIR_DCLV(engine));
eeb9488e
BW
1738
1739 return 0;
1740}
1741
82460d97 1742static void gen8_ppgtt_enable(struct drm_device *dev)
eeb9488e 1743{
eeb9488e 1744 struct drm_i915_private *dev_priv = dev->dev_private;
e2f80391 1745 struct intel_engine_cs *engine;
3e302542 1746
b4ac5afc 1747 for_each_engine(engine, dev_priv) {
2dba3239 1748 u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
e2f80391 1749 I915_WRITE(RING_MODE_GEN7(engine),
2dba3239 1750 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
eeb9488e 1751 }
eeb9488e 1752}
6197349b 1753
82460d97 1754static void gen7_ppgtt_enable(struct drm_device *dev)
3e302542 1755{
50227e1c 1756 struct drm_i915_private *dev_priv = dev->dev_private;
e2f80391 1757 struct intel_engine_cs *engine;
b4a74e3a 1758 uint32_t ecochk, ecobits;
6197349b 1759
b4a74e3a
BW
1760 ecobits = I915_READ(GAC_ECO_BITS);
1761 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
a65c2fcd 1762
b4a74e3a
BW
1763 ecochk = I915_READ(GAM_ECOCHK);
1764 if (IS_HASWELL(dev)) {
1765 ecochk |= ECOCHK_PPGTT_WB_HSW;
1766 } else {
1767 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1768 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1769 }
1770 I915_WRITE(GAM_ECOCHK, ecochk);
a65c2fcd 1771
b4ac5afc 1772 for_each_engine(engine, dev_priv) {
6197349b 1773 /* GFX_MODE is per-ring on gen7+ */
e2f80391 1774 I915_WRITE(RING_MODE_GEN7(engine),
b4a74e3a 1775 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
6197349b 1776 }
b4a74e3a 1777}
6197349b 1778
82460d97 1779static void gen6_ppgtt_enable(struct drm_device *dev)
b4a74e3a 1780{
50227e1c 1781 struct drm_i915_private *dev_priv = dev->dev_private;
b4a74e3a 1782 uint32_t ecochk, gab_ctl, ecobits;
a65c2fcd 1783
b4a74e3a
BW
1784 ecobits = I915_READ(GAC_ECO_BITS);
1785 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1786 ECOBITS_PPGTT_CACHE64B);
6197349b 1787
b4a74e3a
BW
1788 gab_ctl = I915_READ(GAB_CTL);
1789 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1790
1791 ecochk = I915_READ(GAM_ECOCHK);
1792 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1793
1794 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
6197349b
BW
1795}
1796
1d2a314c 1797/* PPGTT support for Sandybdrige/Gen6 and later */
853ba5d2 1798static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
782f1495
BW
1799 uint64_t start,
1800 uint64_t length,
828c7908 1801 bool use_scratch)
1d2a314c 1802{
e5716f55 1803 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
07749ef3 1804 gen6_pte_t *pt_vaddr, scratch_pte;
782f1495
BW
1805 unsigned first_entry = start >> PAGE_SHIFT;
1806 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3
MT
1807 unsigned act_pt = first_entry / GEN6_PTES;
1808 unsigned first_pte = first_entry % GEN6_PTES;
7bddb01f 1809 unsigned last_pte, i;
1d2a314c 1810
c114f76a
MK
1811 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
1812 I915_CACHE_LLC, true, 0);
1d2a314c 1813
7bddb01f
DV
1814 while (num_entries) {
1815 last_pte = first_pte + num_entries;
07749ef3
MT
1816 if (last_pte > GEN6_PTES)
1817 last_pte = GEN6_PTES;
7bddb01f 1818
d1c54acd 1819 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1d2a314c 1820
7bddb01f
DV
1821 for (i = first_pte; i < last_pte; i++)
1822 pt_vaddr[i] = scratch_pte;
1d2a314c 1823
d1c54acd 1824 kunmap_px(ppgtt, pt_vaddr);
1d2a314c 1825
7bddb01f
DV
1826 num_entries -= last_pte - first_pte;
1827 first_pte = 0;
a15326a5 1828 act_pt++;
7bddb01f 1829 }
1d2a314c
DV
1830}
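/*
 * Worked example (illustrative only), assuming 4KiB pages and 4-byte gen6
 * PTEs, i.e. GEN6_PTES == 1024: clearing start = 0x403000, length = 0x3000
 * gives
 *
 *   first_entry = 0x403000 >> PAGE_SHIFT = 0x403   (GPU page number)
 *   num_entries = 0x3000   >> PAGE_SHIFT = 3
 *   act_pt      = 0x403 / GEN6_PTES      = 1       (second page table)
 *   first_pte   = 0x403 % GEN6_PTES      = 3
 *
 * so PTEs 3..5 of page table 1 are rewritten with scratch_pte. A range that
 * crosses a page-table boundary simply loops around: first_pte resets to 0
 * and act_pt is incremented for the remainder.
 */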
1831
853ba5d2 1832static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
def886c3 1833 struct sg_table *pages,
782f1495 1834 uint64_t start,
24f3a8cf 1835 enum i915_cache_level cache_level, u32 flags)
def886c3 1836{
e5716f55 1837 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
07749ef3 1838 gen6_pte_t *pt_vaddr;
782f1495 1839 unsigned first_entry = start >> PAGE_SHIFT;
07749ef3
MT
1840 unsigned act_pt = first_entry / GEN6_PTES;
1841 unsigned act_pte = first_entry % GEN6_PTES;
6e995e23
ID
1842 struct sg_page_iter sg_iter;
1843
cc79714f 1844 pt_vaddr = NULL;
6e995e23 1845 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
cc79714f 1846 if (pt_vaddr == NULL)
d1c54acd 1847 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
6e995e23 1848
cc79714f
CW
1849 pt_vaddr[act_pte] =
1850 vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
24f3a8cf
AG
1851 cache_level, true, flags);
1852
07749ef3 1853 if (++act_pte == GEN6_PTES) {
d1c54acd 1854 kunmap_px(ppgtt, pt_vaddr);
cc79714f 1855 pt_vaddr = NULL;
a15326a5 1856 act_pt++;
6e995e23 1857 act_pte = 0;
def886c3 1858 }
def886c3 1859 }
cc79714f 1860 if (pt_vaddr)
d1c54acd 1861 kunmap_px(ppgtt, pt_vaddr);
def886c3
DV
1862}
1863
678d96fb 1864static int gen6_alloc_va_range(struct i915_address_space *vm,
a05d80ee 1865 uint64_t start_in, uint64_t length_in)
678d96fb 1866{
4933d519
MT
1867 DECLARE_BITMAP(new_page_tables, I915_PDES);
1868 struct drm_device *dev = vm->dev;
72e96d64
JL
1869 struct drm_i915_private *dev_priv = to_i915(dev);
1870 struct i915_ggtt *ggtt = &dev_priv->ggtt;
e5716f55 1871 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
ec565b3c 1872 struct i915_page_table *pt;
a05d80ee 1873 uint32_t start, length, start_save, length_save;
678d96fb 1874 uint32_t pde, temp;
4933d519
MT
1875 int ret;
1876
a05d80ee
MK
1877 if (WARN_ON(start_in + length_in > ppgtt->base.total))
1878 return -ENODEV;
1879
1880 start = start_save = start_in;
1881 length = length_save = length_in;
4933d519
MT
1882
1883 bitmap_zero(new_page_tables, I915_PDES);
1884
 1885 /* The allocation is done in two stages so that we can bail out with
 1886 * a minimal amount of pain. The first stage finds new page tables that
 1887 * need allocation. The second stage marks the PTEs in use within those
 1888 * page tables. (See the worked example after this function.)
 1889 */
1890 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
79ab9370 1891 if (pt != vm->scratch_pt) {
4933d519
MT
1892 WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
1893 continue;
1894 }
1895
1896 /* We've already allocated a page table */
1897 WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
1898
8a1ebd74 1899 pt = alloc_pt(dev);
4933d519
MT
1900 if (IS_ERR(pt)) {
1901 ret = PTR_ERR(pt);
1902 goto unwind_out;
1903 }
1904
1905 gen6_initialize_pt(vm, pt);
1906
1907 ppgtt->pd.page_table[pde] = pt;
966082c9 1908 __set_bit(pde, new_page_tables);
72744cb1 1909 trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
4933d519
MT
1910 }
1911
1912 start = start_save;
1913 length = length_save;
678d96fb
BW
1914
1915 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
1916 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
1917
1918 bitmap_zero(tmp_bitmap, GEN6_PTES);
1919 bitmap_set(tmp_bitmap, gen6_pte_index(start),
1920 gen6_pte_count(start, length));
1921
966082c9 1922 if (__test_and_clear_bit(pde, new_page_tables))
4933d519
MT
1923 gen6_write_pde(&ppgtt->pd, pde, pt);
1924
72744cb1
MT
1925 trace_i915_page_table_entry_map(vm, pde, pt,
1926 gen6_pte_index(start),
1927 gen6_pte_count(start, length),
1928 GEN6_PTES);
4933d519 1929 bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
678d96fb
BW
1930 GEN6_PTES);
1931 }
1932
4933d519
MT
1933 WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
1934
 1935 /* Make sure the write is complete before other code can use this page
 1936 * table. This is also required for WC-mapped PTEs */
72e96d64 1937 readl(ggtt->gsm);
4933d519 1938
563222a7 1939 mark_tlbs_dirty(ppgtt);
678d96fb 1940 return 0;
4933d519
MT
1941
1942unwind_out:
1943 for_each_set_bit(pde, new_page_tables, I915_PDES) {
ec565b3c 1944 struct i915_page_table *pt = ppgtt->pd.page_table[pde];
4933d519 1945
79ab9370 1946 ppgtt->pd.page_table[pde] = vm->scratch_pt;
a08e111a 1947 free_pt(vm->dev, pt);
4933d519
MT
1948 }
1949
1950 mark_tlbs_dirty(ppgtt);
1951 return ret;
678d96fb
BW
1952}
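/*
 * Worked example for the two-stage allocation above (illustrative only),
 * assuming GEN6_PTES == 1024 so that each PDE spans 4MiB: a request for
 * start_in = 1MiB, length_in = 6MiB covers PDEs 0 and 1.
 *
 *   Stage 1: any of those PDEs still pointing at vm->scratch_pt gets a real
 *            page table from alloc_pt() and its bit set in new_page_tables.
 *   Stage 2: for PDE 0, gen6_pte_index(1MiB) = 256 and gen6_pte_count() =
 *            768, so bits 256..1023 are ORed into pt->used_ptes; the
 *            remaining 3MiB in PDE 1 marks bits 0..767. PDEs for newly
 *            allocated tables are also written out via gen6_write_pde().
 *
 * If an allocation fails in stage 1, unwind_out frees only the tables
 * recorded in new_page_tables and points their PDEs back at the scratch
 * page table, leaving previously populated entries untouched.
 */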
1953
8776f02b
MK
1954static int gen6_init_scratch(struct i915_address_space *vm)
1955{
1956 struct drm_device *dev = vm->dev;
1957
1958 vm->scratch_page = alloc_scratch_page(dev);
1959 if (IS_ERR(vm->scratch_page))
1960 return PTR_ERR(vm->scratch_page);
1961
1962 vm->scratch_pt = alloc_pt(dev);
1963 if (IS_ERR(vm->scratch_pt)) {
1964 free_scratch_page(dev, vm->scratch_page);
1965 return PTR_ERR(vm->scratch_pt);
1966 }
1967
1968 gen6_initialize_pt(vm, vm->scratch_pt);
1969
1970 return 0;
1971}
1972
1973static void gen6_free_scratch(struct i915_address_space *vm)
1974{
1975 struct drm_device *dev = vm->dev;
1976
1977 free_pt(dev, vm->scratch_pt);
1978 free_scratch_page(dev, vm->scratch_page);
1979}
1980
061dd493 1981static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
a00d825d 1982{
e5716f55 1983 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
09942c65
MT
1984 struct i915_page_table *pt;
1985 uint32_t pde;
4933d519 1986
061dd493
DV
1987 drm_mm_remove_node(&ppgtt->node);
1988
09942c65 1989 gen6_for_all_pdes(pt, ppgtt, pde) {
79ab9370 1990 if (pt != vm->scratch_pt)
a08e111a 1991 free_pt(ppgtt->base.dev, pt);
4933d519 1992 }
06fda602 1993
8776f02b 1994 gen6_free_scratch(vm);
3440d265
DV
1995}
1996
b146520f 1997static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
3440d265 1998{
8776f02b 1999 struct i915_address_space *vm = &ppgtt->base;
853ba5d2 2000 struct drm_device *dev = ppgtt->base.dev;
72e96d64
JL
2001 struct drm_i915_private *dev_priv = to_i915(dev);
2002 struct i915_ggtt *ggtt = &dev_priv->ggtt;
e3cc1995 2003 bool retried = false;
b146520f 2004 int ret;
1d2a314c 2005
c8d4c0d6
BW
 2006 /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
2007 * allocator works in address space sizes, so it's multiplied by page
2008 * size. We allocate at the top of the GTT to avoid fragmentation.
2009 */
72e96d64 2010 BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
4933d519 2011
8776f02b
MK
2012 ret = gen6_init_scratch(vm);
2013 if (ret)
2014 return ret;
4933d519 2015
e3cc1995 2016alloc:
72e96d64 2017 ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
c8d4c0d6
BW
2018 &ppgtt->node, GEN6_PD_SIZE,
2019 GEN6_PD_ALIGN, 0,
72e96d64 2020 0, ggtt->base.total,
3e8b5ae9 2021 DRM_MM_TOPDOWN);
e3cc1995 2022 if (ret == -ENOSPC && !retried) {
72e96d64 2023 ret = i915_gem_evict_something(dev, &ggtt->base,
e3cc1995 2024 GEN6_PD_SIZE, GEN6_PD_ALIGN,
d23db88c 2025 I915_CACHE_NONE,
72e96d64 2026 0, ggtt->base.total,
d23db88c 2027 0);
e3cc1995 2028 if (ret)
678d96fb 2029 goto err_out;
e3cc1995
BW
2030
2031 retried = true;
2032 goto alloc;
2033 }
c8d4c0d6 2034
c8c26622 2035 if (ret)
678d96fb
BW
2036 goto err_out;
2037
c8c26622 2038
72e96d64 2039 if (ppgtt->node.start < ggtt->mappable_end)
c8d4c0d6 2040 DRM_DEBUG("Forced to use aperture for PDEs\n");
1d2a314c 2041
c8c26622 2042 return 0;
678d96fb
BW
2043
2044err_out:
8776f02b 2045 gen6_free_scratch(vm);
678d96fb 2046 return ret;
b146520f
BW
2047}
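/*
 * Sizing note (illustrative only), assuming 4KiB pages and I915_PDES == 512:
 * the drm_mm reservation above asks for GEN6_PD_SIZE = 512 * PAGE_SIZE =
 * 2MiB of GGTT address space, one page per PDE, since the allocator works
 * in address-space sizes rather than in PDE slots. DRM_MM_TOPDOWN places it
 * at the top of the GGTT to avoid fragmenting the rest of the address space.
 */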
2048
b146520f
BW
2049static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
2050{
2f2cf682 2051 return gen6_ppgtt_allocate_page_directories(ppgtt);
4933d519 2052}
06dc68d6 2053
4933d519
MT
2054static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
2055 uint64_t start, uint64_t length)
2056{
ec565b3c 2057 struct i915_page_table *unused;
4933d519 2058 uint32_t pde, temp;
1d2a314c 2059
4933d519 2060 gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
79ab9370 2061 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
b146520f
BW
2062}
2063
5c5f6457 2064static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
b146520f
BW
2065{
2066 struct drm_device *dev = ppgtt->base.dev;
72e96d64
JL
2067 struct drm_i915_private *dev_priv = to_i915(dev);
2068 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f
BW
2069 int ret;
2070
72e96d64 2071 ppgtt->base.pte_encode = ggtt->base.pte_encode;
b146520f 2072 if (IS_GEN6(dev)) {
b146520f
BW
2073 ppgtt->switch_mm = gen6_mm_switch;
2074 } else if (IS_HASWELL(dev)) {
b146520f
BW
2075 ppgtt->switch_mm = hsw_mm_switch;
2076 } else if (IS_GEN7(dev)) {
b146520f
BW
2077 ppgtt->switch_mm = gen7_mm_switch;
2078 } else
2079 BUG();
2080
71ba2d64
YZ
2081 if (intel_vgpu_active(dev))
2082 ppgtt->switch_mm = vgpu_mm_switch;
2083
b146520f
BW
2084 ret = gen6_ppgtt_alloc(ppgtt);
2085 if (ret)
2086 return ret;
2087
5c5f6457 2088 ppgtt->base.allocate_va_range = gen6_alloc_va_range;
b146520f
BW
2089 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
2090 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
777dc5bb
DV
2091 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
2092 ppgtt->base.bind_vma = ppgtt_bind_vma;
b146520f 2093 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
b146520f 2094 ppgtt->base.start = 0;
09942c65 2095 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
87d60b63 2096 ppgtt->debug_dump = gen6_dump_ppgtt;
1d2a314c 2097
44159ddb 2098 ppgtt->pd.base.ggtt_offset =
07749ef3 2099 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
1d2a314c 2100
72e96d64 2101 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
44159ddb 2102 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
678d96fb 2103
5c5f6457 2104 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
1d2a314c 2105
678d96fb
BW
2106 gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
2107
440fd528 2108 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
b146520f
BW
2109 ppgtt->node.size >> 20,
2110 ppgtt->node.start / PAGE_SIZE);
3440d265 2111
fa76da34 2112 DRM_DEBUG("Adding PPGTT at offset %x\n",
44159ddb 2113 ppgtt->pd.base.ggtt_offset << 10);
fa76da34 2114
b146520f 2115 return 0;
3440d265
DV
2116}
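/*
 * Worked example for the sizing above (illustrative only), assuming 4KiB
 * pages, 4-byte gen6 PTEs (GEN6_PTES == 1024) and I915_PDES == 512:
 *
 *   total = 512 * 1024 * 4096 = 2GiB, which is the range advertised by the
 *   PP_DIR_DCLV_2G value programmed in the switch_mm() paths above.
 *
 *   ggtt_offset = node.start / PAGE_SIZE * sizeof(gen6_pte_t): each page of
 *   the reserved GGTT range corresponds to one 4-byte slot in the GSM, so
 *   this is the byte offset of the first PDE, and pd_addr is simply
 *   ggtt->gsm plus that offset.
 */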
2117
5c5f6457 2118static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
3440d265 2119{
853ba5d2 2120 ppgtt->base.dev = dev;
3440d265 2121
3ed124b2 2122 if (INTEL_INFO(dev)->gen < 8)
5c5f6457 2123 return gen6_ppgtt_init(ppgtt);
3ed124b2 2124 else
d7b2633d 2125 return gen8_ppgtt_init(ppgtt);
fa76da34 2126}
c114f76a 2127
a2cad9df
MW
2128static void i915_address_space_init(struct i915_address_space *vm,
2129 struct drm_i915_private *dev_priv)
2130{
2131 drm_mm_init(&vm->mm, vm->start, vm->total);
2132 vm->dev = dev_priv->dev;
2133 INIT_LIST_HEAD(&vm->active_list);
2134 INIT_LIST_HEAD(&vm->inactive_list);
2135 list_add_tail(&vm->global_link, &dev_priv->vm_list);
2136}
2137
d5165ebd
TG
2138static void gtt_write_workarounds(struct drm_device *dev)
2139{
2140 struct drm_i915_private *dev_priv = dev->dev_private;
2141
 2142 /* This function is for GTT-related workarounds. It is called on driver
 2143 * load and after a GPU reset, so workarounds can be placed here even if
 2144 * the registers they touch get clobbered by a GPU reset.
 2145 */
2146 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
2147 if (IS_BROADWELL(dev))
2148 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2149 else if (IS_CHERRYVIEW(dev))
2150 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2151 else if (IS_SKYLAKE(dev))
2152 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2153 else if (IS_BROXTON(dev))
2154 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2155}
2156
fa76da34
DV
2157int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2158{
2159 struct drm_i915_private *dev_priv = dev->dev_private;
2160 int ret = 0;
3ed124b2 2161
5c5f6457 2162 ret = __hw_ppgtt_init(dev, ppgtt);
fa76da34 2163 if (ret == 0) {
c7c48dfd 2164 kref_init(&ppgtt->ref);
a2cad9df 2165 i915_address_space_init(&ppgtt->base, dev_priv);
93bd8649 2166 }
1d2a314c
DV
2167
2168 return ret;
2169}
2170
82460d97
DV
2171int i915_ppgtt_init_hw(struct drm_device *dev)
2172{
d5165ebd
TG
2173 gtt_write_workarounds(dev);
2174
671b5013
TD
2175 /* In the case of execlists, PPGTT is enabled by the context descriptor
2176 * and the PDPs are contained within the context itself. We don't
2177 * need to do anything here. */
2178 if (i915.enable_execlists)
2179 return 0;
2180
82460d97
DV
2181 if (!USES_PPGTT(dev))
2182 return 0;
2183
2184 if (IS_GEN6(dev))
2185 gen6_ppgtt_enable(dev);
2186 else if (IS_GEN7(dev))
2187 gen7_ppgtt_enable(dev);
2188 else if (INTEL_INFO(dev)->gen >= 8)
2189 gen8_ppgtt_enable(dev);
2190 else
5f77eeb0 2191 MISSING_CASE(INTEL_INFO(dev)->gen);
82460d97 2192
4ad2fd88
JH
2193 return 0;
2194}
1d2a314c 2195
4d884705
DV
2196struct i915_hw_ppgtt *
2197i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
2198{
2199 struct i915_hw_ppgtt *ppgtt;
2200 int ret;
2201
2202 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2203 if (!ppgtt)
2204 return ERR_PTR(-ENOMEM);
2205
2206 ret = i915_ppgtt_init(dev, ppgtt);
2207 if (ret) {
2208 kfree(ppgtt);
2209 return ERR_PTR(ret);
2210 }
2211
2212 ppgtt->file_priv = fpriv;
2213
198c974d
DCS
2214 trace_i915_ppgtt_create(&ppgtt->base);
2215
4d884705
DV
2216 return ppgtt;
2217}
2218
ee960be7
DV
2219void i915_ppgtt_release(struct kref *kref)
2220{
2221 struct i915_hw_ppgtt *ppgtt =
2222 container_of(kref, struct i915_hw_ppgtt, ref);
2223
198c974d
DCS
2224 trace_i915_ppgtt_release(&ppgtt->base);
2225
ee960be7
DV
2226 /* vmas should already be unbound */
2227 WARN_ON(!list_empty(&ppgtt->base.active_list));
2228 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
2229
19dd120c
DV
2230 list_del(&ppgtt->base.global_link);
2231 drm_mm_takedown(&ppgtt->base.mm);
2232
ee960be7
DV
2233 ppgtt->base.cleanup(&ppgtt->base);
2234 kfree(ppgtt);
2235}
1d2a314c 2236
a81cc00c
BW
2237extern int intel_iommu_gfx_mapped;
 2238/* Certain Gen5 chipsets require idling the GPU before
2239 * unmapping anything from the GTT when VT-d is enabled.
2240 */
2c642b07 2241static bool needs_idle_maps(struct drm_device *dev)
a81cc00c
BW
2242{
2243#ifdef CONFIG_INTEL_IOMMU
2244 /* Query intel_iommu to see if we need the workaround. Presumably that
2245 * was loaded first.
2246 */
2247 if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
2248 return true;
2249#endif
2250 return false;
2251}
2252
5c042287
BW
2253static bool do_idling(struct drm_i915_private *dev_priv)
2254{
72e96d64 2255 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5c042287
BW
2256 bool ret = dev_priv->mm.interruptible;
2257
72e96d64 2258 if (unlikely(ggtt->do_idle_maps)) {
5c042287 2259 dev_priv->mm.interruptible = false;
b2da9fe5 2260 if (i915_gpu_idle(dev_priv->dev)) {
5c042287
BW
2261 DRM_ERROR("Couldn't idle GPU\n");
2262 /* Wait a bit, in hopes it avoids the hang */
2263 udelay(10);
2264 }
2265 }
2266
2267 return ret;
2268}
2269
2270static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
2271{
72e96d64
JL
2272 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2273
2274 if (unlikely(ggtt->do_idle_maps))
5c042287
BW
2275 dev_priv->mm.interruptible = interruptible;
2276}
2277
828c7908
BW
2278void i915_check_and_clear_faults(struct drm_device *dev)
2279{
2280 struct drm_i915_private *dev_priv = dev->dev_private;
e2f80391 2281 struct intel_engine_cs *engine;
828c7908
BW
2282
2283 if (INTEL_INFO(dev)->gen < 6)
2284 return;
2285
b4ac5afc 2286 for_each_engine(engine, dev_priv) {
828c7908 2287 u32 fault_reg;
e2f80391 2288 fault_reg = I915_READ(RING_FAULT_REG(engine));
828c7908
BW
2289 if (fault_reg & RING_FAULT_VALID) {
2290 DRM_DEBUG_DRIVER("Unexpected fault\n"
59a5d290 2291 "\tAddr: 0x%08lx\n"
828c7908
BW
2292 "\tAddress space: %s\n"
2293 "\tSource ID: %d\n"
2294 "\tType: %d\n",
2295 fault_reg & PAGE_MASK,
2296 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2297 RING_FAULT_SRCID(fault_reg),
2298 RING_FAULT_FAULT_TYPE(fault_reg));
e2f80391 2299 I915_WRITE(RING_FAULT_REG(engine),
828c7908
BW
2300 fault_reg & ~RING_FAULT_VALID);
2301 }
2302 }
4a570db5 2303 POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
828c7908
BW
2304}
2305
91e56499
CW
2306static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
2307{
2d1fe073 2308 if (INTEL_INFO(dev_priv)->gen < 6) {
91e56499
CW
2309 intel_gtt_chipset_flush();
2310 } else {
2311 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2312 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2313 }
2314}
2315
828c7908
BW
2316void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
2317{
72e96d64
JL
2318 struct drm_i915_private *dev_priv = to_i915(dev);
2319 struct i915_ggtt *ggtt = &dev_priv->ggtt;
828c7908
BW
2320
2321 /* Don't bother messing with faults pre GEN6 as we have little
2322 * documentation supporting that it's a good idea.
2323 */
2324 if (INTEL_INFO(dev)->gen < 6)
2325 return;
2326
2327 i915_check_and_clear_faults(dev);
2328
72e96d64
JL
2329 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
2330 true);
91e56499
CW
2331
2332 i915_ggtt_flush(dev_priv);
828c7908
BW
2333}
2334
74163907 2335int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
7c2e6fdf 2336{
9da3da66
CW
2337 if (!dma_map_sg(&obj->base.dev->pdev->dev,
2338 obj->pages->sgl, obj->pages->nents,
2339 PCI_DMA_BIDIRECTIONAL))
2340 return -ENOSPC;
2341
2342 return 0;
7c2e6fdf
DV
2343}
2344
2c642b07 2345static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
94ec8f61
BW
2346{
2347#ifdef writeq
2348 writeq(pte, addr);
2349#else
2350 iowrite32((u32)pte, addr);
2351 iowrite32(pte >> 32, addr + 4);
2352#endif
2353}
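/*
 * Illustration: on builds without writeq() the 64-bit gen8 PTE is emitted
 * as two 32-bit MMIO writes, low dword first. For example,
 * pte = 0x0000000123456007 becomes
 *
 *   iowrite32(0x23456007, addr);      low dword: low address bits + flags
 *   iowrite32(0x00000001, addr + 4);  high dword: upper address bits
 *
 * which is why the PTE update is not atomic on 32-bit kernels.
 */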
2354
2355static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2356 struct sg_table *st,
782f1495 2357 uint64_t start,
24f3a8cf 2358 enum i915_cache_level level, u32 unused)
94ec8f61 2359{
72e96d64 2360 struct drm_i915_private *dev_priv = to_i915(vm->dev);
ce7fda2e 2361 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495 2362 unsigned first_entry = start >> PAGE_SHIFT;
07749ef3 2363 gen8_pte_t __iomem *gtt_entries =
72e96d64 2364 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
94ec8f61
BW
2365 int i = 0;
2366 struct sg_page_iter sg_iter;
57007df7 2367 dma_addr_t addr = 0; /* shut up gcc */
be69459a
ID
2368 int rpm_atomic_seq;
2369
2370 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
94ec8f61
BW
2371
2372 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
2373 addr = sg_dma_address(sg_iter.sg) +
2374 (sg_iter.sg_pgoffset << PAGE_SHIFT);
2375 gen8_set_pte(&gtt_entries[i],
2376 gen8_pte_encode(addr, level, true));
2377 i++;
2378 }
2379
2380 /*
2381 * XXX: This serves as a posting read to make sure that the PTE has
 2382 * actually been updated. There is some concern that, even though
 2383 * registers and PTEs are within the same BAR, they may be subject to
 2384 * different (NUMA-like) access patterns. Therefore, even with the way we assume
2385 * hardware should work, we must keep this posting read for paranoia.
2386 */
2387 if (i != 0)
2388 WARN_ON(readq(&gtt_entries[i-1])
2389 != gen8_pte_encode(addr, level, true));
2390
94ec8f61
BW
2391 /* This next bit makes the above posting read even more important. We
2392 * want to flush the TLBs only after we're certain all the PTE updates
2393 * have finished.
2394 */
2395 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2396 POSTING_READ(GFX_FLSH_CNTL_GEN6);
be69459a
ID
2397
2398 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
94ec8f61
BW
2399}
2400
c140330b
CW
2401struct insert_entries {
2402 struct i915_address_space *vm;
2403 struct sg_table *st;
2404 uint64_t start;
2405 enum i915_cache_level level;
2406 u32 flags;
2407};
2408
2409static int gen8_ggtt_insert_entries__cb(void *_arg)
2410{
2411 struct insert_entries *arg = _arg;
2412 gen8_ggtt_insert_entries(arg->vm, arg->st,
2413 arg->start, arg->level, arg->flags);
2414 return 0;
2415}
2416
2417static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2418 struct sg_table *st,
2419 uint64_t start,
2420 enum i915_cache_level level,
2421 u32 flags)
2422{
2423 struct insert_entries arg = { vm, st, start, level, flags };
2424 stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
2425}
2426
e76e9aeb
BW
2427/*
2428 * Binds an object into the global gtt with the specified cache level. The object
2429 * will be accessible to the GPU via commands whose operands reference offsets
2430 * within the global GTT as well as accessible by the GPU through the GMADR
2431 * mapped BAR (dev_priv->mm.gtt->gtt).
2432 */
853ba5d2 2433static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
7faf1ab2 2434 struct sg_table *st,
782f1495 2435 uint64_t start,
24f3a8cf 2436 enum i915_cache_level level, u32 flags)
e76e9aeb 2437{
72e96d64 2438 struct drm_i915_private *dev_priv = to_i915(vm->dev);
ce7fda2e 2439 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495 2440 unsigned first_entry = start >> PAGE_SHIFT;
07749ef3 2441 gen6_pte_t __iomem *gtt_entries =
72e96d64 2442 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
6e995e23
ID
2443 int i = 0;
2444 struct sg_page_iter sg_iter;
57007df7 2445 dma_addr_t addr = 0;
be69459a
ID
2446 int rpm_atomic_seq;
2447
2448 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
e76e9aeb 2449
6e995e23 2450 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
2db76d7c 2451 addr = sg_page_iter_dma_address(&sg_iter);
24f3a8cf 2452 iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
6e995e23 2453 i++;
e76e9aeb
BW
2454 }
2455
e76e9aeb
BW
2456 /* XXX: This serves as a posting read to make sure that the PTE has
 2457 * actually been updated. There is some concern that, even though
 2458 * registers and PTEs are within the same BAR, they may be subject to
 2459 * different (NUMA-like) access patterns. Therefore, even with the way we assume
2460 * hardware should work, we must keep this posting read for paranoia.
2461 */
57007df7
PM
2462 if (i != 0) {
2463 unsigned long gtt = readl(&gtt_entries[i-1]);
2464 WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
2465 }
0f9b91c7
BW
2466
2467 /* This next bit makes the above posting read even more important. We
2468 * want to flush the TLBs only after we're certain all the PTE updates
2469 * have finished.
2470 */
2471 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2472 POSTING_READ(GFX_FLSH_CNTL_GEN6);
be69459a
ID
2473
2474 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
e76e9aeb
BW
2475}
2476
94ec8f61 2477static void gen8_ggtt_clear_range(struct i915_address_space *vm,
782f1495
BW
2478 uint64_t start,
2479 uint64_t length,
94ec8f61
BW
2480 bool use_scratch)
2481{
72e96d64 2482 struct drm_i915_private *dev_priv = to_i915(vm->dev);
ce7fda2e 2483 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2484 unsigned first_entry = start >> PAGE_SHIFT;
2485 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3 2486 gen8_pte_t scratch_pte, __iomem *gtt_base =
72e96d64
JL
2487 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2488 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
94ec8f61 2489 int i;
be69459a
ID
2490 int rpm_atomic_seq;
2491
2492 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
94ec8f61
BW
2493
2494 if (WARN(num_entries > max_entries,
2495 "First entry = %d; Num entries = %d (max=%d)\n",
2496 first_entry, num_entries, max_entries))
2497 num_entries = max_entries;
2498
c114f76a 2499 scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
94ec8f61
BW
2500 I915_CACHE_LLC,
2501 use_scratch);
2502 for (i = 0; i < num_entries; i++)
2503 gen8_set_pte(&gtt_base[i], scratch_pte);
2504 readl(gtt_base);
be69459a
ID
2505
2506 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
94ec8f61
BW
2507}
2508
853ba5d2 2509static void gen6_ggtt_clear_range(struct i915_address_space *vm,
782f1495
BW
2510 uint64_t start,
2511 uint64_t length,
828c7908 2512 bool use_scratch)
7faf1ab2 2513{
72e96d64 2514 struct drm_i915_private *dev_priv = to_i915(vm->dev);
ce7fda2e 2515 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2516 unsigned first_entry = start >> PAGE_SHIFT;
2517 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3 2518 gen6_pte_t scratch_pte, __iomem *gtt_base =
72e96d64
JL
2519 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2520 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
7faf1ab2 2521 int i;
be69459a
ID
2522 int rpm_atomic_seq;
2523
2524 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
7faf1ab2
DV
2525
2526 if (WARN(num_entries > max_entries,
2527 "First entry = %d; Num entries = %d (max=%d)\n",
2528 first_entry, num_entries, max_entries))
2529 num_entries = max_entries;
2530
c114f76a
MK
2531 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
2532 I915_CACHE_LLC, use_scratch, 0);
828c7908 2533
7faf1ab2
DV
2534 for (i = 0; i < num_entries; i++)
2535 iowrite32(scratch_pte, &gtt_base[i]);
2536 readl(gtt_base);
be69459a
ID
2537
2538 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
7faf1ab2
DV
2539}
2540
d369d2d9
DV
2541static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2542 struct sg_table *pages,
2543 uint64_t start,
2544 enum i915_cache_level cache_level, u32 unused)
7faf1ab2 2545{
be69459a 2546 struct drm_i915_private *dev_priv = vm->dev->dev_private;
7faf1ab2
DV
2547 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2548 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
be69459a
ID
2549 int rpm_atomic_seq;
2550
2551 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
7faf1ab2 2552
d369d2d9 2553 intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
0875546c 2554
be69459a
ID
2555 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2556
7faf1ab2
DV
2557}
2558
853ba5d2 2559static void i915_ggtt_clear_range(struct i915_address_space *vm,
782f1495
BW
2560 uint64_t start,
2561 uint64_t length,
828c7908 2562 bool unused)
7faf1ab2 2563{
be69459a 2564 struct drm_i915_private *dev_priv = vm->dev->dev_private;
782f1495
BW
2565 unsigned first_entry = start >> PAGE_SHIFT;
2566 unsigned num_entries = length >> PAGE_SHIFT;
be69459a
ID
2567 int rpm_atomic_seq;
2568
2569 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2570
7faf1ab2 2571 intel_gtt_clear_range(first_entry, num_entries);
be69459a
ID
2572
2573 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
7faf1ab2
DV
2574}
2575
70b9f6f8
DV
2576static int ggtt_bind_vma(struct i915_vma *vma,
2577 enum i915_cache_level cache_level,
2578 u32 flags)
0a878716
DV
2579{
2580 struct drm_i915_gem_object *obj = vma->obj;
2581 u32 pte_flags = 0;
2582 int ret;
2583
2584 ret = i915_get_ggtt_vma_pages(vma);
2585 if (ret)
2586 return ret;
2587
2588 /* Currently applicable only to VLV */
2589 if (obj->gt_ro)
2590 pte_flags |= PTE_READ_ONLY;
2591
2592 vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
2593 vma->node.start,
2594 cache_level, pte_flags);
2595
2596 /*
2597 * Without aliasing PPGTT there's no difference between
2598 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2599 * upgrade to both bound if we bind either to avoid double-binding.
2600 */
2601 vma->bound |= GLOBAL_BIND | LOCAL_BIND;
2602
2603 return 0;
2604}
2605
2606static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2607 enum i915_cache_level cache_level,
2608 u32 flags)
d5bd1449 2609{
321d178e 2610 u32 pte_flags;
70b9f6f8
DV
2611 int ret;
2612
2613 ret = i915_get_ggtt_vma_pages(vma);
2614 if (ret)
2615 return ret;
7faf1ab2 2616
24f3a8cf 2617 /* Currently applicable only to VLV */
321d178e
CW
2618 pte_flags = 0;
2619 if (vma->obj->gt_ro)
f329f5f6 2620 pte_flags |= PTE_READ_ONLY;
24f3a8cf 2621
ec7adb6e 2622
0a878716 2623 if (flags & GLOBAL_BIND) {
321d178e
CW
2624 vma->vm->insert_entries(vma->vm,
2625 vma->ggtt_view.pages,
0875546c
DV
2626 vma->node.start,
2627 cache_level, pte_flags);
6f65e29a 2628 }
d5bd1449 2629
0a878716 2630 if (flags & LOCAL_BIND) {
321d178e
CW
2631 struct i915_hw_ppgtt *appgtt =
2632 to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
2633 appgtt->base.insert_entries(&appgtt->base,
2634 vma->ggtt_view.pages,
782f1495 2635 vma->node.start,
f329f5f6 2636 cache_level, pte_flags);
6f65e29a 2637 }
70b9f6f8
DV
2638
2639 return 0;
d5bd1449
CW
2640}
2641
6f65e29a 2642static void ggtt_unbind_vma(struct i915_vma *vma)
74163907 2643{
6f65e29a 2644 struct drm_device *dev = vma->vm->dev;
7faf1ab2 2645 struct drm_i915_private *dev_priv = dev->dev_private;
6f65e29a 2646 struct drm_i915_gem_object *obj = vma->obj;
06615ee5
JL
2647 const uint64_t size = min_t(uint64_t,
2648 obj->base.size,
2649 vma->node.size);
6f65e29a 2650
aff43766 2651 if (vma->bound & GLOBAL_BIND) {
782f1495
BW
2652 vma->vm->clear_range(vma->vm,
2653 vma->node.start,
06615ee5 2654 size,
6f65e29a 2655 true);
6f65e29a 2656 }
74898d7e 2657
0875546c 2658 if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
6f65e29a 2659 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
06615ee5 2660
6f65e29a 2661 appgtt->base.clear_range(&appgtt->base,
782f1495 2662 vma->node.start,
06615ee5 2663 size,
6f65e29a 2664 true);
6f65e29a 2665 }
74163907
DV
2666}
2667
2668void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
7c2e6fdf 2669{
5c042287
BW
2670 struct drm_device *dev = obj->base.dev;
2671 struct drm_i915_private *dev_priv = dev->dev_private;
2672 bool interruptible;
2673
2674 interruptible = do_idling(dev_priv);
2675
5ec5b516
ID
2676 dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
2677 PCI_DMA_BIDIRECTIONAL);
5c042287
BW
2678
2679 undo_idling(dev_priv, interruptible);
7c2e6fdf 2680}
644ec02b 2681
42d6ab48
CW
2682static void i915_gtt_color_adjust(struct drm_mm_node *node,
2683 unsigned long color,
440fd528
TR
2684 u64 *start,
2685 u64 *end)
42d6ab48
CW
2686{
2687 if (node->color != color)
2688 *start += 4096;
2689
2690 if (!list_empty(&node->node_list)) {
2691 node = list_entry(node->node_list.next,
2692 struct drm_mm_node,
2693 node_list);
2694 if (node->allocated && node->color != color)
2695 *end -= 4096;
2696 }
2697}
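/*
 * Worked example (illustrative only): the node "color" carries the object's
 * cache level on the !LLC platforms where this hook is installed, and the
 * adjustment keeps a 4096-byte guard page between differently-colored
 * neighbours. For a hole [0x10000, 0x20000) whose preceding node has a
 * different color, *start is bumped to 0x11000; if the next allocated node
 * also differs, *end is pulled back to 0x1f000, so whatever lands in the
 * hole can never directly abut either neighbour.
 */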
fbe5d36e 2698
f548c0e9 2699static int i915_gem_setup_global_gtt(struct drm_device *dev,
088e0df4
MT
2700 u64 start,
2701 u64 mappable_end,
2702 u64 end)
644ec02b 2703{
e78891ca
BW
2704 /* Let GEM Manage all of the aperture.
2705 *
2706 * However, leave one page at the end still bound to the scratch page.
2707 * There are a number of places where the hardware apparently prefetches
2708 * past the end of the object, and we've seen multiple hangs with the
2709 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2710 * aperture. One page should be enough to keep any prefetching inside
2711 * of the aperture.
2712 */
72e96d64
JL
2713 struct drm_i915_private *dev_priv = to_i915(dev);
2714 struct i915_ggtt *ggtt = &dev_priv->ggtt;
ed2f3452
CW
2715 struct drm_mm_node *entry;
2716 struct drm_i915_gem_object *obj;
2717 unsigned long hole_start, hole_end;
fa76da34 2718 int ret;
644ec02b 2719
35451cb6
BW
2720 BUG_ON(mappable_end > end);
2721
72e96d64 2722 ggtt->base.start = start;
5dda8fa3 2723
a2cad9df
MW
2724 /* Subtract the guard page before address space initialization to
2725 * shrink the range used by drm_mm */
72e96d64
JL
2726 ggtt->base.total = end - start - PAGE_SIZE;
2727 i915_address_space_init(&ggtt->base, dev_priv);
2728 ggtt->base.total += PAGE_SIZE;
5dda8fa3
YZ
2729
2730 if (intel_vgpu_active(dev)) {
2731 ret = intel_vgt_balloon(dev);
2732 if (ret)
2733 return ret;
2734 }
2735
42d6ab48 2736 if (!HAS_LLC(dev))
72e96d64 2737 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
644ec02b 2738
ed2f3452 2739 /* Mark any preallocated objects as occupied */
35c20a60 2740 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
72e96d64 2741 struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base);
fa76da34 2742
088e0df4 2743 DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
c6cfb325
BW
2744 i915_gem_obj_ggtt_offset(obj), obj->base.size);
2745
2746 WARN_ON(i915_gem_obj_ggtt_bound(obj));
72e96d64 2747 ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
6c5566a8
DV
2748 if (ret) {
2749 DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
2750 return ret;
2751 }
aff43766 2752 vma->bound |= GLOBAL_BIND;
d0710abb 2753 __i915_vma_set_map_and_fenceable(vma);
72e96d64 2754 list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
ed2f3452
CW
2755 }
2756
ed2f3452 2757 /* Clear any non-preallocated blocks */
72e96d64 2758 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
ed2f3452
CW
2759 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2760 hole_start, hole_end);
72e96d64 2761 ggtt->base.clear_range(&ggtt->base, hole_start,
782f1495 2762 hole_end - hole_start, true);
ed2f3452
CW
2763 }
2764
2765 /* And finally clear the reserved guard page */
72e96d64 2766 ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true);
6c5566a8 2767
fa76da34
DV
2768 if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
2769 struct i915_hw_ppgtt *ppgtt;
2770
2771 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2772 if (!ppgtt)
2773 return -ENOMEM;
2774
5c5f6457
DV
2775 ret = __hw_ppgtt_init(dev, ppgtt);
2776 if (ret) {
2777 ppgtt->base.cleanup(&ppgtt->base);
2778 kfree(ppgtt);
2779 return ret;
2780 }
2781
2782 if (ppgtt->base.allocate_va_range)
2783 ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
2784 ppgtt->base.total);
4933d519 2785 if (ret) {
061dd493 2786 ppgtt->base.cleanup(&ppgtt->base);
4933d519 2787 kfree(ppgtt);
fa76da34 2788 return ret;
4933d519 2789 }
fa76da34 2790
5c5f6457
DV
2791 ppgtt->base.clear_range(&ppgtt->base,
2792 ppgtt->base.start,
2793 ppgtt->base.total,
2794 true);
2795
fa76da34 2796 dev_priv->mm.aliasing_ppgtt = ppgtt;
72e96d64
JL
2797 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2798 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
fa76da34
DV
2799 }
2800
6c5566a8 2801 return 0;
e76e9aeb
BW
2802}
2803
d85489d3
JL
2804/**
2805 * i915_gem_init_ggtt - Initialize GEM for Global GTT
2806 * @dev: DRM device
2807 */
2808void i915_gem_init_ggtt(struct drm_device *dev)
d7e5008f 2809{
72e96d64
JL
2810 struct drm_i915_private *dev_priv = to_i915(dev);
2811 struct i915_ggtt *ggtt = &dev_priv->ggtt;
d7e5008f 2812
72e96d64 2813 i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total);
e76e9aeb
BW
2814}
2815
d85489d3
JL
2816/**
2817 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
2818 * @dev: DRM device
2819 */
2820void i915_ggtt_cleanup_hw(struct drm_device *dev)
90d0a0e8 2821{
72e96d64
JL
2822 struct drm_i915_private *dev_priv = to_i915(dev);
2823 struct i915_ggtt *ggtt = &dev_priv->ggtt;
90d0a0e8 2824
70e32544
DV
2825 if (dev_priv->mm.aliasing_ppgtt) {
2826 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2827
2828 ppgtt->base.cleanup(&ppgtt->base);
2829 }
2830
a4eba47b
ID
2831 i915_gem_cleanup_stolen(dev);
2832
72e96d64 2833 if (drm_mm_initialized(&ggtt->base.mm)) {
5dda8fa3
YZ
2834 if (intel_vgpu_active(dev))
2835 intel_vgt_deballoon();
2836
72e96d64
JL
2837 drm_mm_takedown(&ggtt->base.mm);
2838 list_del(&ggtt->base.global_link);
90d0a0e8
DV
2839 }
2840
72e96d64 2841 ggtt->base.cleanup(&ggtt->base);
90d0a0e8 2842}
70e32544 2843
2c642b07 2844static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2845{
2846 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2847 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2848 return snb_gmch_ctl << 20;
2849}
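/*
 * Worked example (illustrative only): the GGMS field extracted above is the
 * GTT size in MiB, e.g. a raw value of 2 decodes to 2 << 20 = 2MiB of PTE
 * space. gen6_gmch_probe() later converts that into an address-space size:
 * 2MiB / sizeof(gen6_pte_t) = 512K entries, each mapping a 4KiB page, i.e.
 * a 2GiB global GTT.
 */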
2850
2c642b07 2851static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
9459d252
BW
2852{
2853 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2854 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2855 if (bdw_gmch_ctl)
2856 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
562d55d9
BW
2857
2858#ifdef CONFIG_X86_32
2859 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2860 if (bdw_gmch_ctl > 4)
2861 bdw_gmch_ctl = 4;
2862#endif
2863
9459d252
BW
2864 return bdw_gmch_ctl << 20;
2865}
2866
2c642b07 2867static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
d7f25f23
DL
2868{
2869 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2870 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2871
2872 if (gmch_ctrl)
2873 return 1 << (20 + gmch_ctrl);
2874
2875 return 0;
2876}
2877
2c642b07 2878static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2879{
2880 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2881 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
2882 return snb_gmch_ctl << 25; /* 32 MB units */
2883}
2884
2c642b07 2885static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
9459d252
BW
2886{
2887 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2888 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
2889 return bdw_gmch_ctl << 25; /* 32 MB units */
2890}
2891
d7f25f23
DL
2892static size_t chv_get_stolen_size(u16 gmch_ctrl)
2893{
2894 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2895 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2896
2897 /*
2898 * 0x0 to 0x10: 32MB increments starting at 0MB
2899 * 0x11 to 0x16: 4MB increments starting at 8MB
 2900 * 0x17 to 0x1d: 4MB increments starting at 36MB
2901 */
2902 if (gmch_ctrl < 0x11)
2903 return gmch_ctrl << 25;
2904 else if (gmch_ctrl < 0x17)
2905 return (gmch_ctrl - 0x11 + 2) << 22;
2906 else
2907 return (gmch_ctrl - 0x17 + 9) << 22;
2908}
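/*
 * Worked decode of the table above (illustrative only):
 *
 *   gmch_ctrl = 0x10  ->  0x10 << 25              = 512MB  (32MB steps)
 *   gmch_ctrl = 0x11  ->  (0x11 - 0x11 + 2) << 22 =   8MB  (4MB steps)
 *   gmch_ctrl = 0x16  ->  (0x16 - 0x11 + 2) << 22 =  28MB
 *   gmch_ctrl = 0x17  ->  (0x17 - 0x17 + 9) << 22 =  36MB
 */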
2909
66375014
DL
2910static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2911{
2912 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2913 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2914
2915 if (gen9_gmch_ctl < 0xf0)
2916 return gen9_gmch_ctl << 25; /* 32 MB units */
2917 else
2918 /* 4MB increments starting at 0xf0 for 4MB */
2919 return (gen9_gmch_ctl - 0xf0 + 1) << 22;
2920}
2921
63340133
BW
2922static int ggtt_probe_common(struct drm_device *dev,
2923 size_t gtt_size)
2924{
72e96d64
JL
2925 struct drm_i915_private *dev_priv = to_i915(dev);
2926 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4ad2af1e 2927 struct i915_page_scratch *scratch_page;
72e96d64 2928 phys_addr_t ggtt_phys_addr;
63340133
BW
2929
2930 /* For Modern GENs the PTEs and register space are split in the BAR */
72e96d64
JL
2931 ggtt_phys_addr = pci_resource_start(dev->pdev, 0) +
2932 (pci_resource_len(dev->pdev, 0) / 2);
63340133 2933
2a073f89
ID
2934 /*
2935 * On BXT writes larger than 64 bit to the GTT pagetable range will be
2936 * dropped. For WC mappings in general we have 64 byte burst writes
2937 * when the WC buffer is flushed, so we can't use it, but have to
2938 * resort to an uncached mapping. The WC issue is easily caught by the
2939 * readback check when writing GTT PTE entries.
2940 */
2941 if (IS_BROXTON(dev))
72e96d64 2942 ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size);
2a073f89 2943 else
72e96d64
JL
2944 ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size);
2945 if (!ggtt->gsm) {
63340133
BW
2946 DRM_ERROR("Failed to map the gtt page table\n");
2947 return -ENOMEM;
2948 }
2949
4ad2af1e
MK
2950 scratch_page = alloc_scratch_page(dev);
2951 if (IS_ERR(scratch_page)) {
63340133
BW
2952 DRM_ERROR("Scratch setup failed\n");
2953 /* iounmap will also get called at remove, but meh */
72e96d64 2954 iounmap(ggtt->gsm);
4ad2af1e 2955 return PTR_ERR(scratch_page);
63340133
BW
2956 }
2957
72e96d64 2958 ggtt->base.scratch_page = scratch_page;
4ad2af1e
MK
2959
2960 return 0;
63340133
BW
2961}
2962
fbe5d36e
BW
2963/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2964 * bits. When using advanced contexts each context stores its own PAT, but
2965 * writing this data shouldn't be harmful even in those cases. */
ee0ce478 2966static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
fbe5d36e 2967{
fbe5d36e
BW
2968 uint64_t pat;
2969
2970 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
2971 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
2972 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
2973 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
2974 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2975 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2976 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2977 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2978
2d1fe073 2979 if (!USES_PPGTT(dev_priv))
d6a8b72e
RV
2980 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
2981 * so RTL will always use the value corresponding to
2982 * pat_sel = 000".
2983 * So let's disable cache for GGTT to avoid screen corruptions.
2984 * MOCS still can be used though.
2985 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
2986 * before this patch, i.e. the same uncached + snooping access
2987 * like on gen6/7 seems to be in effect.
2988 * - So this just fixes blitter/render access. Again it looks
2989 * like it's not just uncached access, but uncached + snooping.
2990 * So we can still hold onto all our assumptions wrt cpu
2991 * clflushing on LLC machines.
2992 */
2993 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
2994
fbe5d36e
BW
2995 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
2996 * write would work. */
7e435ad2
VS
2997 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2998 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
fbe5d36e
BW
2999}
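/*
 * Layout note (an assumption for illustration): each GEN8_PPAT(index, value)
 * term packs an 8-bit entry at bits [index*8 + 7 : index*8] of the 64-bit
 * PAT word, so entries 0-3 end up in GEN8_PRIVATE_PAT_LO and entries 4-7 in
 * GEN8_PRIVATE_PAT_HI via the "pat >> 32" write above. Since GGTT entries
 * effectively always use pat_sel = 000, overriding entry 0 with GEN8_PPAT_UC
 * on !PPGTT setups makes GGTT accesses uncached, as described in the comment
 * above.
 */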
3000
ee0ce478
VS
3001static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
3002{
3003 uint64_t pat;
3004
3005 /*
3006 * Map WB on BDW to snooped on CHV.
3007 *
3008 * Only the snoop bit has meaning for CHV, the rest is
3009 * ignored.
3010 *
cf3d262e
VS
3011 * The hardware will never snoop for certain types of accesses:
3012 * - CPU GTT (GMADR->GGTT->no snoop->memory)
3013 * - PPGTT page tables
3014 * - some other special cycles
3015 *
3016 * As with BDW, we also need to consider the following for GT accesses:
3017 * "For GGTT, there is NO pat_sel[2:0] from the entry,
3018 * so RTL will always use the value corresponding to
3019 * pat_sel = 000".
3020 * Which means we must set the snoop bit in PAT entry 0
3021 * in order to keep the global status page working.
ee0ce478
VS
3022 */
3023 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
3024 GEN8_PPAT(1, 0) |
3025 GEN8_PPAT(2, 0) |
3026 GEN8_PPAT(3, 0) |
3027 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
3028 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
3029 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
3030 GEN8_PPAT(7, CHV_PPAT_SNOOP);
3031
7e435ad2
VS
3032 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
3033 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
ee0ce478
VS
3034}
3035
d507d735 3036static int gen8_gmch_probe(struct i915_ggtt *ggtt)
63340133 3037{
d507d735 3038 struct drm_device *dev = ggtt->base.dev;
72e96d64 3039 struct drm_i915_private *dev_priv = to_i915(dev);
63340133
BW
3040 u16 snb_gmch_ctl;
3041 int ret;
3042
3043 /* TODO: We're not aware of mappable constraints on gen8 yet */
d507d735
JL
3044 ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
3045 ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
63340133
BW
3046
3047 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
3048 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
3049
3050 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3051
66375014 3052 if (INTEL_INFO(dev)->gen >= 9) {
d507d735
JL
3053 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
3054 ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
66375014 3055 } else if (IS_CHERRYVIEW(dev)) {
d507d735
JL
3056 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
3057 ggtt->size = chv_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 3058 } else {
d507d735
JL
3059 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
3060 ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 3061 }
63340133 3062
d507d735 3063 ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
63340133 3064
5a4e33a3 3065 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
ee0ce478
VS
3066 chv_setup_private_ppat(dev_priv);
3067 else
3068 bdw_setup_private_ppat(dev_priv);
fbe5d36e 3069
d507d735 3070 ret = ggtt_probe_common(dev, ggtt->size);
63340133 3071
d507d735 3072 ggtt->base.clear_range = gen8_ggtt_clear_range;
c140330b 3073 if (IS_CHERRYVIEW(dev_priv))
d507d735
JL
3074 ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
3075 else
3076 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3077 ggtt->base.bind_vma = ggtt_bind_vma;
3078 ggtt->base.unbind_vma = ggtt_unbind_vma;
3079
63340133
BW
3080 return ret;
3081}
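/*
 * Worked example (illustrative only), assuming 8-byte gen8 PTEs: a GGMS
 * value of 3 decodes via gen8_get_total_gtt_size() to 1 << 3 = 8, i.e. 8MiB
 * of PTE space, and the "total" computation above turns that into
 * (8MiB / 8) << PAGE_SHIFT = 1M entries * 4KiB = 4GiB of GGTT address space.
 */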
3082
d507d735 3083static int gen6_gmch_probe(struct i915_ggtt *ggtt)
e76e9aeb 3084{
d507d735 3085 struct drm_device *dev = ggtt->base.dev;
e76e9aeb 3086 u16 snb_gmch_ctl;
e76e9aeb
BW
3087 int ret;
3088
d507d735
JL
3089 ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
3090 ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
41907ddc 3091
baa09f5f
BW
3092 /* 64/512MB is the current min/max we actually know of, but this is just
3093 * a coarse sanity check.
e76e9aeb 3094 */
d507d735
JL
3095 if ((ggtt->mappable_end < (64<<20) || (ggtt->mappable_end > (512<<20)))) {
3096 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
baa09f5f 3097 return -ENXIO;
e76e9aeb
BW
3098 }
3099
e76e9aeb
BW
3100 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
3101 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
e76e9aeb 3102 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
e76e9aeb 3103
d507d735
JL
3104 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
3105 ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl);
3106 ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
e76e9aeb 3107
d507d735 3108 ret = ggtt_probe_common(dev, ggtt->size);
e76e9aeb 3109
d507d735
JL
3110 ggtt->base.clear_range = gen6_ggtt_clear_range;
3111 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
3112 ggtt->base.bind_vma = ggtt_bind_vma;
3113 ggtt->base.unbind_vma = ggtt_unbind_vma;
7faf1ab2 3114
e76e9aeb
BW
3115 return ret;
3116}
3117
853ba5d2 3118static void gen6_gmch_remove(struct i915_address_space *vm)
e76e9aeb 3119{
62106b4f 3120 struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base);
853ba5d2 3121
62106b4f 3122 iounmap(ggtt->gsm);
4ad2af1e 3123 free_scratch_page(vm->dev, vm->scratch_page);
644ec02b 3124}
baa09f5f 3125
d507d735 3126static int i915_gmch_probe(struct i915_ggtt *ggtt)
baa09f5f 3127{
d507d735 3128 struct drm_device *dev = ggtt->base.dev;
72e96d64 3129 struct drm_i915_private *dev_priv = to_i915(dev);
baa09f5f
BW
3130 int ret;
3131
baa09f5f
BW
3132 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
3133 if (!ret) {
3134 DRM_ERROR("failed to set up gmch\n");
3135 return -EIO;
3136 }
3137
d507d735
JL
3138 intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
3139 &ggtt->mappable_base, &ggtt->mappable_end);
baa09f5f 3140
d507d735
JL
3141 ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
3142 ggtt->base.insert_entries = i915_ggtt_insert_entries;
3143 ggtt->base.clear_range = i915_ggtt_clear_range;
3144 ggtt->base.bind_vma = ggtt_bind_vma;
3145 ggtt->base.unbind_vma = ggtt_unbind_vma;
baa09f5f 3146
d507d735 3147 if (unlikely(ggtt->do_idle_maps))
c0a7f818
CW
3148 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3149
baa09f5f
BW
3150 return 0;
3151}
3152
853ba5d2 3153static void i915_gmch_remove(struct i915_address_space *vm)
baa09f5f
BW
3154{
3155 intel_gmch_remove();
3156}
3157
d85489d3
JL
3158/**
3159 * i915_ggtt_init_hw - Initialize GGTT hardware
3160 * @dev: DRM device
3161 */
3162int i915_ggtt_init_hw(struct drm_device *dev)
baa09f5f 3163{
72e96d64 3164 struct drm_i915_private *dev_priv = to_i915(dev);
62106b4f 3165 struct i915_ggtt *ggtt = &dev_priv->ggtt;
baa09f5f
BW
3166 int ret;
3167
baa09f5f 3168 if (INTEL_INFO(dev)->gen <= 5) {
62106b4f
JL
3169 ggtt->probe = i915_gmch_probe;
3170 ggtt->base.cleanup = i915_gmch_remove;
63340133 3171 } else if (INTEL_INFO(dev)->gen < 8) {
62106b4f
JL
3172 ggtt->probe = gen6_gmch_probe;
3173 ggtt->base.cleanup = gen6_gmch_remove;
3accaf7e
MK
3174
3175 if (HAS_EDRAM(dev))
62106b4f 3176 ggtt->base.pte_encode = iris_pte_encode;
4d15c145 3177 else if (IS_HASWELL(dev))
62106b4f 3178 ggtt->base.pte_encode = hsw_pte_encode;
b2f21b4d 3179 else if (IS_VALLEYVIEW(dev))
62106b4f 3180 ggtt->base.pte_encode = byt_pte_encode;
350ec881 3181 else if (INTEL_INFO(dev)->gen >= 7)
62106b4f 3182 ggtt->base.pte_encode = ivb_pte_encode;
b2f21b4d 3183 else
62106b4f 3184 ggtt->base.pte_encode = snb_pte_encode;
63340133 3185 } else {
62106b4f
JL
3186 ggtt->probe = gen8_gmch_probe;
3187 ggtt->base.cleanup = gen6_gmch_remove;
baa09f5f
BW
3188 }
3189
62106b4f
JL
3190 ggtt->base.dev = dev;
3191 ggtt->base.is_ggtt = true;
c114f76a 3192
d507d735 3193 ret = ggtt->probe(ggtt);
a54c0c27 3194 if (ret)
baa09f5f 3195 return ret;
baa09f5f 3196
c890e2d5
CW
3197 if ((ggtt->base.total - 1) >> 32) {
 3198 DRM_ERROR("We never expected a Global GTT with more than 32bits "
3199 "of address space! Found %lldM!\n",
3200 ggtt->base.total >> 20);
3201 ggtt->base.total = 1ULL << 32;
3202 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3203 }
3204
a4eba47b
ID
3205 /*
3206 * Initialise stolen early so that we may reserve preallocated
3207 * objects for the BIOS to KMS transition.
3208 */
3209 ret = i915_gem_init_stolen(dev);
3210 if (ret)
3211 goto out_gtt_cleanup;
3212
baa09f5f 3213 /* GMADR is the PCI mmio aperture into the global GTT. */
c44ef60e 3214 DRM_INFO("Memory usable by graphics device = %lluM\n",
62106b4f
JL
3215 ggtt->base.total >> 20);
3216 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
3217 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
5db6c735
DV
3218#ifdef CONFIG_INTEL_IOMMU
3219 if (intel_iommu_gfx_mapped)
3220 DRM_INFO("VT-d active for gfx access\n");
3221#endif
cfa7c862
DV
3222 /*
3223 * i915.enable_ppgtt is read-only, so do an early pass to validate the
3224 * user's requested state against the hardware/driver capabilities. We
3225 * do this now so that we can print out any log messages once rather
3226 * than every time we check intel_enable_ppgtt().
3227 */
3228 i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
3229 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
baa09f5f
BW
3230
3231 return 0;
a4eba47b
ID
3232
3233out_gtt_cleanup:
72e96d64 3234 ggtt->base.cleanup(&ggtt->base);
a4eba47b
ID
3235
3236 return ret;
baa09f5f 3237}
6f65e29a 3238
fa42331b
DV
3239void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3240{
72e96d64
JL
3241 struct drm_i915_private *dev_priv = to_i915(dev);
3242 struct i915_ggtt *ggtt = &dev_priv->ggtt;
fa42331b 3243 struct drm_i915_gem_object *obj;
2c3d9984
TU
3244 struct i915_vma *vma;
3245 bool flush;
fa42331b
DV
3246
3247 i915_check_and_clear_faults(dev);
3248
3249 /* First fill our portion of the GTT with scratch pages */
72e96d64
JL
3250 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
3251 true);
fa42331b 3252
2c3d9984 3253 /* Cache flush objects bound into GGTT and rebind them. */
fa42331b 3254 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
2c3d9984 3255 flush = false;
1c7f4bca 3256 list_for_each_entry(vma, &obj->vma_list, obj_link) {
72e96d64 3257 if (vma->vm != &ggtt->base)
2c3d9984 3258 continue;
fa42331b 3259
2c3d9984
TU
3260 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3261 PIN_UPDATE));
fa42331b 3262
2c3d9984
TU
3263 flush = true;
3264 }
3265
3266 if (flush)
3267 i915_gem_clflush_object(obj, obj->pin_display);
3268 }
fa42331b
DV
3269
3270 if (INTEL_INFO(dev)->gen >= 8) {
3271 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
3272 chv_setup_private_ppat(dev_priv);
3273 else
3274 bdw_setup_private_ppat(dev_priv);
3275
3276 return;
3277 }
3278
3279 if (USES_PPGTT(dev)) {
72e96d64
JL
3280 struct i915_address_space *vm;
3281
fa42331b
DV
3282 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3283 /* TODO: Perhaps it shouldn't be gen6 specific */
3284
e5716f55 3285 struct i915_hw_ppgtt *ppgtt;
fa42331b 3286
e5716f55 3287 if (vm->is_ggtt)
fa42331b 3288 ppgtt = dev_priv->mm.aliasing_ppgtt;
e5716f55
JL
3289 else
3290 ppgtt = i915_vm_to_ppgtt(vm);
fa42331b
DV
3291
3292 gen6_write_page_range(dev_priv, &ppgtt->pd,
3293 0, ppgtt->base.total);
3294 }
3295 }
3296
3297 i915_ggtt_flush(dev_priv);
3298}
3299
ec7adb6e
JL
3300static struct i915_vma *
3301__i915_gem_vma_create(struct drm_i915_gem_object *obj,
3302 struct i915_address_space *vm,
3303 const struct i915_ggtt_view *ggtt_view)
6f65e29a 3304{
dabde5c7 3305 struct i915_vma *vma;
6f65e29a 3306
ec7adb6e
JL
3307 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
3308 return ERR_PTR(-EINVAL);
e20d2ab7
CW
3309
3310 vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
dabde5c7
DC
3311 if (vma == NULL)
3312 return ERR_PTR(-ENOMEM);
ec7adb6e 3313
1c7f4bca
CW
3314 INIT_LIST_HEAD(&vma->vm_link);
3315 INIT_LIST_HEAD(&vma->obj_link);
6f65e29a
BW
3316 INIT_LIST_HEAD(&vma->exec_list);
3317 vma->vm = vm;
3318 vma->obj = obj;
596c5923 3319 vma->is_ggtt = i915_is_ggtt(vm);
6f65e29a 3320
777dc5bb 3321 if (i915_is_ggtt(vm))
ec7adb6e 3322 vma->ggtt_view = *ggtt_view;
596c5923
CW
3323 else
3324 i915_ppgtt_get(i915_vm_to_ppgtt(vm));
6f65e29a 3325
1c7f4bca 3326 list_add_tail(&vma->obj_link, &obj->vma_list);
6f65e29a
BW
3327
3328 return vma;
3329}
3330
3331struct i915_vma *
ec7adb6e
JL
3332i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
3333 struct i915_address_space *vm)
3334{
3335 struct i915_vma *vma;
3336
3337 vma = i915_gem_obj_to_vma(obj, vm);
3338 if (!vma)
3339 vma = __i915_gem_vma_create(obj, vm,
3340 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
3341
3342 return vma;
3343}
3344
3345struct i915_vma *
3346i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
fe14d5f4 3347 const struct i915_ggtt_view *view)
6f65e29a 3348{
72e96d64
JL
3349 struct drm_device *dev = obj->base.dev;
3350 struct drm_i915_private *dev_priv = to_i915(dev);
3351 struct i915_ggtt *ggtt = &dev_priv->ggtt;
ade7daa1 3352 struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
ec7adb6e 3353
6f65e29a 3354 if (!vma)
72e96d64 3355 vma = __i915_gem_vma_create(obj, &ggtt->base, view);
6f65e29a
BW
3356
3357 return vma;
ec7adb6e 3358
6f65e29a 3359}
fe14d5f4 3360
804beb4b 3361static struct scatterlist *
2d7f3bdb 3362rotate_pages(const dma_addr_t *in, unsigned int offset,
804beb4b 3363 unsigned int width, unsigned int height,
87130255 3364 unsigned int stride,
804beb4b 3365 struct sg_table *st, struct scatterlist *sg)
50470bb0
TU
3366{
3367 unsigned int column, row;
3368 unsigned int src_idx;
50470bb0 3369
50470bb0 3370 for (column = 0; column < width; column++) {
87130255 3371 src_idx = stride * (height - 1) + column;
50470bb0
TU
3372 for (row = 0; row < height; row++) {
3373 st->nents++;
3374 /* We don't need the pages, but need to initialize
3375 * the entries so the sg list can be happily traversed.
 3376 * All we need are the DMA addresses.
3377 */
3378 sg_set_page(sg, NULL, PAGE_SIZE, 0);
3379 sg_dma_address(sg) = in[offset + src_idx];
3380 sg_dma_len(sg) = PAGE_SIZE;
3381 sg = sg_next(sg);
3382 src_idx -= stride;
3383 }
3384 }
3385
3386 return sg;
3387}
3388
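To make the index arithmetic in rotate_pages() easier to follow, here is a small stand-alone sketch (ordinary user-space C, not kernel code) that replays the same walk for an illustrative 2x3 tile grid with stride 2; the sizes are made up purely for demonstration.

#include <stdio.h>

int main(void)
{
	const unsigned int width = 2, height = 3, stride = 2;
	unsigned int column, row, src_idx, dst = 0;

	for (column = 0; column < width; column++) {
		/* Same starting point as rotate_pages(): bottom of the column. */
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			printf("dst page %u <- src page %u\n", dst++, src_idx);
			src_idx -= stride;
		}
	}
	return 0;
}

Running it prints source pages 4, 2, 0 and then 5, 3, 1: each destination column is a source column read from the bottom row upwards, which is the 90 degree rotation the scatterlist above encodes.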
3389static struct sg_table *
3390intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3391 struct drm_i915_gem_object *obj)
3392{
3393 unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
3394 unsigned int size_pages_uv;
3395 struct sg_page_iter sg_iter;
3396 unsigned long i;
3397 dma_addr_t *page_addr_list;
3398 struct sg_table *st;
3399 unsigned int uv_start_page;
3400 struct scatterlist *sg;
3401 int ret = -ENOMEM;
3402
3403 /* Allocate a temporary list of source pages for random access. */
3404 page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE,
3405 sizeof(dma_addr_t),
3406 GFP_TEMPORARY);
3407 if (!page_addr_list)
3408 return ERR_PTR(ret);
3409
3410 /* Account for UV plane with NV12. */
3411 if (rot_info->pixel_format == DRM_FORMAT_NV12)
3412 size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height;
3413 else
3414 size_pages_uv = 0;
3415
3416 /* Allocate target SG list. */
3417 st = kmalloc(sizeof(*st), GFP_KERNEL);
3418 if (!st)
3419 goto err_st_alloc;
3420
3421 ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
3422 if (ret)
3423 goto err_sg_alloc;
3424
3425 /* Populate source page list from the object. */
3426 i = 0;
3427 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
3428 page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
3429 i++;
3430 }
3431
3432 st->nents = 0;
3433 sg = st->sgl;
3434
3435 /* Rotate the pages. */
3436 sg = rotate_pages(page_addr_list, 0,
3437 rot_info->plane[0].width, rot_info->plane[0].height,
3438 rot_info->plane[0].width,
3439 st, sg);
3440
3441 /* Append the UV plane if NV12. */
3442 if (rot_info->pixel_format == DRM_FORMAT_NV12) {
3443 uv_start_page = size_pages;
3444
3445 /* Check for tile-row un-alignment. */
3446 if (offset_in_page(rot_info->uv_offset))
3447 uv_start_page--;
3448
3449 rot_info->uv_start_page = uv_start_page;
3450
3451 sg = rotate_pages(page_addr_list, rot_info->uv_start_page,
3452 rot_info->plane[1].width, rot_info->plane[1].height,
3453 rot_info->plane[1].width,
3454 st, sg);
3455 }
3456
3457 DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n",
3458 obj->base.size, rot_info->plane[0].width,
3459 rot_info->plane[0].height, size_pages + size_pages_uv,
3460 size_pages);
3461
3462 drm_free_large(page_addr_list);
3463
3464 return st;
3465
3466err_sg_alloc:
3467 kfree(st);
3468err_st_alloc:
3469 drm_free_large(page_addr_list);
3470
3471 DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n",
3472 obj->base.size, ret, rot_info->plane[0].width,
3473 rot_info->plane[0].height, size_pages + size_pages_uv,
3474 size_pages);
3475 return ERR_PTR(ret);
3476}
3477
3478static struct sg_table *
3479intel_partial_pages(const struct i915_ggtt_view *view,
3480 struct drm_i915_gem_object *obj)
3481{
3482 struct sg_table *st;
3483 struct scatterlist *sg;
3484 struct sg_page_iter obj_sg_iter;
3485 int ret = -ENOMEM;
3486
3487 st = kmalloc(sizeof(*st), GFP_KERNEL);
3488 if (!st)
3489 goto err_st_alloc;
3490
3491 ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
3492 if (ret)
3493 goto err_sg_alloc;
3494
3495 sg = st->sgl;
3496 st->nents = 0;
3497 for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
3498 view->params.partial.offset)
3499 {
3500 if (st->nents >= view->params.partial.size)
3501 break;
3502
3503 sg_set_page(sg, NULL, PAGE_SIZE, 0);
3504 sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
3505 sg_dma_len(sg) = PAGE_SIZE;
3506
3507 sg = sg_next(sg);
3508 st->nents++;
3509 }
3510
3511 return st;
3512
3513err_sg_alloc:
3514 kfree(st);
3515err_st_alloc:
3516 return ERR_PTR(ret);
3517}
3518
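For reference, a caller could describe such a partial view roughly as in the sketch below. This is an editor-added illustration, map_partial_range() is hypothetical, and only the view type and the params.partial fields used by intel_partial_pages() above are assumed.

static struct i915_vma *
map_partial_range(struct drm_i915_gem_object *obj,
		  unsigned int first_page, unsigned int npages)
{
	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_PARTIAL };

	view.params.partial.offset = first_page;	/* offset into the object, in pages */
	view.params.partial.size = npages;		/* length of the view, in pages */

	/* Looks up an existing VMA for this view or creates a new one. */
	return i915_gem_obj_lookup_or_create_ggtt_vma(obj, &view);
}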
3519static int
3520i915_get_ggtt_vma_pages(struct i915_vma *vma)
3521{
3522 int ret = 0;
3523
3524 if (vma->ggtt_view.pages)
3525 return 0;
3526
3527 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
3528 vma->ggtt_view.pages = vma->obj->pages;
3529 else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
3530 vma->ggtt_view.pages =
3531 intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
3532 else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
3533 vma->ggtt_view.pages =
3534 intel_partial_pages(&vma->ggtt_view, vma->obj);
3535 else
3536 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3537 vma->ggtt_view.type);
3538
3539 if (!vma->ggtt_view.pages) {
3540 DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
3541 vma->ggtt_view.type);
3542 ret = -EINVAL;
3543 } else if (IS_ERR(vma->ggtt_view.pages)) {
3544 ret = PTR_ERR(vma->ggtt_view.pages);
3545 vma->ggtt_view.pages = NULL;
3546 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3547 vma->ggtt_view.type, ret);
3548 }
3549
3550 return ret;
3551}
3552
3553/**
3554 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
3555 * @vma: VMA to map
3556 * @cache_level: mapping cache level
3557 * @flags: flags like global or local mapping
3558 *
3559 * DMA addresses are taken from the scatter-gather table of this object (or of
3560 * this VMA in case of non-default GGTT views) and PTE entries set up.
3561 * Note that DMA addresses are also the only part of the SG table we care about.
3562 */
3563int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
3564 u32 flags)
3565{
3566 int ret;
3567 u32 bind_flags;
3568
3569 if (WARN_ON(flags == 0))
3570 return -EINVAL;
3571
3572 bind_flags = 0;
3573 if (flags & PIN_GLOBAL)
3574 bind_flags |= GLOBAL_BIND;
3575 if (flags & PIN_USER)
3576 bind_flags |= LOCAL_BIND;
3577
3578 if (flags & PIN_UPDATE)
3579 bind_flags |= vma->bound;
3580 else
3581 bind_flags &= ~vma->bound;
3582
3583 if (bind_flags == 0)
3584 return 0;
3585
3586 if (vma->bound == 0 && vma->vm->allocate_va_range) {
3587 /* XXX: i915_vma_pin() will fix this +- hack */
3588 vma->pin_count++;
3589 trace_i915_va_alloc(vma);
3590 ret = vma->vm->allocate_va_range(vma->vm,
3591 vma->node.start,
3592 vma->node.size);
3593 vma->pin_count--;
3594 if (ret)
3595 return ret;
3596 }
3597
3598 ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
3599 if (ret)
3600 return ret;
3601
3602 vma->bound |= bind_flags;
3603
3604 return 0;
3605}
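A hedged usage sketch, assuming the caller already holds the required locks and has reserved GTT space for the VMA: binding into both the global GTT and the per-process address space is just a matter of passing both pin flags. The wrapper name below is hypothetical.

static int example_bind_vma_everywhere(struct i915_vma *vma)
{
	/* PIN_GLOBAL maps to GLOBAL_BIND, PIN_USER to LOCAL_BIND (see above). */
	return i915_vma_bind(vma, I915_CACHE_NONE, PIN_GLOBAL | PIN_USER);
}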
3606
3607/**
3608 * i915_ggtt_view_size - Get the size of a GGTT view.
3609 * @obj: Object the view is of.
3610 * @view: The view in question.
3611 *
3612 * Return: The size of the GGTT view in bytes.
3613 */
3614size_t
3615i915_ggtt_view_size(struct drm_i915_gem_object *obj,
3616 const struct i915_ggtt_view *view)
3617{
3618 if (view->type == I915_GGTT_VIEW_NORMAL) {
3619 return obj->base.size;
3620 } else if (view->type == I915_GGTT_VIEW_ROTATED) {
3621 return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
3622 } else if (view->type == I915_GGTT_VIEW_PARTIAL) {
3623 return view->params.partial.size << PAGE_SHIFT;
3624 } else {
3625 WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
3626 return obj->base.size;
3627 }
3628}
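As a worked example (editor-added, names hypothetical): a partial view covering 16 pages reports 16 << PAGE_SHIFT bytes, i.e. 64KiB with 4KiB pages, independent of how large the underlying object is.

static size_t example_partial_view_size(struct drm_i915_gem_object *obj)
{
	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_PARTIAL };

	view.params.partial.offset = 0;
	view.params.partial.size = 16;		/* pages */

	return i915_ggtt_view_size(obj, &view);	/* 16 * PAGE_SIZE */
}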
3629
3630void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
3631{
3632 void __iomem *ptr;
3633
3634 lockdep_assert_held(&vma->vm->dev->struct_mutex);
3635 if (WARN_ON(!vma->obj->map_and_fenceable))
3636 return ERR_PTR(-ENODEV);
3637
3638 GEM_BUG_ON(!vma->is_ggtt);
3639 GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
3640
3641 ptr = vma->iomap;
3642 if (ptr == NULL) {
3643 ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
3644 vma->node.start,
3645 vma->node.size);
3646 if (ptr == NULL)
3647 return ERR_PTR(-ENOMEM);
3648
3649 vma->iomap = ptr;
3650 }
3651
3652 vma->pin_count++;
3653 return ptr;
3654}
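Finally, a minimal sketch of a caller, assuming struct_mutex is held and that a matching i915_vma_unpin_iomap() release helper is declared alongside this function: map the VMA through the aperture, write one dword, then drop the pin. The wrapper name is hypothetical.

static int example_poke_through_aperture(struct i915_vma *vma, u32 value)
{
	void __iomem *ptr;

	ptr = i915_vma_pin_iomap(vma);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	writel(value, ptr);		/* write-combined store into the GGTT mapping */

	i915_vma_unpin_iomap(vma);
	return 0;
}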