/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}
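/*
 * The owner check above is best-effort and only compiled in when the mutex
 * implementation tracks its owner. i915_gem_shrinker_lock() relies on it to
 * detect that reclaim re-entered while this task already holds struct_mutex,
 * letting the shrinker borrow the lock instead of deadlocking.
 */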
static int num_vma_bound(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int count = 0;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (drm_mm_node_allocated(&vma->node))
			count++;
		if (vma->pin_count)
			count++;
	}

	return count;
}
static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Only shmemfs objects are backed by swap */
	if (!obj->base.filp)
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (obj->pages_pin_count != num_vma_bound(obj))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->madv == I915_MADV_DONTNEED;
}
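/*
 * Worked example (hypothetical object state): with two unpinned VMAs bound,
 * pages_pin_count == 2 == num_vma_bound(), so unbinding both VMAs drops the
 * last page pins and the pages can be released. Add one more pin - say a
 * kernel vmap of obj->mapping - and pages_pin_count becomes 3 != 2, so no
 * amount of unbinding frees the backing store and the object is skipped.
 */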
/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps), or the mm core might have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target, unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);
	/*
	 * Unbinding of objects will require HW access; Let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;
	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target && !list_empty(phase->list)) {
			struct drm_i915_gem_object *obj;
			struct i915_vma *vma, *v;

			obj = list_first_entry(phase->list,
					       typeof(*obj), global_list);
			list_move_tail(&obj->global_list, &still_in_list);

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mapping))
				continue;

			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
				continue;

			if (!can_release_pages(obj))
				continue;

			drm_gem_object_reference(&obj->base);

			/* For the unbound phase, this should be a no-op! */
			list_for_each_entry_safe(vma, v,
						 &obj->vma_list, obj_link)
				if (i915_vma_unbind(vma))
					break;

			if (i915_gem_object_put_pages(obj) == 0)
				count += obj->base.size >> PAGE_SHIFT;

			drm_gem_object_unreference(&obj->base);
		}
		list_splice(&still_in_list, phase->list);
	}
	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);

	return count;
}
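/*
 * Illustrative call (the page target of 128 is made up): to trim only
 * objects that userspace has already marked purgeable, without touching
 * bound buffers or waking the device, a caller could use
 *
 *	freed = i915_gem_shrink(dev_priv, 128,
 *				I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE);
 *
 * i915_gem_shrink_all() below is the real in-tree example of flag selection:
 * it passes -1UL with I915_SHRINK_BOUND | I915_SHRINK_UNBOUND |
 * I915_SHRINK_ACTIVE to reclaim as much as possible.
 */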
/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	return i915_gem_shrink(dev_priv, -1UL,
			       I915_SHRINK_BOUND |
			       I915_SHRINK_UNBOUND |
			       I915_SHRINK_ACTIVE);
}
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;

		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
			return false;

		*unlock = false;
	} else
		*unlock = true;

	return true;
}
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	i915_gem_retire_requests(dev_priv);

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!obj->active && can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}
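/*
 * Note on the two-pass order above: reclaiming already-purgeable objects
 * first is cheap (discarding them costs no swap I/O), so the wider
 * BOUND | UNBOUND pass only runs if the purgeable pass left sc->nr_to_scan
 * unmet.
 */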
struct shrinker_lock_uninterruptible {
	bool was_interruptible;
	bool unlock;
};
static bool
i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
				       struct shrinker_lock_uninterruptible *slu,
				       int timeout_ms)
{
	unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;

	while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;

		if (--timeout == 0) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	}

	slu->was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;
	return true;
}
static void
i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
					 struct shrinker_lock_uninterruptible *slu)
{
	dev_priv->mm.interruptible = slu->was_interruptible;
	if (slu->unlock)
		mutex_unlock(&dev_priv->drm.struct_mutex);
}
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct shrinker_lock_uninterruptible slu;
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

	intel_runtime_pm_get(dev_priv);
	freed_pages = i915_gem_shrink_all(dev_priv);
	intel_runtime_pm_put(dev_priv);
	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}

	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct shrinker_lock_uninterruptible slu;
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	int ret;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;
	/* Force everything onto the inactive lists */
	ret = i915_gem_wait_for_idle(dev_priv);
	if (ret)
		goto out;

	intel_runtime_pm_get(dev_priv);
	freed_pages += i915_gem_shrink(dev_priv, -1UL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_ACTIVE |
				       I915_SHRINK_VMAPS);
	intel_runtime_pm_put(dev_priv);
	/* We also want to clear any cached iomaps as they wrap vmap */
	list_for_each_entry_safe(vma, next,
				 &dev_priv->ggtt.base.inactive_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;
		if (vma->iomap && i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}

out:
	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}
/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}
/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
}
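/*
 * Usage sketch (call sites illustrative, not verbatim): the driver load path
 * is expected to call i915_gem_shrinker_init(dev_priv) once the GEM lists and
 * struct_mutex exist, and the unload path must call
 * i915_gem_shrinker_cleanup(dev_priv) before dev_priv is freed, since the
 * registered shrinker and notifiers could otherwise fire against a dead
 * device.
 */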