/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *		Double CLOCK lists
 *
 * Per zone, two clock lists are maintained for file pages: the
 * inactive and the active list.  Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail.  Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory.  In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more frequently, but hopefully
 * are used less frequently than the thrashing set:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages.  But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot.  This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache).  If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead.  And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
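 * A worked example, with made-up numbers: suppose the eviction
 * counter read E = 1000 when a page was reclaimed off the inactive
 * list, and R = 1040 when that page faults back in.  The refault
 * distance is R - E = 40: at least 40 other inactive pages were
 * accessed while this page was out of memory.  If NR_active >= 40,
 * then 40 more inactive slots would have kept the page in cache, so
 * activating it to compete with the active pages is justified.
 *
 *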
 *		Activating refaulting pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past.  This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 *		Implementation
 *
 * For each zone's file LRU lists, a counter for inactive evictions
 * and activations is maintained (zone->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the zone) is stored in the now empty page cache radix tree
 * slot of the evicted page.  This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	(RADIX_TREE_EXCEPTIONAL_ENTRY + \
			 ZONES_SHIFT + NODES_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the radix tree
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;
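
/*
 * For illustration (made-up value): with bucket_order == 2, the
 * eviction timestamps 1000 through 1003 all pack into the same
 * bucket, so a refault distance computed from them can be off by up
 * to 3 pages - a small granularity loss in exchange for covering
 * four times the distance in the same number of bits.
 */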

static void *pack_shadow(unsigned long eviction, struct zone *zone)
{
	eviction >>= bucket_order;
	eviction = (eviction << NODES_SHIFT) | zone_to_nid(zone);
	eviction = (eviction << ZONES_SHIFT) | zone_idx(zone);
	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);

	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
}
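
/*
 * Resulting shadow entry layout, from most to least significant bit
 * (a sketch; the exact field widths depend on the configuration):
 *
 *   +---------------------------+---------+---------+-----+
 *   | eviction >> bucket_order  | node id | zone id | tag |
 *   +---------------------------+---------+---------+-----+
 *
 * The low tag bits are the radix tree exceptional-entry marker that
 * distinguishes shadow entries from page pointers in the tree.
 */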

static void unpack_shadow(void *shadow, struct zone **zonep,
			  unsigned long *evictionp)
{
	unsigned long entry = (unsigned long)shadow;
	int zid, nid;

	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
	zid = entry & ((1UL << ZONES_SHIFT) - 1);
	entry >>= ZONES_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;

	*zonep = NODE_DATA(nid)->node_zones + zid;
	*evictionp = entry << bucket_order;
}

/**
 * workingset_eviction - note the eviction of a page from memory
 * @mapping: address space the page was backing
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @mapping->page_tree in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct address_space *mapping, struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long eviction;

	eviction = atomic_long_inc_return(&zone->inactive_age);
	return pack_shadow(eviction, zone);
}
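
/*
 * Sketch of the life cycle described above (the exact call sites live
 * in the reclaim and page cache code): when a file page is evicted,
 * the entry returned by workingset_eviction() is stored in the page's
 * old radix tree slot; a later cache miss on that offset finds the
 * shadow entry and feeds it to workingset_refault() below to decide
 * whether the refaulting page should start out active.
 */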

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the zone it was allocated in.
 *
 * Returns %true if the page should be activated, %false otherwise.
 */
bool workingset_refault(void *shadow)
{
	unsigned long refault_distance;
	unsigned long eviction;
	unsigned long refault;
	struct zone *zone;

	unpack_shadow(shadow, &zone, &eviction);

	refault = atomic_long_read(&zone->inactive_age);

	/*
	 * The unsigned subtraction here gives an accurate distance
	 * across inactive_age overflows in most cases: even when
	 * refault has numerically wrapped past eviction, the masked
	 * difference still comes out right.
	 *
	 * There is a special case: usually, shadow entries have a
	 * short lifetime and are either refaulted or reclaimed along
	 * with the inode before they get too old.  But it is not
	 * impossible for the inactive_age to lap a shadow entry in
	 * the field, which can then result in a false small refault
	 * distance, leading to a false activation should this old
	 * entry actually refault again.  However, earlier kernels
	 * used to deactivate unconditionally with *every* reclaim
	 * invocation for the longest time, so the occasional
	 * inappropriate activation leading to pressure on the active
	 * list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;

	inc_zone_state(zone, WORKINGSET_REFAULT);

	if (refault_distance <= zone_page_state(zone, NR_ACTIVE_FILE)) {
		inc_zone_state(zone, WORKINGSET_ACTIVATE);
		return true;
	}
	return false;
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	atomic_long_inc(&page_zone(page)->inactive_age);
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload.  In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes.  To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru workingset_shadow_nodes;

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long shadow_nodes;
	unsigned long max_nodes;
	unsigned long pages;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
	local_irq_enable();

	pages = node_present_pages(sc->nid);
	/*
	 * Active cache pages are limited to 50% of memory, and shadow
	 * entries that represent a refault distance bigger than that
	 * do not have any effect.  Limit the number of shadow nodes
	 * such that shadow entries do not exceed the number of active
	 * cache pages, assuming a worst-case node population density
	 * of 1/8th on average.
	 *
	 * On 64-bit with 7 radix_tree_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~2% of available memory:
	 *
	 * PAGE_SIZE / radix_tree_nodes / node_entries / PAGE_SIZE
	 */
	max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3);
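
	/*
	 * To make the shift concrete (a sketch; RADIX_TREE_MAP_SHIFT
	 * is typically 6 on 64-bit): max_nodes = pages >> 4, i.e. one
	 * shadow node allowed per 16 present pages.  At the assumed
	 * 1/8th population density a node carries ~8 entries, which
	 * is one shadow entry per two pages - matching the 50% active
	 * list worst case above.
	 */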

	if (shadow_nodes <= max_nodes)
		return 0;

	return shadow_nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg)
{
	struct address_space *mapping;
	struct radix_tree_node *node;
	unsigned int i;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the mapping->tree_lock and the
	 * lru_lock.  Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has radix tree nodes on the LRU.
	 *
	 * We can then safely transition to the mapping->tree_lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	node = container_of(item, struct radix_tree_node, private_list);
	mapping = node->private_data;

	/* Coming from the list, invert the lock order */
	if (!spin_trylock(&mapping->tree_lock)) {
		spin_unlock(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */

	BUG_ON(!node->count);
	BUG_ON(node->count & RADIX_TREE_COUNT_MASK);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[i]) {
			BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
			node->slots[i] = NULL;
			BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
			node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
			BUG_ON(!mapping->nrexceptional);
			mapping->nrexceptional--;
		}
	}
	BUG_ON(node->count);
	inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);
	if (!__radix_tree_delete_node(&mapping->page_tree, node))
		BUG();

	spin_unlock(&mapping->tree_lock);
	ret = LRU_REMOVED_RETRY;
out:
	local_irq_enable();
	cond_resched();
	local_irq_disable();
	spin_lock(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	unsigned long ret;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc,
				   shadow_lru_isolate, NULL);
	local_irq_enable();
	return ret;
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * mapping->tree_lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	printk("workingset: timestamp_bits=%u max_order=%u bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);
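
	/*
	 * Example with made-up numbers: on 64-bit, if EVICTION_SHIFT
	 * works out to 14, timestamp_bits is 50; a machine with 2^52
	 * pages would get max_order == 52 and thus bucket_order == 2,
	 * trading the two lowest timestamp bits for four times the
	 * refault distance coverage.
	 */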

	ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
	if (ret)
		goto err;
	ret = register_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	return 0;
err_list_lru:
	list_lru_destroy(&workingset_shadow_nodes);
err:
	return ret;
}
module_init(workingset_init);