/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>
/*
 * We begin with definitions supporting the Dynamic Memory protocol.
 *
 * Begin protocol definitions.
 */
/*
 * Protocol versions. The low word is the minor version,
 * the high word the major version.
 *
 * History:
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */
#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
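/*
 * Illustrative sanity checks (editor's sketch, not part of the original
 * driver): the macros above put the major number in the high word and the
 * minor number in the low word, so protocol version 1.0 encodes as
 * 0x00010000 and version 0.3 as 0x00000003. The checks compile away.
 */
static inline void dynmem_version_encoding_example(void)
{
	BUILD_BUG_ON(DYNMEM_MAKE_VERSION(1, 0) != 0x00010000);
	BUILD_BUG_ON(DYNMEM_MAJOR_VERSION(DYNMEM_MAKE_VERSION(1, 0)) != 1);
	BUILD_BUG_ON(DYNMEM_MINOR_VERSION(DYNMEM_MAKE_VERSION(0, 3)) != 3);
}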
enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
};
enum dm_message_type {
	/*
	 * Version 0.3
	 */
	DM_ERROR = 0,
	DM_VERSION_REQUEST = 1,
	DM_VERSION_RESPONSE = 2,
	DM_CAPABILITIES_REPORT = 3,
	DM_CAPABILITIES_RESPONSE = 4,
	DM_STATUS_REPORT = 5,
	DM_BALLOON_REQUEST = 6,
	DM_BALLOON_RESPONSE = 7,
	DM_UNBALLOON_REQUEST = 8,
	DM_UNBALLOON_RESPONSE = 9,
	DM_MEM_HOT_ADD_REQUEST = 10,
	DM_MEM_HOT_ADD_RESPONSE = 11,
	DM_VERSION_03_MAX = 11,
	/*
	 * Version 1.0
	 */
	DM_INFO_MESSAGE = 12,
	DM_VERSION_1_MAX = 12
};
/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
	struct {
		__u16 minor_version;
		__u16 major_version;
	};
	__u32 version;
} __packed;
struct dm_cap_bits {
	__u64 balloon:1;
	__u64 hot_add:1;
	/*
	 * To support guests that may have alignment
	 * limitations on hot-add, the guest can specify
	 * its alignment requirements; a value of n
	 * represents an alignment of 2^n in megabytes.
	 */
	__u64 hot_add_alignment:4;
	__u64 reservedz:58;
} __packed;

union dm_caps {
	struct dm_cap_bits cap_bits;
	__u64 caps;
} __packed;
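/*
 * Worked example (editor's note): balloon_probe() below reports
 * hot_add_alignment = 7, i.e. 2^7 = 128 MB, matching the HA_CHUNK
 * hot-add granularity used by this driver.
 */
static inline unsigned long dm_hot_add_alignment_mb(unsigned int n)
{
	return 1UL << n;	/* illustrative helper, not used by the driver */
}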
union dm_mem_page_range {
	struct {
		/*
		 * The PFN number of the first page in the range.
		 * 40 bits is the architectural limit of a PFN
		 * number.
		 */
		__u64 start_page:40;
		/*
		 * The number of pages in the range.
		 */
		__u64 page_cnt:24;
	} finfo;
	__u64 page_range;
} __packed;
/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes; including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */
struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */
struct dm_message {
	struct dm_header hdr;
	__u8 data[]; /* enclosed message */
} __packed;
/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version guest will request.
 * reservedz: Reserved field, set to zero.
 */
struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
	__u32 reservedz:31;
} __packed;
/*
 * Version response message; Host to Guest and indicates
 * if the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * guest should re-try with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */
struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;
/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */
struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;
/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies if the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted guest's capabilities.
 * reservedz: Must be 0.
 */
struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;
/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 *		   in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 *	    calculated as File Cache Page Fault Count - Page Read Count.
 *	    This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;
/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */
struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;
/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */
struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;
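/*
 * Layout note (editor's addition): range_array[] is a flexible array
 * member, so the wire size of a balloon response is
 * sizeof(struct dm_balloon_response) plus range_count *
 * sizeof(union dm_mem_page_range); alloc_balloon_pages() below grows
 * hdr.size by one union dm_mem_page_range per appended range.
 */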
/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */
struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;
/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */
struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;
/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * arbitrary granularity of memory.
 */
struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;
/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times it fails the request.
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation; 1: success, 0: failure.
 *
 */
struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 result;
	__u32 page_count;
} __packed;
/*
 * Types of information sent from host to the guest.
 */
enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};

/*
 * Header for the information message.
 */
struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;
/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * info_size: size of the information blob.
 * info: information blob.
 */
struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8 info[];
};
/*
 * End protocol definitions.
 */
/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
};
struct balloon_state {
	__u32 num_pages;
	struct work_struct wrk;
};

struct hot_add_wrk {
	union dm_mem_page_range ha_page_range;
	union dm_mem_page_range ha_region_range;
	struct work_struct wrk;
};
static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to host.
 */
static unsigned long last_post_time;
module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
static int dm_ring_size = (5 * PAGE_SIZE);
/*
 * Driver specific state.
 */

enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};
static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M 512
#define HA_CHUNK (32 * 1024)
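/*
 * Worked numbers (editor's note, assuming 4 KB pages): PAGES_IN_2M is
 * 2 MB / 4 KB = 512 pages, and HA_CHUNK is 32 * 1024 pages * 4 KB =
 * 128 MB - one hot-add block, matching the 2^7 MB alignment advertised
 * to the host in balloon_probe().
 */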
struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;
	unsigned int num_pages_onlined;
	unsigned int num_pages_added;

	/*
	 * State to manage the ballooning (up) operation.
	 */
	struct balloon_state balloon_wrk;

	/*
	 * State to execute the "hot-add" operation.
	 */
	struct hot_add_wrk ha_wrk;

	/*
	 * This state tracks if the host has specified a hot-add
	 * region.
	 */
	bool host_specified_ha_region;

	/*
	 * State to synchronize hot-add.
	 */
	bool ha_waiting;
	struct completion ol_waitevent;

	/*
	 * This thread handles hot-add
	 * requests from the host as well as notifying
	 * the host with regards to memory pressure in
	 * the guest.
	 */
	struct task_struct *thread;

	struct mutex ha_region_mutex;

	/*
	 * A list of hot-add regions.
	 */
	struct list_head ha_region_list;

	/*
	 * We start with the highest version we can support
	 * and downgrade based on the host; we save here the
	 * next version to try.
	 */
	__u32 next_version;
};

static struct hv_dynmem_device dm_device;
static void post_status(struct hv_dynmem_device *dm);
#ifdef CONFIG_MEMORY_HOTPLUG
static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
			      void *v)
{
	struct memory_notify *mem = (struct memory_notify *)v;

	switch (val) {
	case MEM_GOING_ONLINE:
		mutex_lock(&dm_device.ha_region_mutex);
		break;

	case MEM_ONLINE:
		dm_device.num_pages_onlined += mem->nr_pages;
		/* Fall through */
	case MEM_CANCEL_ONLINE:
		if (val == MEM_ONLINE ||
		    mutex_is_locked(&dm_device.ha_region_mutex))
			mutex_unlock(&dm_device.ha_region_mutex);
		if (dm_device.ha_waiting) {
			dm_device.ha_waiting = false;
			complete(&dm_device.ol_waitevent);
		}
		break;

	case MEM_OFFLINE:
		mutex_lock(&dm_device.ha_region_mutex);
		dm_device.num_pages_onlined -= mem->nr_pages;
		mutex_unlock(&dm_device.ha_region_mutex);
		break;
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block hv_memory_nb = {
	.notifier_call = hv_memory_notifier,
	.priority = 0
};
static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
{
	int i;
	struct page *pg;

	for (i = 0; i < size; i++) {
		pg = pfn_to_page(start_pfn + i);
		__online_page_set_limits(pg);
		__online_page_increment_counters(pg);
		__online_page_free(pg);
	}
}
static void hv_mem_hot_add(unsigned long start, unsigned long size,
			   unsigned long pfn_count,
			   struct hv_hotadd_state *has)
{
	int ret = 0;
	int i, nid;
	unsigned long start_pfn;
	unsigned long processed_pfn;
	unsigned long total_pfn = pfn_count;

	for (i = 0; i < (size/HA_CHUNK); i++) {
		start_pfn = start + (i * HA_CHUNK);
		has->ha_end_pfn += HA_CHUNK;

		if (total_pfn > HA_CHUNK) {
			processed_pfn = HA_CHUNK;
			total_pfn -= HA_CHUNK;
		} else {
			processed_pfn = total_pfn;
			total_pfn = 0;
		}

		has->covered_end_pfn += processed_pfn;

		init_completion(&dm_device.ol_waitevent);
		dm_device.ha_waiting = true;

		mutex_unlock(&dm_device.ha_region_mutex);
		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
		ret = add_memory(nid, PFN_PHYS((start_pfn)),
				 (HA_CHUNK << PAGE_SHIFT));

		if (ret) {
			pr_info("hot_add memory failed error is %d\n", ret);
			if (ret == -EEXIST) {
				/*
				 * This error indicates that the failure
				 * is not transient: the guest's physical
				 * address map precludes hot adding memory.
				 * Stop all further hot-add attempts.
				 */
				do_hot_add = false;
			}
			has->ha_end_pfn -= HA_CHUNK;
			has->covered_end_pfn -= processed_pfn;
			mutex_lock(&dm_device.ha_region_mutex);
			break;
		}

		/*
		 * Wait for the memory block to be onlined.
		 * Since the hot add has succeeded, it is ok to
		 * proceed even if the pages in the hot added region
		 * have not been "onlined" within the allowed time.
		 */
		wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
		mutex_lock(&dm_device.ha_region_mutex);
		post_status(&dm_device);
	}
}
static void hv_online_page(struct page *pg)
{
	struct list_head *cur;
	struct hv_hotadd_state *has;
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;

	list_for_each(cur, &dm_device.ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);
		cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn);
		cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

		if (((unsigned long)pg >= cur_start_pgp) &&
		    ((unsigned long)pg < cur_end_pgp)) {
			/*
			 * This frame is currently backed; online the
			 * page.
			 */
			__online_page_set_limits(pg);
			__online_page_increment_counters(pg);
			__online_page_free(pg);
		}
	}
}
static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
	struct list_head *cur;
	struct hv_hotadd_state *has;
	unsigned long residual, new_inc;

	if (list_empty(&dm_device.ha_region_list))
		return false;

	list_for_each(cur, &dm_device.ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);

		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if ((start_pfn >= has->end_pfn))
			continue;
		/*
		 * If the current hot-add request extends beyond
		 * our current limit, extend it.
		 */
		if ((start_pfn + pfn_cnt) > has->end_pfn) {
			residual = (start_pfn + pfn_cnt - has->end_pfn);
			/*
			 * Extend the region by multiples of HA_CHUNK.
			 */
			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
			if (residual % HA_CHUNK)
				new_inc += HA_CHUNK;

			has->end_pfn += new_inc;
		}
		/*
		 * If the current start pfn is not where the covered_end
		 * is, update it.
		 */
		if (has->covered_end_pfn != start_pfn)
			has->covered_end_pfn = start_pfn;

		return true;
	}

	return false;
}
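/*
 * Editor's sketch (not used by the driver): the round-up idiom above,
 * which reappears in hot_add_req(), written as a helper. For example a
 * residual of 5000 pfns with HA_CHUNK = 32768 rounds up to one full
 * 128 MB chunk of 32768 pfns.
 */
static inline unsigned long ha_round_up(unsigned long pfns)
{
	return ((pfns + HA_CHUNK - 1) / HA_CHUNK) * HA_CHUNK;
}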
static unsigned long handle_pg_range(unsigned long pg_start,
				     unsigned long pg_count)
{
	unsigned long start_pfn = pg_start;
	unsigned long pfn_cnt = pg_count;
	unsigned long size;
	struct list_head *cur;
	struct hv_hotadd_state *has;
	unsigned long pgs_ol = 0;
	unsigned long old_covered_state;

	if (list_empty(&dm_device.ha_region_list))
		return 0;

	list_for_each(cur, &dm_device.ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);

		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if ((start_pfn >= has->end_pfn))
			continue;

		old_covered_state = has->covered_end_pfn;

		if (start_pfn < has->ha_end_pfn) {
			/*
			 * This is the case where we are backing pages
			 * in an already hot added region. Bring
			 * these pages online first.
			 */
			pgs_ol = has->ha_end_pfn - start_pfn;
			if (pgs_ol > pfn_cnt)
				pgs_ol = pfn_cnt;

			/*
			 * Check if the corresponding memory block is already
			 * online by checking its last previously backed page.
			 * In case it is, we need to bring the rest (which was
			 * not backed previously) online too.
			 */
			if (start_pfn > has->start_pfn &&
			    !PageReserved(pfn_to_page(start_pfn - 1)))
				hv_bring_pgs_online(start_pfn, pgs_ol);

			has->covered_end_pfn += pgs_ol;
			pfn_cnt -= pgs_ol;
		}

		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
			/*
			 * We have some residual hot add range
			 * that needs to be hot added; hot add
			 * it now. Hot add a multiple of
			 * HA_CHUNK that fully covers the pages
			 * we have.
			 */
			size = (has->end_pfn - has->ha_end_pfn);
			if (pfn_cnt <= size) {
				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
				if (pfn_cnt % HA_CHUNK)
					size += HA_CHUNK;
			} else {
				pfn_cnt = size;
			}
			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
		}
		/*
		 * If we managed to online any pages that were given to us,
		 * we declare success.
		 */
		return has->covered_end_pfn - old_covered_state;
	}

	return 0;
}
static unsigned long process_hot_add(unsigned long pg_start,
				     unsigned long pfn_cnt,
				     unsigned long rg_start,
				     unsigned long rg_size)
{
	struct hv_hotadd_state *ha_region = NULL;

	if (!dm_device.host_specified_ha_region)
		if (pfn_covered(pg_start, pfn_cnt))
			goto do_pg_range;

	/*
	 * If the host has specified a hot-add range, deal with it first.
	 */
	if (rg_size != 0) {
		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
		if (!ha_region)
			return 0;

		INIT_LIST_HEAD(&ha_region->list);

		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
		ha_region->start_pfn = rg_start;
		ha_region->ha_end_pfn = rg_start;
		ha_region->covered_end_pfn = pg_start;
		ha_region->end_pfn = rg_start + rg_size;
	}

do_pg_range:
	/*
	 * Process the page range specified; bringing them
	 * online if possible.
	 */
	return handle_pg_range(pg_start, pfn_cnt);
}
#endif
static void hot_add_req(struct work_struct *dummy)
{
	struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
	unsigned long pg_start, pfn_cnt;
	unsigned long rg_start, rg_sz;
#endif
	struct hv_dynmem_device *dm = &dm_device;

	memset(&resp, 0, sizeof(struct dm_hot_add_response));
	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
	resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
	mutex_lock(&dm_device.ha_region_mutex);
	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
		unsigned long region_size;
		unsigned long region_start;

		/*
		 * The host has not specified the hot-add region.
		 * Based on the hot-add page range being specified,
		 * compute a hot-add region that can cover the pages
		 * that need to be hot-added while ensuring the alignment
		 * and size requirements of Linux as it relates to hot-add.
		 */
		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
		if (pfn_cnt % HA_CHUNK)
			region_size += HA_CHUNK;

		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

		rg_start = region_start;
		rg_sz = region_size;
	}

	if (do_hot_add)
		resp.page_count = process_hot_add(pg_start, pfn_cnt,
						  rg_start, rg_sz);

	dm->num_pages_added += resp.page_count;
	mutex_unlock(&dm_device.ha_region_mutex);
#endif
	/*
	 * The result field of the response structure has the
	 * following semantics:
	 *
	 * 1. If all or some pages hot-added: Guest should return success.
	 *
	 * 2. If no pages could be hot-added:
	 *
	 * If the guest returns success, then the host
	 * will not attempt any further hot-add operations. This
	 * signifies a permanent failure.
	 *
	 * If the guest returns failure, then this failure will be
	 * treated as a transient failure and the host may retry the
	 * hot-add operation after some delay.
	 */
	if (resp.page_count > 0)
		resp.result = 1;
	else if (!do_hot_add)
		resp.result = 1;
	else
		resp.result = 0;

	if (!do_hot_add || (resp.page_count == 0))
		pr_info("Memory hot add failed\n");

	dm->state = DM_INITIALIZED;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	vmbus_sendpacket(dm->dev->channel, &resp,
			 sizeof(struct dm_hot_add_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);
}
static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
	struct dm_info_header *info_hdr;

	info_hdr = (struct dm_info_header *)msg->info;

	switch (info_hdr->type) {
	case INFO_TYPE_MAX_PAGE_CNT:
		pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
		pr_info("Data Size is %d\n", info_hdr->data_size);
		break;
	default:
		pr_info("Received Unknown type: %d\n", info_hdr->type);
	}
}
static unsigned long compute_balloon_floor(void)
{
	unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/* Simple continuous piecewise linear function:
	 *  max MiB -> min MiB  gradient
	 *       0         0
	 *      16        16
	 *      32        24
	 *     128        72    (1/2)
	 *     512       168    (1/4)
	 *    2048       360    (1/8)
	 *    8192       744    (1/16)
	 *   32768      1512    (1/32)
	 */
	if (totalram_pages < MB2PAGES(128))
		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
	else if (totalram_pages < MB2PAGES(512))
		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
	else if (totalram_pages < MB2PAGES(2048))
		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
	else if (totalram_pages < MB2PAGES(8192))
		min_pages = MB2PAGES(232) + (totalram_pages >> 4);
	else
		min_pages = MB2PAGES(488) + (totalram_pages >> 5);
#undef MB2PAGES
	return min_pages;
}
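/*
 * Worked example (editor's note): a 4096 MiB guest lands in the
 * 2048..8192 MiB branch, so the floor is MB2PAGES(232) +
 * MB2PAGES(4096)/16 = 232 MiB + 256 MiB = 488 MiB; the balloon will
 * never shrink such a guest below that amount of memory.
 */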
/*
 * Post our status as it relates to memory pressure to the
 * host. The host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */
static void post_status(struct hv_dynmem_device *dm)
{
	struct dm_status status;
	struct sysinfo val;
	unsigned long now = jiffies;
	unsigned long last_post = last_post_time;

	if (pressure_report_delay > 0) {
		--pressure_report_delay;
		return;
	}

	if (!time_after(now, (last_post_time + HZ)))
		return;

	si_meminfo(&val);
	memset(&status, 0, sizeof(struct dm_status));
	status.hdr.type = DM_STATUS_REPORT;
	status.hdr.size = sizeof(struct dm_status);
	status.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * The host expects the guest to report free and committed memory.
	 * Furthermore, the host expects the pressure information to include
	 * the ballooned out pages. For a given amount of memory that we are
	 * managing we need to compute a floor below which we should not
	 * balloon. Compute this and add it to the pressure report.
	 * We also need to report all offline pages (num_pages_added -
	 * num_pages_onlined) as committed to the host, otherwise it can try
	 * asking us to balloon them out.
	 */
	status.num_avail = val.freeram;
	status.num_committed = vm_memory_committed() +
		dm->num_pages_ballooned +
		(dm->num_pages_added > dm->num_pages_onlined ?
		 dm->num_pages_added - dm->num_pages_onlined : 0) +
		compute_balloon_floor();

	/*
	 * If our transaction ID is no longer current, just don't
	 * send the status. This can happen if we were interrupted
	 * after we picked our transaction ID.
	 */
	if (status.hdr.trans_id != atomic_read(&trans_id))
		return;

	/*
	 * If the last post time that we sampled has changed,
	 * we have raced, don't post the status.
	 */
	if (last_post != last_post_time)
		return;

	last_post_time = jiffies;
	vmbus_sendpacket(dm->dev->channel, &status,
			 sizeof(struct dm_status),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);
}
static void free_balloon_pages(struct hv_dynmem_device *dm,
			       union dm_mem_page_range *range_array)
{
	int num_pages = range_array->finfo.page_cnt;
	__u64 start_frame = range_array->finfo.start_page;
	struct page *pg;
	int i;

	for (i = 0; i < num_pages; i++) {
		pg = pfn_to_page(i + start_frame);
		__free_page(pg);
		dm->num_pages_ballooned--;
	}
}
static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
					unsigned int num_pages,
					struct dm_balloon_response *bl_resp,
					int alloc_unit)
{
	unsigned int i = 0;
	struct page *pg;

	if (num_pages < alloc_unit)
		return 0;

	for (i = 0; (i * alloc_unit) < num_pages; i++) {
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
		    PAGE_SIZE)
			return i * alloc_unit;

		/*
		 * We execute this code in a thread context. Furthermore,
		 * we don't want the kernel to try too hard.
		 */
		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
				 __GFP_NOMEMALLOC | __GFP_NOWARN,
				 get_order(alloc_unit << PAGE_SHIFT));

		if (!pg)
			return i * alloc_unit;

		dm->num_pages_ballooned += alloc_unit;

		/*
		 * If we allocated 2M pages, split them so we
		 * can free them in any order we get.
		 */
		if (alloc_unit != 1)
			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

		bl_resp->range_count++;
		bl_resp->range_array[i].finfo.start_page =
			page_to_pfn(pg);
		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
		bl_resp->hdr.size += sizeof(union dm_mem_page_range);
	}

	return num_pages;
}
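/*
 * Worked numbers (editor's note, 4 KB pages): with alloc_unit =
 * PAGES_IN_2M = 512, alloc_unit << PAGE_SHIFT is 2 MB and
 * get_order(2 MB) is 9, so each successful alloc_pages() call grabs
 * one order-9 (2 MB) block that split_page() then breaks into 512
 * independently freeable 4 KB pages.
 */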
static void balloon_up(struct work_struct *dummy)
{
	unsigned int num_pages = dm_device.balloon_wrk.num_pages;
	unsigned int num_ballooned = 0;
	struct dm_balloon_response *bl_resp;
	int alloc_unit;
	int ret;
	bool done = false;
	int i;
	struct sysinfo val;
	unsigned long floor;

	/* The host balloons pages in 2M granularity. */
	WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

	/*
	 * We will attempt 2M allocations. However, if we fail to
	 * allocate 2M chunks, we will go back to 4k allocations.
	 */
	alloc_unit = 512;

	si_meminfo(&val);
	floor = compute_balloon_floor();

	/* Refuse to balloon below the floor, keep the 2M granularity. */
	if (val.freeram < num_pages || val.freeram - num_pages < floor) {
		num_pages = val.freeram > floor ? (val.freeram - floor) : 0;
		num_pages -= num_pages % PAGES_IN_2M;
	}
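	/*
	 * Worked example (editor's note): with 1024 MB free, a floor of
	 * 488 MB and a request for 600 MB worth of pages, freeram -
	 * num_pages (424 MB) is below the floor, so the request is
	 * clamped to 1024 - 488 = 536 MB and then rounded down to the
	 * next 2 MB multiple before any allocation happens.
	 */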
	while (!done) {
		bl_resp = (struct dm_balloon_response *)send_buffer;
		memset(send_buffer, 0, PAGE_SIZE);
		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
		bl_resp->more_pages = 1;

		num_pages -= num_ballooned;
		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
						    bl_resp, alloc_unit);

		if (alloc_unit != 1 && num_ballooned == 0) {
			alloc_unit = 1;
			continue;
		}

		if (num_ballooned == 0 || num_ballooned == num_pages) {
			bl_resp->more_pages = 0;
			done = true;
			dm_device.state = DM_INITIALIZED;
		}

		/*
		 * We are pushing a lot of data through the channel;
		 * deal with transient failures caused because of the
		 * lack of space in the ring buffer.
		 */
		do {
			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
			ret = vmbus_sendpacket(dm_device.dev->channel,
					       bl_resp,
					       bl_resp->hdr.size,
					       (unsigned long)NULL,
					       VM_PKT_DATA_INBAND, 0);

			if (ret == -EAGAIN)
				msleep(20);
			post_status(&dm_device);
		} while (ret == -EAGAIN);

		if (ret) {
			/*
			 * Free up the memory we allocated.
			 */
			pr_info("Balloon response failed\n");

			for (i = 0; i < bl_resp->range_count; i++)
				free_balloon_pages(&dm_device,
						   &bl_resp->range_array[i]);

			done = true;
		}
	}
}
static void balloon_down(struct hv_dynmem_device *dm,
			 struct dm_unballoon_request *req)
{
	union dm_mem_page_range *range_array = req->range_array;
	int range_count = req->range_count;
	struct dm_unballoon_response resp;
	int i;

	for (i = 0; i < range_count; i++) {
		free_balloon_pages(dm, &range_array[i]);
		complete(&dm_device.config_event);
	}

	if (req->more_pages == 1)
		return;

	memset(&resp, 0, sizeof(struct dm_unballoon_response));
	resp.hdr.type = DM_UNBALLOON_RESPONSE;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	resp.hdr.size = sizeof(struct dm_unballoon_response);

	vmbus_sendpacket(dm_device.dev->channel, &resp,
			 sizeof(struct dm_unballoon_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);

	dm->state = DM_INITIALIZED;
}
static void balloon_onchannelcallback(void *context);
static int dm_thread_func(void *dm_dev)
{
	struct hv_dynmem_device *dm = dm_dev;

	while (!kthread_should_stop()) {
		wait_for_completion_interruptible_timeout(
						&dm_device.config_event, 1*HZ);
		/*
		 * The host expects us to post information on the memory
		 * pressure every second.
		 */
		reinit_completion(&dm_device.config_event);
		post_status(dm);
	}

	return 0;
}
static void version_resp(struct hv_dynmem_device *dm,
			 struct dm_version_response *vresp)
{
	struct dm_version_request version_req;
	int ret;

	if (vresp->is_accepted) {
		/*
		 * We are done; wakeup the
		 * context waiting for version
		 * negotiation.
		 */
		complete(&dm->host_event);
		return;
	}
	/*
	 * If there are more versions to try, continue
	 * with negotiations; if not, shut down the service
	 * since we are not able to negotiate a suitable
	 * version number with the host.
	 */
	if (dm->next_version == 0)
		goto version_error;

	dm->next_version = 0;
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
	version_req.is_last_attempt = 1;

	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);

	if (ret)
		goto version_error;

	return;

version_error:
	dm->state = DM_INIT_ERROR;
	complete(&dm->host_event);
}
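/*
 * Negotiation flow (editor's summary): balloon_probe() first offers
 * DYNMEM_PROTOCOL_VERSION_WIN8 with next_version primed to
 * DYNMEM_PROTOCOL_VERSION_WIN7. If the host rejects the first offer,
 * this handler retries once with WIN7 and is_last_attempt = 1; a
 * second rejection drives the device into DM_INIT_ERROR.
 */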
static void cap_resp(struct hv_dynmem_device *dm,
		     struct dm_capabilities_resp_msg *cap_resp)
{
	if (!cap_resp->is_accepted) {
		pr_info("Capabilities not accepted by host\n");
		dm->state = DM_INIT_ERROR;
	}
	complete(&dm->host_event);
}
static void balloon_onchannelcallback(void *context)
{
	struct hv_device *dev = context;
	u32 recvlen;
	u64 requestid;
	struct dm_message *dm_msg;
	struct dm_header *dm_hdr;
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct dm_balloon *bal_msg;
	struct dm_hot_add *ha_msg;
	union dm_mem_page_range *ha_pg_range;
	union dm_mem_page_range *ha_region;

	memset(recv_buffer, 0, sizeof(recv_buffer));
	vmbus_recvpacket(dev->channel, recv_buffer,
			 PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		dm_msg = (struct dm_message *)recv_buffer;
		dm_hdr = &dm_msg->hdr;

		switch (dm_hdr->type) {
		case DM_VERSION_RESPONSE:
			version_resp(dm,
				     (struct dm_version_response *)dm_msg);
			break;

		case DM_CAPABILITIES_RESPONSE:
			cap_resp(dm,
				 (struct dm_capabilities_resp_msg *)dm_msg);
			break;

		case DM_BALLOON_REQUEST:
			if (dm->state == DM_BALLOON_UP)
				pr_warn("Currently ballooning\n");
			bal_msg = (struct dm_balloon *)recv_buffer;
			dm->state = DM_BALLOON_UP;
			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
			schedule_work(&dm_device.balloon_wrk.wrk);
			break;

		case DM_UNBALLOON_REQUEST:
			dm->state = DM_BALLOON_DOWN;
			balloon_down(dm,
				     (struct dm_unballoon_request *)recv_buffer);
			break;

		case DM_MEM_HOT_ADD_REQUEST:
			if (dm->state == DM_HOT_ADD)
				pr_warn("Currently hot-adding\n");
			dm->state = DM_HOT_ADD;
			ha_msg = (struct dm_hot_add *)recv_buffer;
			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
				/*
				 * This is a normal hot-add request specifying
				 * hot-add memory.
				 */
				ha_pg_range = &ha_msg->range;
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range.page_range = 0;
			} else {
				/*
				 * Host is specifying that we first hot-add
				 * a region and then partially populate this
				 * region.
				 */
				dm->host_specified_ha_region = true;
				ha_pg_range = &ha_msg->range;
				ha_region = &ha_pg_range[1];
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range = *ha_region;
			}
			schedule_work(&dm_device.ha_wrk.wrk);
			break;

		case DM_INFO_MESSAGE:
			process_info(dm, (struct dm_info_msg *)dm_msg);
			break;

		default:
			pr_err("Unhandled message: type: %d\n", dm_hdr->type);
		}
	}
}
static int balloon_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id)
{
	int ret, t;
	struct dm_version_request version_req;
	struct dm_capabilities cap_msg;

	do_hot_add = hot_add;

	/*
	 * First allocate a send buffer.
	 */
	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!send_buffer)
		return -ENOMEM;

	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
			 balloon_onchannelcallback, dev);
	if (ret)
		goto probe_error0;

	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);
	INIT_LIST_HEAD(&dm_device.ha_region_list);
	mutex_init(&dm_device.ha_region_mutex);
	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
	dm_device.host_specified_ha_region = false;

	dm_device.thread =
		kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread)) {
		ret = PTR_ERR(dm_device.thread);
		goto probe_error1;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	set_online_page_callback(&hv_online_page);
	register_memory_notifier(&hv_memory_nb);
#endif

	hv_set_drvdata(dev, &dm_device);
	/*
	 * Initiate the handshake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
	version_req.is_last_attempt = 0;

	ret = vmbus_sendpacket(dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If we could not negotiate a compatible version with the host
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}
	/*
	 * Now submit our capabilities to the host.
	 */
	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
	cap_msg.hdr.size = sizeof(struct dm_capabilities);
	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

	cap_msg.caps.cap_bits.balloon = 1;
	cap_msg.caps.cap_bits.hot_add = 1;

	/*
	 * Specify our alignment requirements as it relates to
	 * memory hot-add. Specify 128MB alignment.
	 */
	cap_msg.caps.cap_bits.hot_add_alignment = 7;

	/*
	 * Currently the host does not use these
	 * values and we set them to what is done in the
	 * Windows driver.
	 */
	cap_msg.min_page_cnt = 0;
	cap_msg.max_page_number = -1;

	ret = vmbus_sendpacket(dev->channel, &cap_msg,
			       sizeof(struct dm_capabilities),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If the host does not like our capabilities,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	dm_device.state = DM_INITIALIZED;

	return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
#endif
	kthread_stop(dm_device.thread);

probe_error1:
	vmbus_close(dev->channel);
probe_error0:
	kfree(send_buffer);
	return ret;
}
static int balloon_remove(struct hv_device *dev)
{
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct list_head *cur, *tmp;
	struct hv_hotadd_state *has;

	if (dm->num_pages_ballooned != 0)
		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

	cancel_work_sync(&dm->balloon_wrk.wrk);
	cancel_work_sync(&dm->ha_wrk.wrk);

	vmbus_close(dev->channel);
	kthread_stop(dm->thread);
	kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
	unregister_memory_notifier(&hv_memory_nb);
#endif
	list_for_each_safe(cur, tmp, &dm->ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);
		list_del(&has->list);
		kfree(has);
	}

	return 0;
}
static const struct hv_vmbus_device_id id_table[] = {
	/* Dynamic Memory Class ID */
	/* 525074DC-8985-46e2-8057-A307DC18A502 */
	{ HV_DM_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);
static struct hv_driver balloon_drv = {
	.name = "hv_balloon",
	.id_table = id_table,
	.probe = balloon_probe,
	.remove = balloon_remove,
};
static int __init init_balloon_drv(void)
{
	return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");