/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
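
/*
 * For illustration: DYNMEM_MAKE_VERSION(1, 0) encodes to 0x00010000;
 * DYNMEM_MAJOR_VERSION() recovers 1 from the high word and
 * DYNMEM_MINOR_VERSION() recovers 0 from the low byte. Note that the
 * minor mask is 0xff, so only minor versions up to 255 round-trip; the
 * protocol versions defined below all fit.
 */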

enum {
        DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
        DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),

        DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
        DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,

        DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
};

/*
 * Message Types
 */

enum dm_message_type {
        /*
         * Version 0.3
         */
        DM_ERROR = 0,
        DM_VERSION_REQUEST = 1,
        DM_VERSION_RESPONSE = 2,
        DM_CAPABILITIES_REPORT = 3,
        DM_CAPABILITIES_RESPONSE = 4,
        DM_STATUS_REPORT = 5,
        DM_BALLOON_REQUEST = 6,
        DM_BALLOON_RESPONSE = 7,
        DM_UNBALLOON_REQUEST = 8,
        DM_UNBALLOON_RESPONSE = 9,
        DM_MEM_HOT_ADD_REQUEST = 10,
        DM_MEM_HOT_ADD_RESPONSE = 11,
        DM_VERSION_03_MAX = 11,
        /*
         * Version 1.0.
         */
        DM_INFO_MESSAGE = 12,
        DM_VERSION_1_MAX = 12
};

/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
        struct {
                __u16 minor_version;
                __u16 major_version;
        };
        __u32 version;
} __packed;

union dm_caps {
        struct {
                __u64 balloon:1;
                __u64 hot_add:1;
                /*
                 * To support guests that may have alignment
                 * limitations on hot-add, the guest can specify
                 * its alignment requirements; a value of n
                 * represents an alignment of 2^n megabytes.
                 */
                __u64 hot_add_alignment:4;
                __u64 reservedz:58;
        } cap_bits;
        __u64 caps;
} __packed;
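
/*
 * Worked example: this driver advertises hot_add_alignment = 7 in
 * balloon_probe() below, i.e. an alignment of 2^7 MB = 128 MB, which
 * matches the HA_CHUNK hot-add granularity used later in this file.
 */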

union dm_mem_page_range {
        struct {
                /*
                 * The PFN of the first page in the range.
                 * 40 bits is the architectural limit of a PFN
                 * number for AMD64.
                 */
                __u64 start_page:40;
                /*
                 * The number of pages in the range.
                 */
                __u64 page_cnt:24;
        } finfo;
        __u64 page_range;
} __packed;
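
/*
 * For illustration (our own example, not from the protocol spec): on x86,
 * where the first-declared bit-field occupies the low-order bits, a range
 * starting at PFN 0x100000 (the 4 GB boundary with 4K pages) and spanning
 * 512 pages packs into the single 64-bit value (512ULL << 40) | 0x100000.
 */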

/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
        __u16 type;
        __u16 size;
        __u32 trans_id;
} __packed;
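
/*
 * A sanity check on the framing (our own arithmetic, not from the spec):
 * a DM_STATUS_REPORT carries hdr.size = sizeof(struct dm_status) =
 * 8 (header) + 4 * 8 + 2 * 4 = 48 bytes, and post_status() below fills
 * in the header exactly that way.
 */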

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
        struct dm_header hdr;
        __u8 data[]; /* enclosed message */
} __packed;

/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
        struct dm_header hdr;
        union dm_version version;
        __u32 is_last_attempt:1;
        __u32 reservedz:31;
} __packed;

/*
 * Version response message; sent from the host to the guest to
 * indicate whether the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
        struct dm_header hdr;
        union dm_caps caps;
        __u64 min_page_cnt;
        __u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message indicates whether the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 * in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 * calculated as File Cache Page Fault Count - Page Read Count.
 * This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
        struct dm_header hdr;
        __u64 num_avail;
        __u64 num_committed;
        __u64 page_file_size;
        __u64 zero_free;
        __u32 page_file_writes;
        __u32 io_diff;
} __packed;

/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
        struct dm_header hdr;
        __u32 num_pages;
        __u32 reservedz;
} __packed;

/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
        struct dm_header hdr;
        __u32 reservedz;
        __u32 more_pages:1;
        __u32 range_count:31;
        union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
        struct dm_header hdr;
        __u32 more_pages:1;
        __u32 reservedz:31;
        __u32 range_count;
        union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
        struct dm_header hdr;
} __packed;

/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * arbitrary granularity of memory.
 */

struct dm_hot_add {
        struct dm_header hdr;
        union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation; 1: success, 0: failure.
 *
 */

struct dm_hot_add_response {
        struct dm_header hdr;
        __u32 page_count;
        __u32 result;
} __packed;

/*
 * Types of information sent from host to the guest.
 */

enum dm_info_type {
        INFO_TYPE_MAX_PAGE_CNT = 0,
        MAX_INFO_TYPE
};

/*
 * Header for the information message.
 */

struct dm_info_header {
        enum dm_info_type type;
        __u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
        struct dm_header hdr;
        __u32 reserved;
        __u32 info_size;
        __u8 info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn : covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long covered_start_pfn;
        unsigned long covered_end_pfn;
        unsigned long ha_end_pfn;
        unsigned long end_pfn;
};

struct balloon_state {
        __u32 num_pages;
        struct work_struct wrk;
};

struct hot_add_wrk {
        union dm_mem_page_range ha_page_range;
        union dm_mem_page_range ha_region_range;
        struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set, attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_dm_state {
        DM_INITIALIZING = 0,
        DM_INITIALIZED,
        DM_BALLOON_UP,
        DM_BALLOON_DOWN,
        DM_HOT_ADD,
        DM_INIT_ERROR
};

static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M 512
#define HA_CHUNK (32 * 1024)
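
/*
 * With 4K pages, HA_CHUNK is 32768 pages = 128 MB. This matches the
 * 128 MB memory-block granularity of Linux memory hot-add on x86 and
 * the 2^7 MB hot_add_alignment advertised in balloon_probe().
 */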

struct hv_dynmem_device {
        struct hv_device *dev;
        enum hv_dm_state state;
        struct completion host_event;
        struct completion config_event;

        /*
         * Number of pages we have currently ballooned out.
         */
        unsigned int num_pages_ballooned;

        /*
         * State to manage the ballooning (up) operation.
         */
        struct balloon_state balloon_wrk;

        /*
         * State to execute the "hot-add" operation.
         */
        struct hot_add_wrk ha_wrk;

        /*
         * This state tracks if the host has specified a hot-add
         * region.
         */
        bool host_specified_ha_region;

        /*
         * State to synchronize hot-add.
         */
        struct completion ol_waitevent;
        bool ha_waiting;
        /*
         * This thread handles hot-add
         * requests from the host as well as notifying
         * the host regarding memory pressure in
         * the guest.
         */
        struct task_struct *thread;

        struct mutex ha_region_mutex;
        struct completion waiter_event;

        /*
         * A list of hot-add regions.
         */
        struct list_head ha_region_list;

        /*
         * We start with the highest version we can support
         * and downgrade based on the host; we save here the
         * next version to try.
         */
        __u32 next_version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG
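/*
 * A note on the locking scheme below, inferred from the code rather than
 * from any specification: the memory notifier (MEM_GOING_ONLINE) takes
 * ha_region_mutex via the trylock variant so that it never sleeps directly
 * in mutex_lock(); it polls mutex_trylock() and waits on waiter_event,
 * which release_region_mutex(false) completes after unlocking. The hot-add
 * path itself drops the mutex around add_memory() and the online wait so
 * the notifier can make progress.
 */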
static void acquire_region_mutex(bool trylock)
{
        if (trylock) {
                reinit_completion(&dm_device.waiter_event);
                while (!mutex_trylock(&dm_device.ha_region_mutex))
                        wait_for_completion(&dm_device.waiter_event);
        } else {
                mutex_lock(&dm_device.ha_region_mutex);
        }
}

static void release_region_mutex(bool trylock)
{
        if (trylock) {
                mutex_unlock(&dm_device.ha_region_mutex);
        } else {
                mutex_unlock(&dm_device.ha_region_mutex);
                complete(&dm_device.waiter_event);
        }
}

static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
                              void *v)
{
        switch (val) {
        case MEM_GOING_ONLINE:
                acquire_region_mutex(true);
                break;

        case MEM_ONLINE:
        case MEM_CANCEL_ONLINE:
                release_region_mutex(true);
                if (dm_device.ha_waiting) {
                        dm_device.ha_waiting = false;
                        complete(&dm_device.ol_waitevent);
                }
                break;

        case MEM_GOING_OFFLINE:
        case MEM_OFFLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
        .notifier_call = hv_memory_notifier,
        .priority = 0
};

static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
{
        int i;

        for (i = 0; i < size; i++) {
                struct page *pg;
                pg = pfn_to_page(start_pfn + i);
                __online_page_set_limits(pg);
                __online_page_increment_counters(pg);
                __online_page_free(pg);
        }
}

static void hv_mem_hot_add(unsigned long start, unsigned long size,
                           unsigned long pfn_count,
                           struct hv_hotadd_state *has)
{
        int ret = 0;
        int i, nid;
        unsigned long start_pfn;
        unsigned long processed_pfn;
        unsigned long total_pfn = pfn_count;

        for (i = 0; i < (size/HA_CHUNK); i++) {
                start_pfn = start + (i * HA_CHUNK);
                has->ha_end_pfn += HA_CHUNK;

                if (total_pfn > HA_CHUNK) {
                        processed_pfn = HA_CHUNK;
                        total_pfn -= HA_CHUNK;
                } else {
                        processed_pfn = total_pfn;
                        total_pfn = 0;
                }

                has->covered_end_pfn += processed_pfn;

                init_completion(&dm_device.ol_waitevent);
                dm_device.ha_waiting = true;

                release_region_mutex(false);
                nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
                ret = add_memory(nid, PFN_PHYS(start_pfn),
                                (HA_CHUNK << PAGE_SHIFT));

                if (ret) {
                        pr_info("hot_add memory failed error is %d\n", ret);
                        if (ret == -EEXIST) {
                                /*
                                 * -EEXIST is not a transient failure; it is
                                 * the case where the guest's physical address
                                 * map precludes hot adding memory. Stop all
                                 * further memory hot-add.
                                 */
                                do_hot_add = false;
                        }
                        has->ha_end_pfn -= HA_CHUNK;
                        has->covered_end_pfn -= processed_pfn;
                        break;
                }

                /*
                 * Wait for the memory block to be onlined.
                 * Since the hot add has succeeded, it is ok to
                 * proceed even if the pages in the hot added region
                 * have not been "onlined" within the allowed time.
                 */
                wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
                acquire_region_mutex(false);
                post_status(&dm_device);
        }
}

static void hv_online_page(struct page *pg)
{
        struct list_head *cur;
        struct hv_hotadd_state *has;
        unsigned long cur_start_pgp;
        unsigned long cur_end_pgp;

        list_for_each(cur, &dm_device.ha_region_list) {
                has = list_entry(cur, struct hv_hotadd_state, list);
                cur_start_pgp = (unsigned long)
                        pfn_to_page(has->covered_start_pfn);
                cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

                if (((unsigned long)pg >= cur_start_pgp) &&
                    ((unsigned long)pg < cur_end_pgp)) {
                        /*
                         * This frame is currently backed; online the
                         * page.
                         */
                        __online_page_set_limits(pg);
                        __online_page_increment_counters(pg);
                        __online_page_free(pg);
                        has->covered_start_pfn++;
                }
        }
}

static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
        struct list_head *cur;
        struct hv_hotadd_state *has;
        unsigned long residual, new_inc;

        if (list_empty(&dm_device.ha_region_list))
                return false;

        list_for_each(cur, &dm_device.ha_region_list) {
                has = list_entry(cur, struct hv_hotadd_state, list);

                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if ((start_pfn >= has->end_pfn))
                        continue;
                /*
                 * If the current hot-add request extends beyond
                 * our current limit, extend it.
                 */
                if ((start_pfn + pfn_cnt) > has->end_pfn) {
                        residual = (start_pfn + pfn_cnt - has->end_pfn);
                        /*
                         * Extend the region by multiples of HA_CHUNK.
                         */
                        new_inc = (residual / HA_CHUNK) * HA_CHUNK;
                        if (residual % HA_CHUNK)
                                new_inc += HA_CHUNK;

                        has->end_pfn += new_inc;
                }

                /*
                 * If the current start pfn is not where the covered_end
                 * is, update it.
                 */
                if (has->covered_end_pfn != start_pfn) {
                        has->covered_end_pfn = start_pfn;
                        has->covered_start_pfn = start_pfn;
                }
                return true;
        }

        return false;
}

static unsigned long handle_pg_range(unsigned long pg_start,
                                     unsigned long pg_count)
{
        unsigned long start_pfn = pg_start;
        unsigned long pfn_cnt = pg_count;
        unsigned long size;
        struct list_head *cur;
        struct hv_hotadd_state *has;
        unsigned long pgs_ol = 0;
        unsigned long old_covered_state;

        if (list_empty(&dm_device.ha_region_list))
                return 0;

        list_for_each(cur, &dm_device.ha_region_list) {
                has = list_entry(cur, struct hv_hotadd_state, list);

                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if ((start_pfn >= has->end_pfn))
                        continue;

                old_covered_state = has->covered_end_pfn;

                if (start_pfn < has->ha_end_pfn) {
                        /*
                         * This is the case where we are backing pages
                         * in an already hot added region. Bring
                         * these pages online first.
                         */
                        pgs_ol = has->ha_end_pfn - start_pfn;
                        if (pgs_ol > pfn_cnt)
                                pgs_ol = pfn_cnt;
                        hv_bring_pgs_online(start_pfn, pgs_ol);
                        has->covered_end_pfn += pgs_ol;
                        has->covered_start_pfn += pgs_ol;
                        pfn_cnt -= pgs_ol;
                }

                if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
                        /*
                         * We have some residual hot add range
                         * that needs to be hot added; hot add
                         * it now. Hot add a multiple of
                         * HA_CHUNK that fully covers the pages
                         * we have.
                         */
                        size = (has->end_pfn - has->ha_end_pfn);
                        if (pfn_cnt <= size) {
                                size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
                                if (pfn_cnt % HA_CHUNK)
                                        size += HA_CHUNK;
                        } else {
                                pfn_cnt = size;
                        }
                        hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
                }
                /*
                 * If we managed to online any pages that were given to us,
                 * we declare success.
                 */
                return has->covered_end_pfn - old_covered_state;
        }

        return 0;
}

static unsigned long process_hot_add(unsigned long pg_start,
                                     unsigned long pfn_cnt,
                                     unsigned long rg_start,
                                     unsigned long rg_size)
{
        struct hv_hotadd_state *ha_region = NULL;

        if (pfn_cnt == 0)
                return 0;

        if (!dm_device.host_specified_ha_region)
                if (pfn_covered(pg_start, pfn_cnt))
                        goto do_pg_range;

        /*
         * If the host has specified a hot-add range, deal with it first.
         */

        if (rg_size != 0) {
                ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
                if (!ha_region)
                        return 0;

                INIT_LIST_HEAD(&ha_region->list);

                list_add_tail(&ha_region->list, &dm_device.ha_region_list);
                ha_region->start_pfn = rg_start;
                ha_region->ha_end_pfn = rg_start;
                ha_region->covered_start_pfn = pg_start;
                ha_region->covered_end_pfn = pg_start;
                ha_region->end_pfn = rg_start + rg_size;
        }

do_pg_range:
        /*
         * Process the page range specified, bringing the pages
         * online if possible.
         */
        return handle_pg_range(pg_start, pfn_cnt);
}

#endif

static void hot_add_req(struct work_struct *dummy)
{
        struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
        unsigned long pg_start, pfn_cnt;
        unsigned long rg_start, rg_sz;
#endif
        struct hv_dynmem_device *dm = &dm_device;

        memset(&resp, 0, sizeof(struct dm_hot_add_response));
        resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
        resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
        acquire_region_mutex(false);
        pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
        pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

        rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
        rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

        if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
                unsigned long region_size;
                unsigned long region_start;

                /*
                 * The host has not specified the hot-add region.
                 * Based on the hot-add page range being specified,
                 * compute a hot-add region that can cover the pages
                 * that need to be hot-added while ensuring the alignment
                 * and size requirements of Linux as it relates to hot-add.
                 */
                region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
                if (pfn_cnt % HA_CHUNK)
                        region_size += HA_CHUNK;

                region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

                rg_start = region_start;
                rg_sz = region_size;
        }

        if (do_hot_add)
                resp.page_count = process_hot_add(pg_start, pfn_cnt,
                                                rg_start, rg_sz);
        release_region_mutex(false);
#endif
        /*
         * The result field of the response structure has the
         * following semantics:
         *
         * 1. If all or some pages hot-added: Guest should return success.
         *
         * 2. If no pages could be hot-added:
         *
         * If the guest returns success, then the host
         * will not attempt any further hot-add operations. This
         * signifies a permanent failure.
         *
         * If the guest returns failure, then this failure will be
         * treated as a transient failure and the host may retry the
         * hot-add operation after some delay.
         */
        if (resp.page_count > 0)
                resp.result = 1;
        else if (!do_hot_add)
                resp.result = 1;
        else
                resp.result = 0;

        if (!do_hot_add || (resp.page_count == 0))
                pr_info("Memory hot add failed\n");

        dm->state = DM_INITIALIZED;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        vmbus_sendpacket(dm->dev->channel, &resp,
                        sizeof(struct dm_hot_add_response),
                        (unsigned long)NULL,
                        VM_PKT_DATA_INBAND, 0);
}

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
        struct dm_info_header *info_hdr;

        info_hdr = (struct dm_info_header *)msg->info;

        switch (info_hdr->type) {
        case INFO_TYPE_MAX_PAGE_CNT:
                pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
                pr_info("Data Size is %d\n", info_hdr->data_size);
                break;
        default:
                pr_info("Received Unknown type: %d\n", info_hdr->type);
        }
}

static unsigned long compute_balloon_floor(void)
{
        unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
        /* Simple continuous piecewise linear function:
         *  max MiB -> min MiB  gradient
         *       0         0
         *      16        16
         *      32        24
         *     128        72    (1/2)
         *     512       168    (1/4)
         *    2048       360    (1/8)
         *    8192       768    (1/16)
         *   32768      1536    (1/32)
         */
        if (totalram_pages < MB2PAGES(128))
                min_pages = MB2PAGES(8) + (totalram_pages >> 1);
        else if (totalram_pages < MB2PAGES(512))
                min_pages = MB2PAGES(40) + (totalram_pages >> 2);
        else if (totalram_pages < MB2PAGES(2048))
                min_pages = MB2PAGES(104) + (totalram_pages >> 3);
        else if (totalram_pages < MB2PAGES(8192))
                min_pages = MB2PAGES(256) + (totalram_pages >> 4);
        else
                min_pages = MB2PAGES(512) + (totalram_pages >> 5);
#undef MB2PAGES
        return min_pages;
}
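
/*
 * Worked example for the floor function above: a guest with 1024 MiB of
 * RAM falls in the < 2048 MiB branch, so the floor is
 * MB2PAGES(104) + (1024 MiB / 8) = 104 + 128 = 232 MiB worth of pages,
 * consistent with interpolating between the (512, 168) and (2048, 360)
 * rows of the table.
 */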

/*
 * Post our status as it relates to memory pressure to the
 * host. The host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */

static void post_status(struct hv_dynmem_device *dm)
{
        struct dm_status status;
        struct sysinfo val;
        unsigned long now = jiffies;
        unsigned long last_post = last_post_time;

        if (pressure_report_delay > 0) {
                --pressure_report_delay;
                return;
        }

        if (!time_after(now, (last_post_time + HZ)))
                return;

        si_meminfo(&val);
        memset(&status, 0, sizeof(struct dm_status));
        status.hdr.type = DM_STATUS_REPORT;
        status.hdr.size = sizeof(struct dm_status);
        status.hdr.trans_id = atomic_inc_return(&trans_id);

        /*
         * The host expects the guest to report free memory.
         * Further, the host expects the pressure information to
         * include the ballooned out pages.
         * For a given amount of memory that we are managing, we
         * need to compute a floor below which we should not balloon.
         * Compute this and add it to the pressure report.
         */
        status.num_avail = val.freeram;
        status.num_committed = vm_memory_committed() +
                                dm->num_pages_ballooned +
                                compute_balloon_floor();

        /*
         * If our transaction ID is no longer current, just don't
         * send the status. This can happen if we were interrupted
         * after we picked our transaction ID.
         */
        if (status.hdr.trans_id != atomic_read(&trans_id))
                return;

        /*
         * If the last post time that we sampled has changed,
         * we have raced; don't post the status.
         */
        if (last_post != last_post_time)
                return;

        last_post_time = jiffies;
        vmbus_sendpacket(dm->dev->channel, &status,
                        sizeof(struct dm_status),
                        (unsigned long)NULL,
                        VM_PKT_DATA_INBAND, 0);
}

static void free_balloon_pages(struct hv_dynmem_device *dm,
                               union dm_mem_page_range *range_array)
{
        int num_pages = range_array->finfo.page_cnt;
        __u64 start_frame = range_array->finfo.start_page;
        struct page *pg;
        int i;

        for (i = 0; i < num_pages; i++) {
                pg = pfn_to_page(i + start_frame);
                __free_page(pg);
                dm->num_pages_ballooned--;
        }
}

static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
                               struct dm_balloon_response *bl_resp,
                               int alloc_unit, bool *alloc_error)
{
        int i = 0;
        struct page *pg;

        if (num_pages < alloc_unit)
                return 0;

        for (i = 0; (i * alloc_unit) < num_pages; i++) {
                if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
                    PAGE_SIZE)
                        return i * alloc_unit;

                /*
                 * We execute this code in a thread context. Furthermore,
                 * we don't want the kernel to try too hard.
                 */
                pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
                                __GFP_NOMEMALLOC | __GFP_NOWARN,
                                get_order(alloc_unit << PAGE_SHIFT));

                if (!pg) {
                        *alloc_error = true;
                        return i * alloc_unit;
                }

                dm->num_pages_ballooned += alloc_unit;

                /*
                 * If we allocated 2M pages, split them so we
                 * can free them in any order we get.
                 */
                if (alloc_unit != 1)
                        split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

                bl_resp->range_count++;
                bl_resp->range_array[i].finfo.start_page =
                        page_to_pfn(pg);
                bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
                bl_resp->hdr.size += sizeof(union dm_mem_page_range);
        }

        return num_pages;
}
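
/*
 * Return convention for alloc_balloon_pages(): num_pages means the whole
 * request was covered, while the early returns report i * alloc_unit,
 * i.e. the pages actually allocated before the response buffer filled up
 * or an allocation failed (the latter also signalled via *alloc_error).
 * balloon_up() below uses a zero return at alloc_unit != 1 to fall back
 * to 4K allocations.
 */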

static void balloon_up(struct work_struct *dummy)
{
        int num_pages = dm_device.balloon_wrk.num_pages;
        int num_ballooned = 0;
        struct dm_balloon_response *bl_resp;
        int alloc_unit;
        int ret;
        bool alloc_error;
        bool done = false;
        int i;

        /* The host balloons pages in 2M granularity. */
        WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

        /*
         * We will attempt 2M allocations (PAGES_IN_2M pages of 4K each).
         * However, if we fail to allocate 2M chunks, we will go back to
         * 4k allocations.
         */
        alloc_unit = PAGES_IN_2M;

        while (!done) {
                bl_resp = (struct dm_balloon_response *)send_buffer;
                memset(send_buffer, 0, PAGE_SIZE);
                bl_resp->hdr.type = DM_BALLOON_RESPONSE;
                bl_resp->hdr.size = sizeof(struct dm_balloon_response);
                bl_resp->more_pages = 1;

                num_pages -= num_ballooned;
                alloc_error = false;
                num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
                                                bl_resp, alloc_unit,
                                                &alloc_error);

                if (alloc_unit != 1 && num_ballooned == 0) {
                        alloc_unit = 1;
                        continue;
                }

                if ((alloc_unit == 1 && alloc_error) ||
                    (num_ballooned == num_pages)) {
                        bl_resp->more_pages = 0;
                        done = true;
                        dm_device.state = DM_INITIALIZED;
                }

                /*
                 * We are pushing a lot of data through the channel;
                 * deal with transient failures caused by the lack of
                 * space in the ring buffer.
                 */

                do {
                        bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
                        ret = vmbus_sendpacket(dm_device.dev->channel,
                                                bl_resp,
                                                bl_resp->hdr.size,
                                                (unsigned long)NULL,
                                                VM_PKT_DATA_INBAND, 0);

                        if (ret == -EAGAIN)
                                msleep(20);
                        post_status(&dm_device);
                } while (ret == -EAGAIN);

                if (ret) {
                        /*
                         * Free up the memory we allocated.
                         */
                        pr_info("Balloon response failed\n");

                        for (i = 0; i < bl_resp->range_count; i++)
                                free_balloon_pages(&dm_device,
                                                &bl_resp->range_array[i]);

                        done = true;
                }
        }
}

static void balloon_down(struct hv_dynmem_device *dm,
                         struct dm_unballoon_request *req)
{
        union dm_mem_page_range *range_array = req->range_array;
        int range_count = req->range_count;
        struct dm_unballoon_response resp;
        int i;

        for (i = 0; i < range_count; i++) {
                free_balloon_pages(dm, &range_array[i]);
                complete(&dm_device.config_event);
        }

        if (req->more_pages == 1)
                return;

        memset(&resp, 0, sizeof(struct dm_unballoon_response));
        resp.hdr.type = DM_UNBALLOON_RESPONSE;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        resp.hdr.size = sizeof(struct dm_unballoon_response);

        vmbus_sendpacket(dm_device.dev->channel, &resp,
                        sizeof(struct dm_unballoon_response),
                        (unsigned long)NULL,
                        VM_PKT_DATA_INBAND, 0);

        dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
        struct hv_dynmem_device *dm = dm_dev;

        while (!kthread_should_stop()) {
                wait_for_completion_interruptible_timeout(
                        &dm_device.config_event, 1*HZ);
                /*
                 * The host expects us to post information on the memory
                 * pressure every second.
                 */
                reinit_completion(&dm_device.config_event);
                post_status(dm);
        }

        return 0;
}

static void version_resp(struct hv_dynmem_device *dm,
                         struct dm_version_response *vresp)
{
        struct dm_version_request version_req;
        int ret;

        if (vresp->is_accepted) {
                /*
                 * We are done; wake up the
                 * context waiting for version
                 * negotiation.
                 */
                complete(&dm->host_event);
                return;
        }
        /*
         * If there are more versions to try, continue
         * with negotiations; if not, shut down the service
         * since we are not able to negotiate a suitable version
         * number with the host.
         */
        if (dm->next_version == 0)
                goto version_error;

        dm->next_version = 0;
        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
        version_req.is_last_attempt = 1;

        ret = vmbus_sendpacket(dm->dev->channel, &version_req,
                                sizeof(struct dm_version_request),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

        if (ret)
                goto version_error;

        return;

version_error:
        dm->state = DM_INIT_ERROR;
        complete(&dm->host_event);
}

static void cap_resp(struct hv_dynmem_device *dm,
                     struct dm_capabilities_resp_msg *cap_resp)
{
        if (!cap_resp->is_accepted) {
                pr_info("Capabilities not accepted by host\n");
                dm->state = DM_INIT_ERROR;
        }
        complete(&dm->host_event);
}

static void balloon_onchannelcallback(void *context)
{
        struct hv_device *dev = context;
        u32 recvlen;
        u64 requestid;
        struct dm_message *dm_msg;
        struct dm_header *dm_hdr;
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct dm_balloon *bal_msg;
        struct dm_hot_add *ha_msg;
        union dm_mem_page_range *ha_pg_range;
        union dm_mem_page_range *ha_region;

        memset(recv_buffer, 0, sizeof(recv_buffer));
        vmbus_recvpacket(dev->channel, recv_buffer,
                        PAGE_SIZE, &recvlen, &requestid);

        if (recvlen > 0) {
                dm_msg = (struct dm_message *)recv_buffer;
                dm_hdr = &dm_msg->hdr;

                switch (dm_hdr->type) {
                case DM_VERSION_RESPONSE:
                        version_resp(dm,
                                    (struct dm_version_response *)dm_msg);
                        break;

                case DM_CAPABILITIES_RESPONSE:
                        cap_resp(dm,
                                (struct dm_capabilities_resp_msg *)dm_msg);
                        break;

                case DM_BALLOON_REQUEST:
                        if (dm->state == DM_BALLOON_UP)
                                pr_warn("Currently ballooning\n");
                        bal_msg = (struct dm_balloon *)recv_buffer;
                        dm->state = DM_BALLOON_UP;
                        dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
                        schedule_work(&dm_device.balloon_wrk.wrk);
                        break;

                case DM_UNBALLOON_REQUEST:
                        dm->state = DM_BALLOON_DOWN;
                        balloon_down(dm,
                                    (struct dm_unballoon_request *)recv_buffer);
                        break;

                case DM_MEM_HOT_ADD_REQUEST:
                        if (dm->state == DM_HOT_ADD)
                                pr_warn("Currently hot-adding\n");
                        dm->state = DM_HOT_ADD;
                        ha_msg = (struct dm_hot_add *)recv_buffer;
                        if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
                                /*
                                 * This is a normal hot-add request specifying
                                 * hot-add memory.
                                 */
                                ha_pg_range = &ha_msg->range;
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range.page_range = 0;
                        } else {
                                /*
                                 * Host is specifying that we first hot-add
                                 * a region and then partially populate this
                                 * region.
                                 */
                                dm->host_specified_ha_region = true;
                                ha_pg_range = &ha_msg->range;
                                ha_region = &ha_pg_range[1];
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range = *ha_region;
                        }
                        schedule_work(&dm_device.ha_wrk.wrk);
                        break;

                case DM_INFO_MESSAGE:
                        process_info(dm, (struct dm_info_msg *)dm_msg);
                        break;

                default:
                        pr_err("Unhandled message: type: %d\n", dm_hdr->type);
                }
        }
}

static int balloon_probe(struct hv_device *dev,
                         const struct hv_vmbus_device_id *dev_id)
{
        int ret, t;
        struct dm_version_request version_req;
        struct dm_capabilities cap_msg;

        do_hot_add = hot_add;

        /*
         * First allocate a send buffer.
         */

        send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!send_buffer)
                return -ENOMEM;

        ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
                        balloon_onchannelcallback, dev);

        if (ret)
                goto probe_error0;

        dm_device.dev = dev;
        dm_device.state = DM_INITIALIZING;
        dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
        init_completion(&dm_device.host_event);
        init_completion(&dm_device.config_event);
        init_completion(&dm_device.waiter_event);
        INIT_LIST_HEAD(&dm_device.ha_region_list);
        mutex_init(&dm_device.ha_region_mutex);
        INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
        INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
        dm_device.host_specified_ha_region = false;

        dm_device.thread =
                kthread_run(dm_thread_func, &dm_device, "hv_balloon");
        if (IS_ERR(dm_device.thread)) {
                ret = PTR_ERR(dm_device.thread);
                goto probe_error1;
        }

#ifdef CONFIG_MEMORY_HOTPLUG
        set_online_page_callback(&hv_online_page);
        register_memory_notifier(&hv_memory_nb);
#endif

        hv_set_drvdata(dev, &dm_device);
        /*
         * Initiate the handshake with the host and negotiate
         * a version that the host can support. We start with the
         * highest version number and go down if the host cannot
         * support it.
         */
        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
        version_req.is_last_attempt = 0;

        ret = vmbus_sendpacket(dev->channel, &version_req,
                                sizeof(struct dm_version_request),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If we could not negotiate a compatible version with the host,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }
        /*
         * Now submit our capabilities to the host.
         */
        memset(&cap_msg, 0, sizeof(struct dm_capabilities));
        cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
        cap_msg.hdr.size = sizeof(struct dm_capabilities);
        cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

        cap_msg.caps.cap_bits.balloon = 1;
        cap_msg.caps.cap_bits.hot_add = 1;

        /*
         * Specify our alignment requirements as they relate to
         * memory hot-add: a value of 7 requests 2^7 MB = 128MB
         * alignment, matching HA_CHUNK.
         */
        cap_msg.caps.cap_bits.hot_add_alignment = 7;

        /*
         * Currently the host does not use these
         * values and we set them to what is done in the
         * Windows driver.
         */
        cap_msg.min_page_cnt = 0;
        cap_msg.max_page_number = -1;

        ret = vmbus_sendpacket(dev->channel, &cap_msg,
                                sizeof(struct dm_capabilities),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If the host does not like our capabilities,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        dm_device.state = DM_INITIALIZED;

        return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
        unregister_memory_notifier(&hv_memory_nb);
#endif
        kthread_stop(dm_device.thread);

probe_error1:
        vmbus_close(dev->channel);
probe_error0:
        kfree(send_buffer);
        return ret;
}

static int balloon_remove(struct hv_device *dev)
{
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct list_head *cur, *tmp;
        struct hv_hotadd_state *has;

        if (dm->num_pages_ballooned != 0)
                pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

        cancel_work_sync(&dm->balloon_wrk.wrk);
        cancel_work_sync(&dm->ha_wrk.wrk);

        vmbus_close(dev->channel);
        kthread_stop(dm->thread);
        kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
        unregister_memory_notifier(&hv_memory_nb);
#endif
        list_for_each_safe(cur, tmp, &dm->ha_region_list) {
                has = list_entry(cur, struct hv_hotadd_state, list);
                list_del(&has->list);
                kfree(has);
        }

        return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
        /* Dynamic Memory Class ID */
        /* 525074DC-8985-46e2-8057-A307DC18A502 */
        { HV_DM_GUID, },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
        .name = "hv_balloon",
        .id_table = id_table,
        .probe = balloon_probe,
        .remove = balloon_remove,
};

static int __init init_balloon_drv(void)
{
        return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");