[linux.git] / drivers / hv / hv_balloon.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2012, Microsoft Corporation.
4  *
5  * Author:
6  *   K. Y. Srinivasan <[email protected]>
7  */
8
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/cleanup.h>
12 #include <linux/kernel.h>
13 #include <linux/jiffies.h>
14 #include <linux/mman.h>
15 #include <linux/debugfs.h>
16 #include <linux/delay.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/kthread.h>
21 #include <linux/completion.h>
22 #include <linux/count_zeros.h>
23 #include <linux/memory_hotplug.h>
24 #include <linux/memory.h>
25 #include <linux/notifier.h>
26 #include <linux/percpu_counter.h>
27 #include <linux/page_reporting.h>
28 #include <linux/sizes.h>
29
30 #include <linux/hyperv.h>
31 #include <asm/hyperv-tlfs.h>
32
33 #include <asm/mshyperv.h>
34
35 #define CREATE_TRACE_POINTS
36 #include "hv_trace_balloon.h"
37
38 /*
39  * We begin with definitions supporting the Dynamic Memory protocol
40  * with the host.
41  *
42  * Begin protocol definitions.
43  */
44
45 /*
46  * Protocol versions. The low word is the minor version, the high word the major
47  * version.
48  *
49  * History:
50  * Initial version 1.0
51  * Changed to 0.1 on 2009/03/25
52  * Changed to 0.2 on 2009/05/14
53  * Changed to 0.3 on 2009/12/03
54  * Changed to 1.0 on 2011/04/05
55  */
56
57 #define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
58 #define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
59 #define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
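
/*
 * Illustrative example of the version encoding (not part of the original
 * source): DYNMEM_MAKE_VERSION(2, 0) == 0x00020000, for which
 * DYNMEM_MAJOR_VERSION() yields 2 and DYNMEM_MINOR_VERSION() yields 0.
 */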
60
61 enum {
62         DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
63         DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
64         DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
65
66         DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
67         DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
68         DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
69
70         DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
71 };
72
73 /*
74  * Message Types
75  */
76
77 enum dm_message_type {
78         /*
79          * Version 0.3
80          */
81         DM_ERROR                        = 0,
82         DM_VERSION_REQUEST              = 1,
83         DM_VERSION_RESPONSE             = 2,
84         DM_CAPABILITIES_REPORT          = 3,
85         DM_CAPABILITIES_RESPONSE        = 4,
86         DM_STATUS_REPORT                = 5,
87         DM_BALLOON_REQUEST              = 6,
88         DM_BALLOON_RESPONSE             = 7,
89         DM_UNBALLOON_REQUEST            = 8,
90         DM_UNBALLOON_RESPONSE           = 9,
91         DM_MEM_HOT_ADD_REQUEST          = 10,
92         DM_MEM_HOT_ADD_RESPONSE         = 11,
93         DM_VERSION_03_MAX               = 11,
94         /*
95          * Version 1.0.
96          */
97         DM_INFO_MESSAGE                 = 12,
98         DM_VERSION_1_MAX                = 12
99 };
100
101 /*
102  * Structures defining the dynamic memory management
103  * protocol.
104  */
105
106 union dm_version {
107         struct {
108                 __u16 minor_version;
109                 __u16 major_version;
110         };
111         __u32 version;
112 } __packed;
113
114 union dm_caps {
115         struct {
116                 __u64 balloon:1;
117                 __u64 hot_add:1;
118                 /*
119                  * To support guests that may have alignment
120                  * limitations on hot-add, the guest can specify
121                  * its alignment requirements; a value of n
122                  * represents an alignment of 2^n in megabytes.
123                  */
124                 __u64 hot_add_alignment:4;
125                 __u64 reservedz:58;
126         } cap_bits;
127         __u64 caps;
128 } __packed;
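
/*
 * Illustrative example (not from the original source): advertising
 * hot_add_alignment = 7 would declare a 2^7 = 128 MB alignment requirement,
 * which matches the typical x86 memory section size.
 */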
129
130 union dm_mem_page_range {
131         struct  {
132                 /*
133                  * The PFN of the first page in the range.
134                  * 40 bits is the architectural limit of a PFN
135                  * for AMD64.
136                  */
137                 __u64 start_page:40;
138                 /*
139                  * The number of pages in the range.
140                  */
141                 __u64 page_cnt:24;
142         } finfo;
143         __u64  page_range;
144 } __packed;
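
/*
 * Illustrative encoding example, assuming the usual LSB-first bitfield
 * layout on x86-64: a range of 512 pages starting at PFN 0x100000 packs
 * into page_range as ((__u64)512 << 40) | 0x100000.
 */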
145
146 /*
147  * The header for all dynamic memory messages:
148  *
149  * type: Type of the message.
150  * size: Size of the message in bytes, including the header.
151  * trans_id: The guest is responsible for manufacturing this ID.
152  */
153
154 struct dm_header {
155         __u16 type;
156         __u16 size;
157         __u32 trans_id;
158 } __packed;
159
160 /*
161  * A generic message format for dynamic memory.
162  * Specific message formats are defined later in the file.
163  */
164
165 struct dm_message {
166         struct dm_header hdr;
167         __u8 data[]; /* enclosed message */
168 } __packed;
169
170 /*
171  * Specific message types supporting the dynamic memory protocol.
172  */
173
174 /*
175  * Version negotiation message. Sent from the guest to the host.
176  * The guest is free to try different versions until the host
177  * accepts the version.
178  *
179  * dm_version: The protocol version requested.
180  * is_last_attempt: If TRUE, this is the last version the guest will request.
181  * reservedz: Reserved field, set to zero.
182  */
183
184 struct dm_version_request {
185         struct dm_header hdr;
186         union dm_version version;
187         __u32 is_last_attempt:1;
188         __u32 reservedz:31;
189 } __packed;
190
191 /*
192  * Version response message; sent from the host to the guest, indicating
193  * whether the host has accepted the version sent by the guest.
194  *
195  * is_accepted: If TRUE, the host has accepted the version and the guest
196  * should proceed to the next stage of the protocol. FALSE indicates that
197  * the guest should retry with a different version.
198  *
199  * reservedz: Reserved field, set to zero.
200  */
201
202 struct dm_version_response {
203         struct dm_header hdr;
204         __u64 is_accepted:1;
205         __u64 reservedz:63;
206 } __packed;
207
208 /*
209  * Message reporting capabilities. This is sent from the guest to the
210  * host.
211  */
212
213 struct dm_capabilities {
214         struct dm_header hdr;
215         union dm_caps caps;
216         __u64 min_page_cnt;
217         __u64 max_page_number;
218 } __packed;
219
220 /*
221  * Response to the capabilities message. This is sent from the host to the
222  * guest. This message indicates whether the host has accepted the guest's
223  * capabilities. If the host has not accepted, the guest must shut down
224  * the service.
225  *
226  * is_accepted: Indicates if the host has accepted guest's capabilities.
227  * reservedz: Must be 0.
228  */
229
230 struct dm_capabilities_resp_msg {
231         struct dm_header hdr;
232         __u64 is_accepted:1;
233         __u64 reservedz:63;
234 } __packed;
235
236 /*
237  * This message is used to report memory pressure from the guest.
238  * This message is not part of any transaction and there is no
239  * response to this message.
240  *
241  * num_avail: Available memory in pages.
242  * num_committed: Committed memory in pages.
243  * page_file_size: The accumulated size of all page files
244  *                 in the system in pages.
245  * zero_free: The number of zero and free pages.
246  * page_file_writes: The writes to the page file in pages.
247  * io_diff: An indicator of file cache efficiency or page file activity,
248  *          calculated as File Cache Page Fault Count - Page Read Count.
249  *          This value is in pages.
250  *
251  * Some of these metrics are Windows specific and fortunately
252  * the algorithm on the host side that computes the guest memory
253  * pressure only uses the num_committed value.
254  */
255
256 struct dm_status {
257         struct dm_header hdr;
258         __u64 num_avail;
259         __u64 num_committed;
260         __u64 page_file_size;
261         __u64 zero_free;
262         __u32 page_file_writes;
263         __u32 io_diff;
264 } __packed;
265
266 /*
267  * Message to ask the guest to allocate memory - balloon up message.
268  * This message is sent from the host to the guest. The guest may not be
269  * able to allocate as much memory as requested.
270  *
271  * num_pages: number of pages to allocate.
272  */
273
274 struct dm_balloon {
275         struct dm_header hdr;
276         __u32 num_pages;
277         __u32 reservedz;
278 } __packed;
279
280 /*
281  * Balloon response message; this message is sent from the guest
282  * to the host in response to the balloon message.
283  *
284  * reservedz: Reserved; must be set to zero.
285  * more_pages: If FALSE, this is the last message of the transaction;
286  * if TRUE, there will be at least one more message from the guest.
287  *
288  * range_count: The number of ranges in the range array.
289  *
290  * range_array: An array of page ranges returned to the host.
291  *
292  */
293
294 struct dm_balloon_response {
295         struct dm_header hdr;
296         __u32 reservedz;
297         __u32 more_pages:1;
298         __u32 range_count:31;
299         union dm_mem_page_range range_array[];
300 } __packed;
301
302 /*
303  * Un-balloon message; this message is sent from the host
304  * to the guest to give guest more memory.
305  *
306  * more_pages: If FALSE, this is the last message of the transaction;
307  * if TRUE, there will be at least one more message from the guest.
308  *
309  * reservedz: Reserved; must be set to zero.
310  *
311  * range_count: The number of ranges in the range array.
312  *
313  * range_array: An array of page ranges returned to the host.
314  *
315  */
316
317 struct dm_unballoon_request {
318         struct dm_header hdr;
319         __u32 more_pages:1;
320         __u32 reservedz:31;
321         __u32 range_count;
322         union dm_mem_page_range range_array[];
323 } __packed;
324
325 /*
326  * Un-balloon response message; this message is sent from the guest
327  * to the host in response to an unballoon request.
328  *
329  */
330
331 struct dm_unballoon_response {
332         struct dm_header hdr;
333 } __packed;
334
335 /*
336  * Hot add request message. Message sent from the host to the guest.
337  *
338  * mem_range: Memory range to hot add.
339  *
340  */
341
342 struct dm_hot_add {
343         struct dm_header hdr;
344         union dm_mem_page_range range;
345 } __packed;
346
347 /*
348  * Hot add response message.
349  * This message is sent by the guest to report the status of a hot add request.
350  * If page_count is less than the requested page count, then the host should
351  * assume all further hot add requests will fail, since this indicates that
352  * the guest has hit an upper physical memory barrier.
353  *
354  * Hot adds may also fail due to low resources; in this case, the guest must
355  * not complete this message until the hot add can succeed, and the host must
356  * not send a new hot add request until the response is sent.
357  * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
358  * times, it fails the request.
359  *
360  *
361  * page_count: number of pages that were successfully hot added.
362  *
363  * result: result of the operation; 1: success, 0: failure.
364  *
365  */
366
367 struct dm_hot_add_response {
368         struct dm_header hdr;
369         __u32 page_count;
370         __u32 result;
371 } __packed;
372
373 /*
374  * Types of information sent from host to the guest.
375  */
376
377 enum dm_info_type {
378         INFO_TYPE_MAX_PAGE_CNT = 0,
379         MAX_INFO_TYPE
380 };
381
382 /*
383  * Header for the information message.
384  */
385
386 struct dm_info_header {
387         enum dm_info_type type;
388         __u32 data_size;
389 } __packed;
390
391 /*
392  * This message is sent from the host to the guest to pass
393  * some relevant information (win8 addition).
394  *
395  * reserved: not used.
396  * info_size: size of the information blob.
397  * info: information blob.
398  */
399
400 struct dm_info_msg {
401         struct dm_header hdr;
402         __u32 reserved;
403         __u32 info_size;
404         __u8  info[];
405 };
406
407 /*
408  * End protocol definitions.
409  */
410
411 /*
412  * State to manage hot adding memory into the guest.
413  * The range start_pfn : end_pfn specifies the range
414  * that the host has asked us to hot add. The range
415  * start_pfn : ha_end_pfn specifies the range that we have
416  * currently hot added. We hot add in chunks equal to the
417  * memory block size; it is possible that we may not be able
418  * to bring online all the pages in the region. The range
419  * covered_start_pfn:covered_end_pfn defines the pages that can
420  * be brought online.
421  */
422
423 struct hv_hotadd_state {
424         struct list_head list;
425         unsigned long start_pfn;
426         unsigned long covered_start_pfn;
427         unsigned long covered_end_pfn;
428         unsigned long ha_end_pfn;
429         unsigned long end_pfn;
430         /*
431          * A list of gaps.
432          */
433         struct list_head gap_list;
434 };
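
/*
 * Descriptive note (not stated in the original source): for a region the
 * PFNs are ordered roughly as start_pfn <= covered_start_pfn <=
 * covered_end_pfn <= ha_end_pfn <= end_pfn, and gap_list records sub-ranges
 * inside the covered range that are not backed by host memory.
 */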
435
436 struct hv_hotadd_gap {
437         struct list_head list;
438         unsigned long start_pfn;
439         unsigned long end_pfn;
440 };
441
442 struct balloon_state {
443         __u32 num_pages;
444         struct work_struct wrk;
445 };
446
447 struct hot_add_wrk {
448         union dm_mem_page_range ha_page_range;
449         union dm_mem_page_range ha_region_range;
450         struct work_struct wrk;
451 };
452
453 static bool allow_hibernation;
454 static bool hot_add = true;
455 static bool do_hot_add;
456 /*
457  * Delay reporting memory pressure by
458  * the specified number of seconds.
459  */
460 static uint pressure_report_delay = 45;
461 extern unsigned int page_reporting_order;
462 #define HV_MAX_FAILURES 2
463
464 /*
465  * The last time we posted a pressure report to host.
466  */
467 static unsigned long last_post_time;
468
469 static int hv_hypercall_multi_failure;
470
471 module_param(hot_add, bool, 0644);
472 MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
473
474 module_param(pressure_report_delay, uint, 0644);
475 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
476 static atomic_t trans_id = ATOMIC_INIT(0);
477
478 static int dm_ring_size = VMBUS_RING_SIZE(16 * 1024);
479
480 /*
481  * Driver specific state.
482  */
483
484 enum hv_dm_state {
485         DM_INITIALIZING = 0,
486         DM_INITIALIZED,
487         DM_BALLOON_UP,
488         DM_BALLOON_DOWN,
489         DM_HOT_ADD,
490         DM_INIT_ERROR
491 };
492
493 static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
494 static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
495
496 static unsigned long ha_pages_in_chunk;
497 #define HA_BYTES_IN_CHUNK (ha_pages_in_chunk << PAGE_SHIFT)
498
499 #define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
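
/* Note: with the common 4 KiB PAGE_SIZE, PAGES_IN_2M evaluates to 512. */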
500
501 struct hv_dynmem_device {
502         struct hv_device *dev;
503         enum hv_dm_state state;
504         struct completion host_event;
505         struct completion config_event;
506
507         /*
508          * Number of pages we have currently ballooned out.
509          */
510         unsigned int num_pages_ballooned;
511         unsigned int num_pages_onlined;
512         unsigned int num_pages_added;
513
514         /*
515          * State to manage the ballooning (up) operation.
516          */
517         struct balloon_state balloon_wrk;
518
519         /*
520          * State to execute the "hot-add" operation.
521          */
522         struct hot_add_wrk ha_wrk;
523
524         /*
525          * This state tracks if the host has specified a hot-add
526          * region.
527          */
528         bool host_specified_ha_region;
529
530         /*
531          * State to synchronize hot-add.
532          */
533         struct completion  ol_waitevent;
534         /*
535          * This thread handles hot-add
536          * requests from the host as well as notifying
537          * the host with regards to memory pressure in
538          * the guest.
539          */
540         struct task_struct *thread;
541
542         /*
543          * Protects ha_region_list, num_pages_onlined counter and individual
544          * regions from ha_region_list.
545          */
546         spinlock_t ha_lock;
547
548         /*
549          * A list of hot-add regions.
550          */
551         struct list_head ha_region_list;
552
553         /*
554          * We start with the highest version we can support
555          * and downgrade based on the host; we save here the
556          * next version to try.
557          */
558         __u32 next_version;
559
560         /*
561          * The negotiated version agreed by host.
562          */
563         __u32 version;
564
565         struct page_reporting_dev_info pr_dev_info;
566
567         /*
568          * Maximum number of pages that can be hot_add-ed
569          */
570         __u64 max_dynamic_page_count;
571 };
572
573 static struct hv_dynmem_device dm_device;
574
575 static void post_status(struct hv_dynmem_device *dm);
576
577 static void enable_page_reporting(void);
578
579 static void disable_page_reporting(void);
580
581 #ifdef CONFIG_MEMORY_HOTPLUG
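
/*
 * Descriptive note: a PFN counts as "backed" when it lies inside the
 * region's covered range and does not fall inside any recorded gap.
 */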
582 static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
583                                      unsigned long pfn)
584 {
585         struct hv_hotadd_gap *gap;
586
587         /* The page is not backed. */
588         if (pfn < has->covered_start_pfn || pfn >= has->covered_end_pfn)
589                 return false;
590
591         /* Check for gaps. */
592         list_for_each_entry(gap, &has->gap_list, list) {
593                 if (pfn >= gap->start_pfn && pfn < gap->end_pfn)
594                         return false;
595         }
596
597         return true;
598 }
599
600 static unsigned long hv_page_offline_check(unsigned long start_pfn,
601                                            unsigned long nr_pages)
602 {
603         unsigned long pfn = start_pfn, count = 0;
604         struct hv_hotadd_state *has;
605         bool found;
606
607         while (pfn < start_pfn + nr_pages) {
608                 /*
609                  * Search for a HAS that covers the pfn and, when we find one,
610                  * count how many consecutive PFNs are covered.
611                  */
612                 found = false;
613                 list_for_each_entry(has, &dm_device.ha_region_list, list) {
614                         while ((pfn >= has->start_pfn) &&
615                                (pfn < has->end_pfn) &&
616                                (pfn < start_pfn + nr_pages)) {
617                                 found = true;
618                                 if (has_pfn_is_backed(has, pfn))
619                                         count++;
620                                 pfn++;
621                         }
622                 }
623
624                 /*
625                  * This PFN is not in any HAS (e.g. we're offlining a region
626                  * which was present at boot), no need to account for it. Go
627                  * to the next one.
628                  */
629                 if (!found)
630                         pfn++;
631         }
632
633         return count;
634 }
635
636 static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
637                               void *v)
638 {
639         struct memory_notify *mem = (struct memory_notify *)v;
640         unsigned long pfn_count;
641
642         switch (val) {
643         case MEM_ONLINE:
644         case MEM_CANCEL_ONLINE:
645                 complete(&dm_device.ol_waitevent);
646                 break;
647
648         case MEM_OFFLINE:
649                 scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
650                         pfn_count = hv_page_offline_check(mem->start_pfn,
651                                                           mem->nr_pages);
652                         if (pfn_count <= dm_device.num_pages_onlined) {
653                                 dm_device.num_pages_onlined -= pfn_count;
654                         } else {
655                                 /*
656                                  * We're offlining more pages than we
657                                  * managed to online. This is
658                                  * unexpected. In any case don't let
659                                  * num_pages_onlined wrap around zero.
660                                  */
661                                 WARN_ON_ONCE(1);
662                                 dm_device.num_pages_onlined = 0;
663                         }
664                 }
665                 break;
666         case MEM_GOING_ONLINE:
667         case MEM_GOING_OFFLINE:
668         case MEM_CANCEL_OFFLINE:
669                 break;
670         }
671         return NOTIFY_OK;
672 }
673
674 static struct notifier_block hv_memory_nb = {
675         .notifier_call = hv_memory_notifier,
676         .priority = 0
677 };
678
679 /* Check if the particular page is backed and can be onlined, and online it. */
680 static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
681 {
682         if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
683                 if (!PageOffline(pg))
684                         __SetPageOffline(pg);
685                 return;
686         } else if (!PageOffline(pg))
687                 return;
688
689         /* This frame is currently backed; online the page. */
690         generic_online_page(pg, 0);
691
692         lockdep_assert_held(&dm_device.ha_lock);
693         dm_device.num_pages_onlined++;
694 }
695
696 static void hv_bring_pgs_online(struct hv_hotadd_state *has,
697                                 unsigned long start_pfn, unsigned long size)
698 {
699         int i;
700
701         pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
702         for (i = 0; i < size; i++)
703                 hv_page_online_one(has, pfn_to_page(start_pfn + i));
704 }
705
706 static void hv_mem_hot_add(unsigned long start, unsigned long size,
707                                 unsigned long pfn_count,
708                                 struct hv_hotadd_state *has)
709 {
710         int ret = 0;
711         int i, nid;
712         unsigned long start_pfn;
713         unsigned long processed_pfn;
714         unsigned long total_pfn = pfn_count;
715
716         for (i = 0; i < (size/ha_pages_in_chunk); i++) {
717                 start_pfn = start + (i * ha_pages_in_chunk);
718
719                 scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
720                         has->ha_end_pfn += ha_pages_in_chunk;
721                         processed_pfn = umin(total_pfn, ha_pages_in_chunk);
722                         total_pfn -= processed_pfn;
723                         has->covered_end_pfn += processed_pfn;
724                 }
725
726                 reinit_completion(&dm_device.ol_waitevent);
727
728                 nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
729                 ret = add_memory(nid, PFN_PHYS((start_pfn)),
730                                  HA_BYTES_IN_CHUNK, MHP_MERGE_RESOURCE);
731
732                 if (ret) {
733                         pr_err("hot_add memory failed error is %d\n", ret);
734                         if (ret == -EEXIST) {
735                                 /*
736                                  * This error indicates that the failure
737                                  * is not transient. This is the
738                                  * case where the guest's physical address map
739                                  * precludes hot adding memory. Stop all further
740                                  * memory hot-add.
741                                  */
742                                 do_hot_add = false;
743                         }
744                         scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
745                                 has->ha_end_pfn -= ha_pages_in_chunk;
746                                 has->covered_end_pfn -=  processed_pfn;
747                         }
748                         break;
749                 }
750
751                 /*
752                  * Wait for memory to get onlined. If the kernel onlined the
753                  * memory when adding it, this will return directly. Otherwise,
754                  * it will wait for user space to online the memory. This helps
755                  * to avoid adding memory faster than it is getting onlined. As
756                  * adding succeeded, it is ok to proceed even if the memory was
757                  * not onlined in time.
758                  */
759                 wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
760                 post_status(&dm_device);
761         }
762 }
763
764 static void hv_online_page(struct page *pg, unsigned int order)
765 {
766         struct hv_hotadd_state *has;
767         unsigned long pfn = page_to_pfn(pg);
768
769         guard(spinlock_irqsave)(&dm_device.ha_lock);
770         list_for_each_entry(has, &dm_device.ha_region_list, list) {
771                 /* The page belongs to a different HAS. */
772                 if (pfn < has->start_pfn ||
773                     (pfn + (1UL << order) > has->end_pfn))
774                         continue;
775
776                 hv_bring_pgs_online(has, pfn, 1UL << order);
777                 break;
778         }
779 }
780
781 static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
782 {
783         struct hv_hotadd_state *has;
784         struct hv_hotadd_gap *gap;
785         unsigned long residual;
786         int ret = 0;
787
788         guard(spinlock_irqsave)(&dm_device.ha_lock);
789         list_for_each_entry(has, &dm_device.ha_region_list, list) {
790                 /*
791                  * If the pfn range we are dealing with is not in the current
792                  * "hot add block", move on.
793                  */
794                 if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
795                         continue;
796
797                 /*
798                  * If the current start pfn is not where the covered_end
799                  * is, create a gap and update covered_end_pfn.
800                  */
801                 if (has->covered_end_pfn != start_pfn) {
802                         gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
803                         if (!gap) {
804                                 ret = -ENOMEM;
805                                 break;
806                         }
807
808                         INIT_LIST_HEAD(&gap->list);
809                         gap->start_pfn = has->covered_end_pfn;
810                         gap->end_pfn = start_pfn;
811                         list_add_tail(&gap->list, &has->gap_list);
812
813                         has->covered_end_pfn = start_pfn;
814                 }
815
816                 /*
817                  * If the current hot-add request extends beyond
818                  * our current limit, extend it.
819                  */
820                 if ((start_pfn + pfn_cnt) > has->end_pfn) {
821                         /* Extend the region by multiples of ha_pages_in_chunk */
822                         residual = (start_pfn + pfn_cnt - has->end_pfn);
823                         has->end_pfn += ALIGN(residual, ha_pages_in_chunk);
824                 }
825
826                 ret = 1;
827                 break;
828         }
829
830         return ret;
831 }
832
833 static unsigned long handle_pg_range(unsigned long pg_start,
834                                      unsigned long pg_count)
835 {
836         unsigned long start_pfn = pg_start;
837         unsigned long pfn_cnt = pg_count;
838         unsigned long size;
839         struct hv_hotadd_state *has;
840         unsigned long pgs_ol = 0;
841         unsigned long old_covered_state;
842         unsigned long res = 0, flags;
843
844         pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
845                  pg_start);
846
847         spin_lock_irqsave(&dm_device.ha_lock, flags);
848         list_for_each_entry(has, &dm_device.ha_region_list, list) {
849                 /*
850                  * If the pfn range we are dealing with is not in the current
851                  * "hot add block", move on.
852                  */
853                 if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
854                         continue;
855
856                 old_covered_state = has->covered_end_pfn;
857
858                 if (start_pfn < has->ha_end_pfn) {
859                         /*
860                          * This is the case where we are backing pages
861                          * in an already hot added region. Bring
862                          * these pages online first.
863                          */
864                         pgs_ol = has->ha_end_pfn - start_pfn;
865                         if (pgs_ol > pfn_cnt)
866                                 pgs_ol = pfn_cnt;
867
868                         has->covered_end_pfn +=  pgs_ol;
869                         pfn_cnt -= pgs_ol;
870                         /*
871                          * Check if the corresponding memory block is already
872                          * online. It is possible to observe struct pages still
873                          * being uninitialized here so check section instead.
874                          * In case the section is online we need to bring the
875                          * rest of pfns (which were not backed previously)
876                          * online too.
877                          */
878                         if (start_pfn > has->start_pfn &&
879                             online_section_nr(pfn_to_section_nr(start_pfn)))
880                                 hv_bring_pgs_online(has, start_pfn, pgs_ol);
881                 }
882
883                 if (has->ha_end_pfn < has->end_pfn && pfn_cnt > 0) {
884                         /*
885                          * We have some residual hot add range
886                          * that needs to be hot added; hot add
887                          * it now. Hot add a multiple of
888                          * ha_pages_in_chunk that fully covers the pages
889                          * we have.
890                          */
891                         size = (has->end_pfn - has->ha_end_pfn);
892                         if (pfn_cnt <= size) {
893                                 size = ALIGN(pfn_cnt, ha_pages_in_chunk);
894                         } else {
895                                 pfn_cnt = size;
896                         }
897                         spin_unlock_irqrestore(&dm_device.ha_lock, flags);
898                         hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
899                         spin_lock_irqsave(&dm_device.ha_lock, flags);
900                 }
901                 /*
902                  * If we managed to online any pages that were given to us,
903                  * we declare success.
904                  */
905                 res = has->covered_end_pfn - old_covered_state;
906                 break;
907         }
908         spin_unlock_irqrestore(&dm_device.ha_lock, flags);
909
910         return res;
911 }
912
913 static unsigned long process_hot_add(unsigned long pg_start,
914                                         unsigned long pfn_cnt,
915                                         unsigned long rg_start,
916                                         unsigned long rg_size)
917 {
918         struct hv_hotadd_state *ha_region = NULL;
919         int covered;
920
921         if (pfn_cnt == 0)
922                 return 0;
923
924         if (!dm_device.host_specified_ha_region) {
925                 covered = pfn_covered(pg_start, pfn_cnt);
926                 if (covered < 0)
927                         return 0;
928
929                 if (covered)
930                         goto do_pg_range;
931         }
932
933         /*
934          * If the host has specified a hot-add range, deal with it first.
935          */
936
937         if (rg_size != 0) {
938                 ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
939                 if (!ha_region)
940                         return 0;
941
942                 INIT_LIST_HEAD(&ha_region->list);
943                 INIT_LIST_HEAD(&ha_region->gap_list);
944
945                 ha_region->start_pfn = rg_start;
946                 ha_region->ha_end_pfn = rg_start;
947                 ha_region->covered_start_pfn = pg_start;
948                 ha_region->covered_end_pfn = pg_start;
949                 ha_region->end_pfn = rg_start + rg_size;
950
951                 scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
952                         list_add_tail(&ha_region->list, &dm_device.ha_region_list);
953                 }
954         }
955
956 do_pg_range:
957          * Process the specified page range, bringing the pages
958          * online if possible.
959          * online if possible.
960          */
961         return handle_pg_range(pg_start, pfn_cnt);
962 }
963
964 #endif
965
966 static void hot_add_req(struct work_struct *dummy)
967 {
968         struct dm_hot_add_response resp;
969 #ifdef CONFIG_MEMORY_HOTPLUG
970         unsigned long pg_start, pfn_cnt;
971         unsigned long rg_start, rg_sz;
972 #endif
973         struct hv_dynmem_device *dm = &dm_device;
974
975         memset(&resp, 0, sizeof(struct dm_hot_add_response));
976         resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
977         resp.hdr.size = sizeof(struct dm_hot_add_response);
978
979 #ifdef CONFIG_MEMORY_HOTPLUG
980         pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
981         pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
982
983         rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
984         rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
985
986         if (rg_start == 0 && !dm->host_specified_ha_region) {
987                 /*
988                  * The host has not specified the hot-add region.
989                  * Based on the hot-add page range being specified,
990                  * compute a hot-add region that can cover the pages
991                  * that need to be hot-added while ensuring the alignment
992                  * and size requirements of Linux as it relates to hot-add.
993                  */
994                 rg_start = ALIGN_DOWN(pg_start, ha_pages_in_chunk);
995                 rg_sz = ALIGN(pfn_cnt, ha_pages_in_chunk);
996         }
997
998         if (do_hot_add)
999                 resp.page_count = process_hot_add(pg_start, pfn_cnt,
1000                                                   rg_start, rg_sz);
1001
1002         dm->num_pages_added += resp.page_count;
1003 #endif
1004         /*
1005          * The result field of the response structure has the
1006          * following semantics:
1007          *
1008          * 1. If all or some pages hot-added: Guest should return success.
1009          *
1010          * 2. If no pages could be hot-added:
1011          *
1012          * If the guest returns success, then the host
1013          * will not attempt any further hot-add operations. This
1014          * signifies a permanent failure.
1015          *
1016          * If the guest returns failure, then this failure will be
1017          * treated as a transient failure and the host may retry the
1018          * hot-add operation after some delay.
1019          */
1020         if (resp.page_count > 0)
1021                 resp.result = 1;
1022         else if (!do_hot_add)
1023                 resp.result = 1;
1024         else
1025                 resp.result = 0;
1026
1027         if (!do_hot_add || resp.page_count == 0) {
1028                 if (!allow_hibernation)
1029                         pr_err("Memory hot add failed\n");
1030                 else
1031                         pr_info("Ignore hot-add request!\n");
1032         }
1033
1034         dm->state = DM_INITIALIZED;
1035         resp.hdr.trans_id = atomic_inc_return(&trans_id);
1036         vmbus_sendpacket(dm->dev->channel, &resp,
1037                         sizeof(struct dm_hot_add_response),
1038                         (unsigned long)NULL,
1039                         VM_PKT_DATA_INBAND, 0);
1040 }
1041
1042 static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
1043 {
1044         struct dm_info_header *info_hdr;
1045
1046         info_hdr = (struct dm_info_header *)msg->info;
1047
1048         switch (info_hdr->type) {
1049         case INFO_TYPE_MAX_PAGE_CNT:
1050                 if (info_hdr->data_size == sizeof(__u64)) {
1051                         __u64 *max_page_count = (__u64 *)&info_hdr[1];
1052
1053                         pr_info("Max. dynamic memory size: %llu MB\n",
1054                                 (*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
1055                         dm->max_dynamic_page_count = *max_page_count;
1056                 }
1057
1058                 break;
1059         default:
1060                 pr_warn("Received Unknown type: %d\n", info_hdr->type);
1061         }
1062 }
1063
1064 static unsigned long compute_balloon_floor(void)
1065 {
1066         unsigned long min_pages;
1067         unsigned long nr_pages = totalram_pages();
1068 #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
1069         /* Simple continuous piecewise linear function:
1070          *  max MiB -> min MiB  gradient
1071          *       0         0
1072          *      16        16
1073          *      32        24
1074          *     128        72    (1/2)
1075          *     512       168    (1/4)
1076          *    2048       360    (1/8)
1077          *    8192       744    (1/16)
1078          *   32768      1512    (1/32)
1079          */
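        /*
         * Worked example (illustrative): with 1024 MiB of RAM, the
         * 512..2048 MiB bracket applies, giving a floor of
         * 104 MiB + 1024/8 MiB = 232 MiB below which we will not balloon.
         */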
1080         if (nr_pages < MB2PAGES(128))
1081                 min_pages = MB2PAGES(8) + (nr_pages >> 1);
1082         else if (nr_pages < MB2PAGES(512))
1083                 min_pages = MB2PAGES(40) + (nr_pages >> 2);
1084         else if (nr_pages < MB2PAGES(2048))
1085                 min_pages = MB2PAGES(104) + (nr_pages >> 3);
1086         else if (nr_pages < MB2PAGES(8192))
1087                 min_pages = MB2PAGES(232) + (nr_pages >> 4);
1088         else
1089                 min_pages = MB2PAGES(488) + (nr_pages >> 5);
1090 #undef MB2PAGES
1091         return min_pages;
1092 }
1093
1094 /*
1095  * Compute total committed memory pages
1096  */
1097
1098 static unsigned long get_pages_committed(struct hv_dynmem_device *dm)
1099 {
1100         return vm_memory_committed() +
1101                 dm->num_pages_ballooned +
1102                 (dm->num_pages_added > dm->num_pages_onlined ?
1103                  dm->num_pages_added - dm->num_pages_onlined : 0) +
1104                 compute_balloon_floor();
1105 }
1106
1107 /*
1108  * Post our memory pressure status to the
1109  * host. The host expects the guest to post this status
1110  * periodically at 1 second intervals.
1111  *
1112  * The metrics specified in this protocol are very Windows
1113  * specific and so we cook up numbers here to convey our memory
1114  * pressure.
1115  */
1116
1117 static void post_status(struct hv_dynmem_device *dm)
1118 {
1119         struct dm_status status;
1120         unsigned long now = jiffies;
1121         unsigned long last_post = last_post_time;
1122         unsigned long num_pages_avail, num_pages_committed;
1123
1124         if (pressure_report_delay > 0) {
1125                 --pressure_report_delay;
1126                 return;
1127         }
1128
1129         if (!time_after(now, (last_post_time + HZ)))
1130                 return;
1131
1132         memset(&status, 0, sizeof(struct dm_status));
1133         status.hdr.type = DM_STATUS_REPORT;
1134         status.hdr.size = sizeof(struct dm_status);
1135         status.hdr.trans_id = atomic_inc_return(&trans_id);
1136
1137         /*
1138          * The host expects the guest to report free and committed memory.
1139          * Furthermore, the host expects the pressure information to include
1140          * the ballooned out pages. For a given amount of memory that we are
1141          * managing we need to compute a floor below which we should not
1142          * balloon. Compute this and add it to the pressure report.
1143          * We also need to report all offline pages (num_pages_added -
1144          * num_pages_onlined) as committed to the host, otherwise it can try
1145          * asking us to balloon them out.
1146          */
1147         num_pages_avail = si_mem_available();
1148         num_pages_committed = get_pages_committed(dm);
1149
1150         trace_balloon_status(num_pages_avail, num_pages_committed,
1151                              vm_memory_committed(), dm->num_pages_ballooned,
1152                              dm->num_pages_added, dm->num_pages_onlined);
1153
1154         /* Convert numbers of pages into numbers of HV_HYP_PAGEs. */
1155         status.num_avail = num_pages_avail * NR_HV_HYP_PAGES_IN_PAGE;
1156         status.num_committed = num_pages_committed * NR_HV_HYP_PAGES_IN_PAGE;
1157
1158         /*
1159          * If our transaction ID is no longer current, just don't
1160          * send the status. This can happen if we were interrupted
1161          * after we picked our transaction ID.
1162          */
1163         if (status.hdr.trans_id != atomic_read(&trans_id))
1164                 return;
1165
1166         /*
1167          * If the last post time that we sampled has changed,
1168          * we have raced, don't post the status.
1169          */
1170         if (last_post != last_post_time)
1171                 return;
1172
1173         last_post_time = jiffies;
1174         vmbus_sendpacket(dm->dev->channel, &status,
1175                                 sizeof(struct dm_status),
1176                                 (unsigned long)NULL,
1177                                 VM_PKT_DATA_INBAND, 0);
1178 }
1179
1180 static void free_balloon_pages(struct hv_dynmem_device *dm,
1181                                union dm_mem_page_range *range_array)
1182 {
1183         int num_pages = range_array->finfo.page_cnt;
1184         __u64 start_frame = range_array->finfo.start_page;
1185         struct page *pg;
1186         int i;
1187
1188         for (i = 0; i < num_pages; i++) {
1189                 pg = pfn_to_page(i + start_frame);
1190                 __ClearPageOffline(pg);
1191                 __free_page(pg);
1192                 dm->num_pages_ballooned--;
1193                 adjust_managed_page_count(pg, 1);
1194         }
1195 }
1196
1197 static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
1198                                         unsigned int num_pages,
1199                                         struct dm_balloon_response *bl_resp,
1200                                         int alloc_unit)
1201 {
1202         unsigned int i, j;
1203         struct page *pg;
1204
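        /*
         * Size note (derived from the structures above): each reported range
         * adds one 8-byte dm_mem_page_range entry, so a single response packet
         * holds roughly (HV_HYP_PAGE_SIZE - sizeof(struct dm_balloon_response)) / 8
         * ranges; the check below stops filling the response at that point.
         */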
1205         for (i = 0; i < num_pages / alloc_unit; i++) {
1206                 if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
1207                         HV_HYP_PAGE_SIZE)
1208                         return i * alloc_unit;
1209
1210                 /*
1211                  * We execute this code in a thread context. Furthermore,
1212                  * we don't want the kernel to try too hard.
1213                  */
1214                 pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
1215                                 __GFP_NOMEMALLOC | __GFP_NOWARN,
1216                                 get_order(alloc_unit << PAGE_SHIFT));
1217
1218                 if (!pg)
1219                         return i * alloc_unit;
1220
1221                 dm->num_pages_ballooned += alloc_unit;
1222
1223                 /*
1224                  * If we allocated 2M pages, split them so we
1225                  * can free them in any order we get.
1226                  */
1227
1228                 if (alloc_unit != 1)
1229                         split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
1230
1231                 /* mark all pages offline */
1232                 for (j = 0; j < alloc_unit; j++) {
1233                         __SetPageOffline(pg + j);
1234                         adjust_managed_page_count(pg + j, -1);
1235                 }
1236
1237                 bl_resp->range_count++;
1238                 bl_resp->range_array[i].finfo.start_page =
1239                         page_to_pfn(pg);
1240                 bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
1241                 bl_resp->hdr.size += sizeof(union dm_mem_page_range);
1242         }
1243
1244         return i * alloc_unit;
1245 }
1246
1247 static void balloon_up(struct work_struct *dummy)
1248 {
1249         unsigned int num_pages = dm_device.balloon_wrk.num_pages;
1250         unsigned int num_ballooned = 0;
1251         struct dm_balloon_response *bl_resp;
1252         int alloc_unit;
1253         int ret;
1254         bool done = false;
1255         int i;
1256         long avail_pages;
1257         unsigned long floor;
1258
1259         /*
1260          * We will attempt 2M allocations. However, if we fail to
1261          * allocate 2M chunks, we will go back to PAGE_SIZE allocations.
1262          */
1263         alloc_unit = PAGES_IN_2M;
1264
1265         avail_pages = si_mem_available();
1266         floor = compute_balloon_floor();
1267
1268         /* Refuse to balloon below the floor. */
1269         if (avail_pages < num_pages || avail_pages - num_pages < floor) {
1270                 pr_info("Balloon request will be partially fulfilled. %s\n",
1271                         avail_pages < num_pages ? "Not enough memory." :
1272                         "Balloon floor reached.");
1273
1274                 num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
1275         }
1276
1277         while (!done) {
1278                 memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE);
1279                 bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer;
1280                 bl_resp->hdr.type = DM_BALLOON_RESPONSE;
1281                 bl_resp->hdr.size = sizeof(struct dm_balloon_response);
1282                 bl_resp->more_pages = 1;
1283
1284                 num_pages -= num_ballooned;
1285                 num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
1286                                                     bl_resp, alloc_unit);
1287
1288                 if (alloc_unit != 1 && num_ballooned == 0) {
1289                         alloc_unit = 1;
1290                         continue;
1291                 }
1292
1293                 if (num_ballooned == 0 || num_ballooned == num_pages) {
1294                         pr_debug("Ballooned %u out of %u requested pages.\n",
1295                                  num_pages, dm_device.balloon_wrk.num_pages);
1296
1297                         bl_resp->more_pages = 0;
1298                         done = true;
1299                         dm_device.state = DM_INITIALIZED;
1300                 }
1301
1302                 /*
1303                  * We are pushing a lot of data through the channel;
1304          * deal with transient failures caused by the
1305                  * lack of space in the ring buffer.
1306                  */
1307
1308                 do {
1309                         bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
1310                         ret = vmbus_sendpacket(dm_device.dev->channel,
1311                                                 bl_resp,
1312                                                 bl_resp->hdr.size,
1313                                                 (unsigned long)NULL,
1314                                                 VM_PKT_DATA_INBAND, 0);
1315
1316                         if (ret == -EAGAIN)
1317                                 msleep(20);
1318                         post_status(&dm_device);
1319                 } while (ret == -EAGAIN);
1320
1321                 if (ret) {
1322                         /*
1323                          * Free up the memory we allocated.
1324                          */
1325                         pr_err("Balloon response failed\n");
1326
1327                         for (i = 0; i < bl_resp->range_count; i++)
1328                                 free_balloon_pages(&dm_device,
1329                                                    &bl_resp->range_array[i]);
1330
1331                         done = true;
1332                 }
1333         }
1334 }
1335
1336 static void balloon_down(struct hv_dynmem_device *dm,
1337                          struct dm_unballoon_request *req)
1338 {
1339         union dm_mem_page_range *range_array = req->range_array;
1340         int range_count = req->range_count;
1341         struct dm_unballoon_response resp;
1342         int i;
1343         unsigned int prev_pages_ballooned = dm->num_pages_ballooned;
1344
1345         for (i = 0; i < range_count; i++) {
1346                 free_balloon_pages(dm, &range_array[i]);
1347                 complete(&dm_device.config_event);
1348         }
1349
1350         pr_debug("Freed %u ballooned pages.\n",
1351                  prev_pages_ballooned - dm->num_pages_ballooned);
1352
1353         if (req->more_pages == 1)
1354                 return;
1355
1356         memset(&resp, 0, sizeof(struct dm_unballoon_response));
1357         resp.hdr.type = DM_UNBALLOON_RESPONSE;
1358         resp.hdr.trans_id = atomic_inc_return(&trans_id);
1359         resp.hdr.size = sizeof(struct dm_unballoon_response);
1360
1361         vmbus_sendpacket(dm_device.dev->channel, &resp,
1362                                 sizeof(struct dm_unballoon_response),
1363                                 (unsigned long)NULL,
1364                                 VM_PKT_DATA_INBAND, 0);
1365
1366         dm->state = DM_INITIALIZED;
1367 }
1368
1369 static void balloon_onchannelcallback(void *context);
1370
1371 static int dm_thread_func(void *dm_dev)
1372 {
1373         struct hv_dynmem_device *dm = dm_dev;
1374
1375         while (!kthread_should_stop()) {
1376                 wait_for_completion_interruptible_timeout(&dm_device.config_event, 1 * HZ);
1377                 /*
1378                  * The host expects us to post information on the memory
1379                  * pressure every second.
1380                  */
1381                 reinit_completion(&dm_device.config_event);
1382                 post_status(dm);
1383                 /*
1384                  * Disable free page reporting if the multiple hypercall
1385                  * failure flag is set. It is not done in the page_reporting
1386                  * callback context as that causes a deadlock between
1387                  * page_reporting_process() and page_reporting_unregister()
1388                  */
1389                 if (hv_hypercall_multi_failure >= HV_MAX_FAILURES) {
1390                         pr_err("Multiple failures in cold memory discard hypercall, disabling page reporting\n");
1391                         disable_page_reporting();
1392                         /* Reset the flag after disabling reporting */
1393                         hv_hypercall_multi_failure = 0;
1394                 }
1395         }
1396
1397         return 0;
1398 }
1399
1400 static void version_resp(struct hv_dynmem_device *dm,
1401                          struct dm_version_response *vresp)
1402 {
1403         struct dm_version_request version_req;
1404         int ret;
1405
1406         if (vresp->is_accepted) {
1407                 /*
1408                  * We are done; wake up the
1409                  * context waiting for version
1410                  * negotiation.
1411                  */
1412                 complete(&dm->host_event);
1413                 return;
1414         }
1415         /*
1416          * If there are more versions to try, continue
1417          * with negotiations; if not,
1418          * shut down the service since we are not able
1419          * to negotiate a suitable version number
1420          * with the host.
1421          */
1422         if (dm->next_version == 0)
1423                 goto version_error;
1424
1425         memset(&version_req, 0, sizeof(struct dm_version_request));
1426         version_req.hdr.type = DM_VERSION_REQUEST;
1427         version_req.hdr.size = sizeof(struct dm_version_request);
1428         version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1429         version_req.version.version = dm->next_version;
1430         dm->version = version_req.version.version;
1431
1432         /*
1433          * Set the next version to try in case current version fails.
1434          * Win7 protocol ought to be the last one to try.
1435          */
1436         switch (version_req.version.version) {
1437         case DYNMEM_PROTOCOL_VERSION_WIN8:
1438                 dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
1439                 version_req.is_last_attempt = 0;
1440                 break;
1441         default:
1442                 dm->next_version = 0;
1443                 version_req.is_last_attempt = 1;
1444         }
1445
1446         ret = vmbus_sendpacket(dm->dev->channel, &version_req,
1447                                 sizeof(struct dm_version_request),
1448                                 (unsigned long)NULL,
1449                                 VM_PKT_DATA_INBAND, 0);
1450
1451         if (ret)
1452                 goto version_error;
1453
1454         return;
1455
1456 version_error:
1457         dm->state = DM_INIT_ERROR;
1458         complete(&dm->host_event);
1459 }
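
/*
 * Negotiation order, as implied by the enum definitions and the fallback
 * logic above: the driver starts with DYNMEM_PROTOCOL_VERSION_WIN10 (2.0),
 * falls back to WIN8 (1.0), and finally tries WIN7 (0.3) as the last
 * attempt before entering DM_INIT_ERROR.
 */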
1460
1461 static void cap_resp(struct hv_dynmem_device *dm,
1462                      struct dm_capabilities_resp_msg *cap_resp)
1463 {
1464         if (!cap_resp->is_accepted) {
1465                 pr_err("Capabilities not accepted by host\n");
1466                 dm->state = DM_INIT_ERROR;
1467         }
1468         complete(&dm->host_event);
1469 }
1470
1471 static void balloon_onchannelcallback(void *context)
1472 {
1473         struct hv_device *dev = context;
1474         u32 recvlen;
1475         u64 requestid;
1476         struct dm_message *dm_msg;
1477         struct dm_header *dm_hdr;
1478         struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1479         struct dm_balloon *bal_msg;
1480         struct dm_hot_add *ha_msg;
1481         union dm_mem_page_range *ha_pg_range;
1482         union dm_mem_page_range *ha_region;
1483
1484         memset(recv_buffer, 0, sizeof(recv_buffer));
1485         vmbus_recvpacket(dev->channel, recv_buffer,
1486                          HV_HYP_PAGE_SIZE, &recvlen, &requestid);
1487
1488         if (recvlen > 0) {
1489                 dm_msg = (struct dm_message *)recv_buffer;
1490                 dm_hdr = &dm_msg->hdr;
1491
1492                 switch (dm_hdr->type) {
1493                 case DM_VERSION_RESPONSE:
1494                         version_resp(dm,
1495                                      (struct dm_version_response *)dm_msg);
1496                         break;
1497
1498                 case DM_CAPABILITIES_RESPONSE:
1499                         cap_resp(dm,
1500                                  (struct dm_capabilities_resp_msg *)dm_msg);
1501                         break;
1502
1503                 case DM_BALLOON_REQUEST:
1504                         if (allow_hibernation) {
1505                                 pr_info("Ignore balloon-up request!\n");
1506                                 break;
1507                         }
1508
1509                         if (dm->state == DM_BALLOON_UP)
1510                                 pr_warn("Currently ballooning\n");
1511                         bal_msg = (struct dm_balloon *)recv_buffer;
1512                         dm->state = DM_BALLOON_UP;
1513                         dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
1514                         schedule_work(&dm_device.balloon_wrk.wrk);
1515                         break;
1516
1517                 case DM_UNBALLOON_REQUEST:
1518                         if (allow_hibernation) {
1519                                 pr_info("Ignore balloon-down request!\n");
1520                                 break;
1521                         }
1522
1523                         dm->state = DM_BALLOON_DOWN;
1524                         balloon_down(dm,
1525                                      (struct dm_unballoon_request *)recv_buffer);
1526                         break;
1527
1528                 case DM_MEM_HOT_ADD_REQUEST:
1529                         if (dm->state == DM_HOT_ADD)
1530                                 pr_warn("Currently hot-adding\n");
1531                         dm->state = DM_HOT_ADD;
1532                         ha_msg = (struct dm_hot_add *)recv_buffer;
1533                         if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
1534                                 /*
1535                                  * This is a normal hot-add request specifying
1536                                  * hot-add memory.
1537                                  */
1538                                 dm->host_specified_ha_region = false;
1539                                 ha_pg_range = &ha_msg->range;
1540                                 dm->ha_wrk.ha_page_range = *ha_pg_range;
1541                                 dm->ha_wrk.ha_region_range.page_range = 0;
1542                         } else {
1543                                 /*
1544                                  * Host is specifying that we first hot-add
1545                                  * a region and then partially populate this
1546                                  * region.
1547                                  */
1548                                 dm->host_specified_ha_region = true;
1549                                 ha_pg_range = &ha_msg->range;
1550                                 ha_region = &ha_pg_range[1];
1551                                 dm->ha_wrk.ha_page_range = *ha_pg_range;
1552                                 dm->ha_wrk.ha_region_range = *ha_region;
1553                         }
1554                         schedule_work(&dm_device.ha_wrk.wrk);
1555                         break;
1556
1557                 case DM_INFO_MESSAGE:
1558                         process_info(dm, (struct dm_info_msg *)dm_msg);
1559                         break;
1560
1561                 default:
1562                         pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type);
1563                 }
1564         }
1565 }
1566
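/*
 * With the 4 KiB hypervisor page size, order 9 corresponds to 2 MiB: free
 * ranges of at least this size are reported to Hyper-V as large pages in
 * hv_free_page_report() below.
 */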
1567 #define HV_LARGE_REPORTING_ORDER        9
1568 #define HV_LARGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << \
1569                 HV_LARGE_REPORTING_ORDER)
1570 static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
1571                                struct scatterlist *sgl, unsigned int nents)
1572 {
1573         unsigned long flags;
1574         struct hv_memory_hint *hint;
1575         int i, order;
1576         u64 status;
1577         struct scatterlist *sg;
1578
1579         WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
1580         WARN_ON_ONCE(sgl->length < (HV_HYP_PAGE_SIZE << page_reporting_order));
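        /*
         * Build the hint in the per-CPU hypercall input page. Interrupts stay
         * disabled so the page cannot be reused before the hypercall below
         * has consumed it.
         */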
1581         local_irq_save(flags);
1582         hint = *this_cpu_ptr(hyperv_pcpu_input_arg);
1583         if (!hint) {
1584                 local_irq_restore(flags);
1585                 return -ENOSPC;
1586         }
1587
1588         hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD;
1589         hint->reserved = 0;
1590         for_each_sg(sgl, sg, nents, i) {
1591                 union hv_gpa_page_range *range;
1592
1593                 range = &hint->ranges[i];
1594                 range->address_space = 0;
1595                 order = get_order(sg->length);
1596                 /*
1597                  * Hyper-V expects the additional_pages field in units of
1598                  * one of these 3 sizes: 4 KiB, 2 MiB or 1 GiB. Which unit
1599                  * applies is dictated by the values of the page.largepage
1600                  * and page_size fields.
1601                  * This code, however, only uses the 4 KiB and 2 MiB units,
1602                  * not the 1 GiB unit.
1603                  */
1604
1605                 /* page reporting for pages 2MB or higher */
1606                 if (order >= HV_LARGE_REPORTING_ORDER) {
1607                         range->page.largepage = 1;
1608                         range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
1609                         range->base_large_pfn = page_to_hvpfn(
1610                                         sg_page(sg)) >> HV_LARGE_REPORTING_ORDER;
1611                         range->page.additional_pages =
1612                                 (sg->length / HV_LARGE_REPORTING_LEN) - 1;
1613                 } else {
1614                         /* Page reporting for pages below 2MB */
1615                         range->page.basepfn = page_to_hvpfn(sg_page(sg));
1616                         range->page.largepage = false;
1617                         range->page.additional_pages =
1618                                 (sg->length / HV_HYP_PAGE_SIZE) - 1;
1619                 }
1620         }
1621
1622         status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
1623                                      hint, NULL);
1624         local_irq_restore(flags);
1625         if (!hv_result_success(status)) {
1626                 pr_err("Cold memory discard hypercall failed with status %llx\n",
1627                        status);
1628                 if (hv_hypercall_multi_failure > 0)
1629                         hv_hypercall_multi_failure++;
1630
1631                 if (hv_result(status) == HV_STATUS_INVALID_PARAMETER) {
1632                         pr_err("Underlying Hyper-V does not support order less than 9. Hypercall failed\n");
1633                         pr_err("Defaulting to page_reporting_order %d\n",
1634                                pageblock_order);
1635                         page_reporting_order = pageblock_order;
1636                         hv_hypercall_multi_failure++;
1637                         return -EINVAL;
1638                 }
1639
1640                 return -EINVAL;
1641         }
1642
1643         return 0;
1644 }
1645
1646 static void enable_page_reporting(void)
1647 {
1648         int ret;
1649
1650         if (!hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT)) {
1651                 pr_debug("Cold memory discard hint not supported by Hyper-V\n");
1652                 return;
1653         }
1654
1655         BUILD_BUG_ON(PAGE_REPORTING_CAPACITY > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
1656         dm_device.pr_dev_info.report = hv_free_page_report;
1657         /*
1658          * We let the page_reporting_order parameter decide the order
1659          * in the page_reporting code.
1660          */
1661         dm_device.pr_dev_info.order = 0;
1662         ret = page_reporting_register(&dm_device.pr_dev_info);
1663         if (ret < 0) {
1664                 dm_device.pr_dev_info.report = NULL;
1665                 pr_err("Failed to enable cold memory discard: %d\n", ret);
1666         } else {
1667                 pr_info("Cold memory discard hint enabled with order %d\n",
1668                         page_reporting_order);
1669         }
1670 }
1671
1672 static void disable_page_reporting(void)
1673 {
1674         if (dm_device.pr_dev_info.report) {
1675                 page_reporting_unregister(&dm_device.pr_dev_info);
1676                 dm_device.pr_dev_info.report = NULL;
1677         }
1678 }
1679
1680 static int ballooning_enabled(void)
1681 {
1682         /*
1683          * Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE),
1684          * since currently it's unclear to us whether an unballoon request can
1685          * make sure all page ranges are guest page size aligned.
1686          */
1687         if (PAGE_SIZE != HV_HYP_PAGE_SIZE) {
1688                 pr_info("Ballooning disabled because page size is not 4096 bytes\n");
1689                 return 0;
1690         }
1691
1692         return 1;
1693 }
1694
1695 static int hot_add_enabled(void)
1696 {
1697         /*
1698          * Disable hot add on ARM64, because we currently rely on
1699          * memory_add_physaddr_to_nid() to get a node id of a hot add range,
1700          * however ARM64's memory_add_physaddr_to_nid() always returns 0 and
1701          * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for
1702          * add_memory().
1703          */
1704         if (IS_ENABLED(CONFIG_ARM64)) {
1705                 pr_info("Memory hot add disabled on ARM64\n");
1706                 return 0;
1707         }
1708
1709         return 1;
1710 }
1711
1712 static int balloon_connect_vsp(struct hv_device *dev)
1713 {
1714         struct dm_version_request version_req;
1715         struct dm_capabilities cap_msg;
1716         unsigned long t;
1717         int ret;
1718
1719         /*
1720          * max_pkt_size should be large enough for one vmbus packet header plus
1721          * our receive buffer size. Hyper-V sends messages up to
1722          * HV_HYP_PAGE_SIZE bytes long on balloon channel.
1723          */
1724         dev->channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;
1725
1726         ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
1727                          balloon_onchannelcallback, dev);
1728         if (ret)
1729                 return ret;
1730
1731         /*
1732          * Initiate the handshake with the host and negotiate
1733          * a version that the host can support. We start with the
1734          * highest version number and go down if the host cannot
1735          * support it.
1736          */
1737         memset(&version_req, 0, sizeof(struct dm_version_request));
1738         version_req.hdr.type = DM_VERSION_REQUEST;
1739         version_req.hdr.size = sizeof(struct dm_version_request);
1740         version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1741         version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
1742         version_req.is_last_attempt = 0;
1743         dm_device.version = version_req.version.version;
1744
1745         ret = vmbus_sendpacket(dev->channel, &version_req,
1746                                sizeof(struct dm_version_request),
1747                                (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
1748         if (ret)
1749                 goto out;
1750
1751         t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
1752         if (t == 0) {
1753                 ret = -ETIMEDOUT;
1754                 goto out;
1755         }
1756
1757         /*
1758          * If we could not negotiate a compatible version with the host,
1759          * fail the probe function.
1760          */
1761         if (dm_device.state == DM_INIT_ERROR) {
1762                 ret = -EPROTO;
1763                 goto out;
1764         }
1765
1766         pr_info("Using Dynamic Memory protocol version %u.%u\n",
1767                 DYNMEM_MAJOR_VERSION(dm_device.version),
1768                 DYNMEM_MINOR_VERSION(dm_device.version));
1769
1770         /*
1771          * Now submit our capabilities to the host.
1772          */
1773         memset(&cap_msg, 0, sizeof(struct dm_capabilities));
1774         cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
1775         cap_msg.hdr.size = sizeof(struct dm_capabilities);
1776         cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
1777
1778         /*
1779          * When hibernation (i.e. virtual ACPI S4 state) is enabled, the host
1780          * currently still requires these bits to be set, so keep them set here
1781          * and fail or ignore the host's hot-add and balloon up/down requests.
1782          */
1783         cap_msg.caps.cap_bits.balloon = ballooning_enabled();
1784         cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
1785
1786         /*
1787          * Specify our alignment requirements for memory hot-add. The value is
1788          * the log base 2 of the number of megabytes in a chunk. For example,
1789          * with 256 MiB chunks, the value is 8. The number of MiB in a chunk
1790          * must be a power of 2.
1791          */
1792         cap_msg.caps.cap_bits.hot_add_alignment =
1793                                         ilog2(HA_BYTES_IN_CHUNK / SZ_1M);
1794
1795         /*
1796          * Currently the host does not use these
1797          * values and we set them to what is done in the
1798          * Windows driver.
1799          */
1800         cap_msg.min_page_cnt = 0;
1801         cap_msg.max_page_number = -1;
1802
1803         ret = vmbus_sendpacket(dev->channel, &cap_msg,
1804                                sizeof(struct dm_capabilities),
1805                                (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
1806         if (ret)
1807                 goto out;
1808
1809         t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
1810         if (t == 0) {
1811                 ret = -ETIMEDOUT;
1812                 goto out;
1813         }
1814
1815         /*
1816          * If the host does not like our capabilities,
1817          * fail the probe function.
1818          */
1819         if (dm_device.state == DM_INIT_ERROR) {
1820                 ret = -EPROTO;
1821                 goto out;
1822         }
1823
1824         return 0;
1825 out:
1826         vmbus_close(dev->channel);
1827         return ret;
1828 }
1829
1830 /*
1831  * DEBUGFS Interface
1832  */
1833 #ifdef CONFIG_DEBUG_FS
1834
1835 /**
1836  * hv_balloon_debug_show - shows statistics of balloon operations.
1837  * @f: pointer to the &struct seq_file.
1838  * @offset: ignored.
1839  *
1840  * Provides the hv_balloon statistics exposed via debugfs.
1841  *
1842  * Return: zero on success or an error code.
1843  */
1844 static int hv_balloon_debug_show(struct seq_file *f, void *offset)
1845 {
1846         struct hv_dynmem_device *dm = f->private;
1847         char *sname;
1848
1849         seq_printf(f, "%-22s: %u.%u\n", "host_version",
1850                         DYNMEM_MAJOR_VERSION(dm->version),
1851                         DYNMEM_MINOR_VERSION(dm->version));
1852
1853         seq_printf(f, "%-22s:", "capabilities");
1854         if (ballooning_enabled())
1855                 seq_puts(f, " enabled");
1856
1857         if (hot_add_enabled())
1858                 seq_puts(f, " hot_add");
1859
1860         seq_puts(f, "\n");
1861
1862         seq_printf(f, "%-22s: %u", "state", dm->state);
1863         switch (dm->state) {
1864         case DM_INITIALIZING:
1865                 sname = "Initializing";
1866                 break;
1867         case DM_INITIALIZED:
1868                 sname = "Initialized";
1869                 break;
1870         case DM_BALLOON_UP:
1871                 sname = "Balloon Up";
1872                 break;
1873         case DM_BALLOON_DOWN:
1874                 sname = "Balloon Down";
1875                 break;
1876         case DM_HOT_ADD:
1877                 sname = "Hot Add";
1878                 break;
1879         case DM_INIT_ERROR:
1880                 sname = "Error";
1881                 break;
1882         default:
1883                 sname = "Unknown";
1884         }
1885         seq_printf(f, " (%s)\n", sname);
1886
1887         /* HV Page Size */
1888         seq_printf(f, "%-22s: %ld\n", "page_size", HV_HYP_PAGE_SIZE);
1889
1890         /* Pages added with hot_add */
1891         seq_printf(f, "%-22s: %u\n", "pages_added", dm->num_pages_added);
1892
1893         /* pages that are "onlined"/used from pages_added */
1894         seq_printf(f, "%-22s: %u\n", "pages_onlined", dm->num_pages_onlined);
1895
1896         /* pages we have given back to host */
1897         seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned);
1898
1899         seq_printf(f, "%-22s: %lu\n", "total_pages_committed",
1900                    get_pages_committed(dm));
1901
1902         seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count",
1903                    dm->max_dynamic_page_count);
1904
1905         return 0;
1906 }
1907
1908 DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug);
1909
1910 static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
1911 {
1912         debugfs_create_file("hv-balloon", 0444, NULL, b,
1913                             &hv_balloon_debug_fops);
1914 }
1915
1916 static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
1917 {
1918         debugfs_lookup_and_remove("hv-balloon", NULL);
1919 }
1920
1921 #else
1922
1923 static inline void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
1924 {
1925 }
1926
1927 static inline void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
1928 {
1929 }
1930
1931 #endif  /* CONFIG_DEBUG_FS */
1932
1933 static int balloon_probe(struct hv_device *dev,
1934                          const struct hv_vmbus_device_id *dev_id)
1935 {
1936         int ret;
1937
1938         allow_hibernation = hv_is_hibernation_supported();
1939         if (allow_hibernation)
1940                 hot_add = false;
1941
1942 #ifdef CONFIG_MEMORY_HOTPLUG
1943         /*
1944          * Hot-add must operate in chunks that are of size equal to the
1945          * memory block size because that's what the core add_memory()
1946          * interface requires. The Hyper-V interface requires that the memory
1947          * block size be a power of 2, which is guaranteed by the check in
1948          * memory_dev_init().
1949          */
1950         ha_pages_in_chunk = memory_block_size_bytes() / PAGE_SIZE;
1951         do_hot_add = hot_add;
1952 #else
1953         /*
1954          * Without MEMORY_HOTPLUG, the guest returns a failure status for all
1955          * hot add requests from Hyper-V, and the chunk size is used only to
1956          * specify alignment to Hyper-V as required by the host/guest protocol.
1957          * Somewhat arbitrarily, use 128 MiB.
1958          */
1959         ha_pages_in_chunk = SZ_128M / PAGE_SIZE;
1960         do_hot_add = false;
1961 #endif
1962         dm_device.dev = dev;
1963         dm_device.state = DM_INITIALIZING;
1964         dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
1965         init_completion(&dm_device.host_event);
1966         init_completion(&dm_device.config_event);
1967         INIT_LIST_HEAD(&dm_device.ha_region_list);
1968         spin_lock_init(&dm_device.ha_lock);
1969         INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
1970         INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
1971         dm_device.host_specified_ha_region = false;
1972
1973 #ifdef CONFIG_MEMORY_HOTPLUG
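        /*
         * Hook page onlining and register for memory block notifications;
         * both are used by the memory hot-add path to bring hot-added
         * ranges online.
         */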
1974         set_online_page_callback(&hv_online_page);
1975         init_completion(&dm_device.ol_waitevent);
1976         register_memory_notifier(&hv_memory_nb);
1977 #endif
1978
1979         hv_set_drvdata(dev, &dm_device);
1980
1981         ret = balloon_connect_vsp(dev);
1982         if (ret != 0)
1983                 goto connect_error;
1984
1985         enable_page_reporting();
1986         dm_device.state = DM_INITIALIZED;
1987
1988         dm_device.thread =
1989                  kthread_run(dm_thread_func, &dm_device, "hv_balloon");
1990         if (IS_ERR(dm_device.thread)) {
1991                 ret = PTR_ERR(dm_device.thread);
1992                 goto probe_error;
1993         }
1994
1995         hv_balloon_debugfs_init(&dm_device);
1996
1997         return 0;
1998
1999 probe_error:
2000         dm_device.state = DM_INIT_ERROR;
2001         dm_device.thread = NULL;
2002         disable_page_reporting();
2003         vmbus_close(dev->channel);
2004 connect_error:
2005 #ifdef CONFIG_MEMORY_HOTPLUG
2006         unregister_memory_notifier(&hv_memory_nb);
2007         restore_online_page_callback(&hv_online_page);
2008 #endif
2009         return ret;
2010 }
2011
2012 static void balloon_remove(struct hv_device *dev)
2013 {
2014         struct hv_dynmem_device *dm = hv_get_drvdata(dev);
2015         struct hv_hotadd_state *has, *tmp;
2016         struct hv_hotadd_gap *gap, *tmp_gap;
2017
2018         if (dm->num_pages_ballooned != 0)
2019                 pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
2020
2021         hv_balloon_debugfs_exit(dm);
2022
2023         cancel_work_sync(&dm->balloon_wrk.wrk);
2024         cancel_work_sync(&dm->ha_wrk.wrk);
2025
2026         kthread_stop(dm->thread);
2027
2028         /*
2029          * This is to handle the case when balloon_resume()
2030          * call has failed and some cleanup has been done as
2031          * a part of the error handling.
2032          */
2033         if (dm_device.state != DM_INIT_ERROR) {
2034                 disable_page_reporting();
2035                 vmbus_close(dev->channel);
2036 #ifdef CONFIG_MEMORY_HOTPLUG
2037                 unregister_memory_notifier(&hv_memory_nb);
2038                 restore_online_page_callback(&hv_online_page);
2039 #endif
2040         }
2041
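        /*
         * Free any remaining hot-add region descriptors and the gap records
         * attached to them.
         */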
2042         guard(spinlock_irqsave)(&dm_device.ha_lock);
2043         list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
2044                 list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
2045                         list_del(&gap->list);
2046                         kfree(gap);
2047                 }
2048                 list_del(&has->list);
2049                 kfree(has);
2050         }
2051 }
2052
2053 static int balloon_suspend(struct hv_device *hv_dev)
2054 {
2055         struct hv_dynmem_device *dm = hv_get_drvdata(hv_dev);
2056
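        /*
         * Keep the channel callback from running while the work items, the
         * balloon thread and the channel itself are torn down for suspend.
         */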
2057         tasklet_disable(&hv_dev->channel->callback_event);
2058
2059         cancel_work_sync(&dm->balloon_wrk.wrk);
2060         cancel_work_sync(&dm->ha_wrk.wrk);
2061
2062         if (dm->thread) {
2063                 kthread_stop(dm->thread);
2064                 dm->thread = NULL;
2065                 vmbus_close(hv_dev->channel);
2066         }
2067
2068         tasklet_enable(&hv_dev->channel->callback_event);
2069
2070         return 0;
2071 }
2072
2073 static int balloon_resume(struct hv_device *dev)
2074 {
2075         int ret;
2076
2077         dm_device.state = DM_INITIALIZING;
2078
2079         ret = balloon_connect_vsp(dev);
2080
2081         if (ret != 0)
2082                 goto out;
2083
2084         dm_device.thread =
2085                  kthread_run(dm_thread_func, &dm_device, "hv_balloon");
2086         if (IS_ERR(dm_device.thread)) {
2087                 ret = PTR_ERR(dm_device.thread);
2088                 dm_device.thread = NULL;
2089                 goto close_channel;
2090         }
2091
2092         dm_device.state = DM_INITIALIZED;
2093         return 0;
2094 close_channel:
2095         vmbus_close(dev->channel);
2096 out:
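        /*
         * Mark the device as failed so that balloon_remove() does not repeat
         * the cleanup done here.
         */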
2097         dm_device.state = DM_INIT_ERROR;
2098         disable_page_reporting();
2099 #ifdef CONFIG_MEMORY_HOTPLUG
2100         unregister_memory_notifier(&hv_memory_nb);
2101         restore_online_page_callback(&hv_online_page);
2102 #endif
2103         return ret;
2104 }
2105
2106 static const struct hv_vmbus_device_id id_table[] = {
2107         /* Dynamic Memory Class ID */
2108         /* 525074DC-8985-46e2-8057-A307DC18A502 */
2109         { HV_DM_GUID, },
2110         { },
2111 };
2112
2113 MODULE_DEVICE_TABLE(vmbus, id_table);
2114
2115 static struct hv_driver balloon_drv = {
2116         .name = "hv_balloon",
2117         .id_table = id_table,
2118         .probe = balloon_probe,
2119         .remove = balloon_remove,
2120         .suspend = balloon_suspend,
2121         .resume = balloon_resume,
2122         .driver = {
2123                 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
2124         },
2125 };
2126
2127 static int __init init_balloon_drv(void)
2128 {
2129         return vmbus_driver_register(&balloon_drv);
2130 }
2131
2132 module_init(init_balloon_drv);
2133
2134 MODULE_DESCRIPTION("Hyper-V Balloon");
2135 MODULE_LICENSE("GPL");