From 49f653d0610ce0d66131bc782673801a0556b3af Mon Sep 17 00:00:00 2001
From: leizongkun
Date: Tue, 4 Nov 2025 11:53:06 +0800
Subject: [PATCH 1/2] system: Add support for hugepage use on demand

For memory regions that are backed by 2MB huge pages and are not
pre-allocated, defer physical memory allocation. This reduces memory
overhead and achieves on-demand memory usage when starting virtual
machines.

Signed-off-by: wangzhigang
Signed-off-by: zhangliang
Signed-off-by: leizongkun
---
 backends/hostmem.c            |   5 +
 hw/virtio/virtio-balloon.c    | 181 ++++++++++++++++++++++++++++++++++
 include/exec/memory.h         |  11 +++
 include/qemu/madvise.h        |   5 +
 meson.build                   |   8 ++
 meson_options.txt             |   3 +
 scripts/meson-buildoptions.sh |   3 +
 system/memory.c               |  25 +++++
 8 files changed, 241 insertions(+)

diff --git a/backends/hostmem.c b/backends/hostmem.c
index 747e7838c0..eb4d21dedb 100644
--- a/backends/hostmem.c
+++ b/backends/hostmem.c
@@ -343,6 +343,11 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
         if (!backend->dump) {
             qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
         }
+#ifdef CONFIG_HUGEPAGE_POD
+        if (memory_region_is_huge_pod(&backend->mr)) {
+            qemu_madvise(ptr, sz, QEMU_MADV_HUGETLB_ZEROPAGE);
+        }
+#endif
 #ifdef CONFIG_NUMA
         unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
         /* lastbit == MAX_NODES means maxnode = 0 */
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index d004cf29d2..14d511b2e1 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -38,6 +38,154 @@
 #include "hw/virtio/virtio-access.h"
 
 #define BALLOON_PAGE_SIZE  (1 << VIRTIO_BALLOON_PFN_SHIFT)
+#ifdef CONFIG_HUGEPAGE_POD
+#define ULONGS_PER_HUGEPAGE 8 /* number of unsigned longs per hugepage in the bitmap */
+
+static bool guest_enabled_fpr;
+
+/* Set if the guest supports and has enabled free page reporting */
+static void set_guest_enabled_fpr(bool enabled)
+{
+    guest_enabled_fpr = enabled;
+}
+
+/* Represents one RAMBlock */
+typedef struct GlobalBalloonedPage {
+    void *base_hva;                   /* start HVA of the RAMBlock */
+    size_t page_nr;                   /* total 4KiB page count of the RAMBlock */
+    unsigned long *freed_page_bitmap; /* each set bit represents a freed 4KiB page */
+    int *hugepage_freed_pages;        /* each element counts the freed subpages of one hugepage */
+} GlobalBalloonedPage;
+
+#define PAGES_IN_HUGEPAGE 512
+#define HUGEPAGE_SHIFT 21
+#define GBP_LIST_LENGTH 8
+static GlobalBalloonedPage *gbp_list[GBP_LIST_LENGTH];
+
+static GlobalBalloonedPage *find_gbp_by_addr(void *base_hva)
+{
+    int i;
+
+    for (i = 0; i < GBP_LIST_LENGTH; i++) {
+        GlobalBalloonedPage *gbp = gbp_list[i];
+        if (gbp == NULL) {
+            continue;
+        }
+
+        if (gbp->base_hva == base_hva) {
+            return gbp;
+        }
+    }
+    return NULL;
+}
+
+static GlobalBalloonedPage *alloc_new_gbp(void *base_hva, ram_addr_t length)
+{
+    int i;
+
+    for (i = 0; i < GBP_LIST_LENGTH; i++) {
+        GlobalBalloonedPage *gbp = gbp_list[i];
+        if (gbp == NULL) {
+            gbp = g_malloc0(sizeof(GlobalBalloonedPage));
+            gbp->base_hva = base_hva;
+            gbp->page_nr = length >> VIRTIO_BALLOON_PFN_SHIFT;
+            gbp->freed_page_bitmap = bitmap_new(gbp->page_nr);
+            gbp->hugepage_freed_pages = g_malloc0(gbp->page_nr / PAGES_IN_HUGEPAGE * sizeof(int));
+            gbp_list[i] = gbp;
+            return gbp;
+        }
+    }
+
+    return NULL;
+}
+
+static void free_gbp(void)
+{
+    int i;
+
+    for (i = 0; i < GBP_LIST_LENGTH; i++) {
+        GlobalBalloonedPage *gbp = gbp_list[i];
+        if (gbp == NULL) {
+            continue;
+        }
+
+        g_free(gbp->freed_page_bitmap);
+        g_free(gbp->hugepage_freed_pages);
+        g_free(gbp);
+
+        gbp_list[i] = NULL;
+    }
+}
+
+static inline void clear_subpages_in_hugepage(GlobalBalloonedPage *gbp, unsigned long hugepage_index)
+{
+    bitmap_zero(&gbp->freed_page_bitmap[hugepage_index * ULONGS_PER_HUGEPAGE], PAGES_IN_HUGEPAGE);
+}
+
+static inline bool all_subpages_in_hugepage_freed(GlobalBalloonedPage *gbp, unsigned long hugepage_index)
+{
+    return bitmap_full(&gbp->freed_page_bitmap[hugepage_index * ULONGS_PER_HUGEPAGE], PAGES_IN_HUGEPAGE);
+}
+
+static void mark_freed_subpage(RAMBlock *rb, ram_addr_t rb_offset)
+{
+    void *base_hva = qemu_ram_get_host_addr(rb);
+    ram_addr_t length = qemu_ram_get_max_length(rb);
+    ram_addr_t rb_page_size = qemu_ram_pagesize(rb);
+    ram_addr_t rb_aligned_offset = QEMU_ALIGN_DOWN(rb_offset, rb_page_size);
+    unsigned long page_index = rb_offset >> VIRTIO_BALLOON_PFN_SHIFT;
+    unsigned long hugepage_index = rb_offset >> HUGEPAGE_SHIFT;
+
+    GlobalBalloonedPage *gbp = find_gbp_by_addr(base_hva);
+    if (gbp == NULL) {
+        gbp = alloc_new_gbp(base_hva, length);
+        if (gbp == NULL) {
+            return;
+        }
+    }
+
+    /* When a subpage is released by the balloon, set the bit for that page */
+    if (!test_and_set_bit(page_index, gbp->freed_page_bitmap)) {
+        gbp->hugepage_freed_pages[hugepage_index]++;
+
+        /*
+         * All bits are set, meaning every subpage of this hugepage has been
+         * freed by the balloon, so we can release the hugepage back to the
+         * host.
+         */
+        if (gbp->hugepage_freed_pages[hugepage_index] == PAGES_IN_HUGEPAGE) {
+            clear_subpages_in_hugepage(gbp, hugepage_index);
+            gbp->hugepage_freed_pages[hugepage_index] = 0;
+
+            /* Release this hugepage back to the host */
+            ram_block_discard_range(rb, rb_aligned_offset, rb_page_size);
+        }
+    }
+}
+
+static void mark_used_subpage(RAMBlock *rb, ram_addr_t rb_offset)
+{
+    void *base_hva = qemu_ram_get_host_addr(rb);
+    unsigned long page_index = rb_offset >> VIRTIO_BALLOON_PFN_SHIFT;
+    unsigned long hugepage_index = rb_offset >> HUGEPAGE_SHIFT;
+
+    GlobalBalloonedPage *gbp = find_gbp_by_addr(base_hva);
+    if (gbp == NULL) {
+        warn_report("Couldn't find gbp of rb_offset 0x%lx", rb_offset);
+        return;
+    }
+
+    /*
+     * When a subpage is deflated back to the guest, clear the bit for that
+     * page.  The subpage may now be used by the guest, so it must not be
+     * released to the host by mark_freed_subpage.
+     */
+    if (test_and_clear_bit(page_index, gbp->freed_page_bitmap)) {
+        gbp->hugepage_freed_pages[hugepage_index]--;
+    } else {
+        /* Probably cleared by mark_freed_subpage */
+        /* warn_report("rb_offset 0x%lx have not set in gbp->freed_page_bitmap\n", rb_offset); */
+    }
+}
+#endif
 
 typedef struct PartiallyBalloonedPage {
     ram_addr_t base_gpa;
@@ -92,6 +240,14 @@ static void balloon_inflate_page(VirtIOBalloon *balloon,
     rb = qemu_ram_block_from_host(addr, false, &rb_offset);
     rb_page_size = qemu_ram_pagesize(rb);
 
+#ifdef CONFIG_HUGEPAGE_POD
+    if (rb_page_size == (1 << HUGEPAGE_SHIFT)) {
+        /* 2M pagesize case */
+        mark_freed_subpage(rb, rb_offset);
+        return;
+    }
+#endif
+
     if (rb_page_size == BALLOON_PAGE_SIZE) {
         /* Easy case */
 
@@ -157,6 +313,14 @@ static void balloon_deflate_page(VirtIOBalloon *balloon,
     rb = qemu_ram_block_from_host(addr, false, &rb_offset);
     rb_page_size = qemu_ram_pagesize(rb);
 
+#ifdef CONFIG_HUGEPAGE_POD
+    if (rb_page_size == (1 << HUGEPAGE_SHIFT)) {
+        /* 2M pagesize case */
+        mark_used_subpage(rb, rb_offset);
+        return;
+    }
+#endif
+
     host_addr = (void *)((uintptr_t)addr & ~(rb_page_size - 1));
 
     /* When a page is deflated, we hint the whole host page it lives
@@ -257,6 +421,12 @@ static void balloon_stats_get_all(Object *obj, Visitor *v, const char *name,
         goto out_end;
     }
     for (i = 0; i < VIRTIO_BALLOON_S_NR; i++) {
+#ifdef CONFIG_HUGEPAGE_POD
+        if (guest_enabled_fpr && i == VIRTIO_BALLOON_S_CACHES) {
+            s->stats[i] |= 1024;
+        }
+#endif
+
         if (!visit_type_uint64(v, balloon_stat_names[i], &s->stats[i], errp)) {
             goto out_nested;
         }
@@ -378,6 +548,9 @@ static void virtio_balloon_handle_report(VirtIODevice *vdev, VirtQueue *vq)
             ram_block_discard_range(rb, ram_offset, size);
         }
 
+#ifdef CONFIG_HUGEPAGE_POD
+        set_guest_enabled_fpr(true);
+#endif
     skip_element:
         virtqueue_push(vq, elem, 0);
 
@@ -923,6 +1096,10 @@ static void virtio_balloon_device_unrealize(DeviceState *dev)
         virtio_delete_queue(s->reporting_vq);
     }
     virtio_cleanup(vdev);
+
+#ifdef CONFIG_HUGEPAGE_POD
+    free_gbp();
+#endif
 }
 
 static void virtio_balloon_device_reset(VirtIODevice *vdev)
@@ -940,6 +1117,10 @@ static void virtio_balloon_device_reset(VirtIODevice *vdev)
     }
 
     s->poison_val = 0;
+
+#ifdef CONFIG_HUGEPAGE_POD
+    set_guest_enabled_fpr(false);
+#endif
 }
 
 static void virtio_balloon_set_status(VirtIODevice *vdev, uint8_t status)
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 51fe10d4a0..c5edf864e1 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -2109,6 +2109,17 @@ static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
     return mr->nonvolatile;
 }
 
+#ifdef CONFIG_HUGEPAGE_POD
+/**
+ * memory_region_is_huge_pod: check whether a memory region is POD hugepage
+ *
+ * Returns %true if a memory region is POD hugepage.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_huge_pod(MemoryRegion *mr);
+#endif
+
 /**
  * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
  *
diff --git a/include/qemu/madvise.h b/include/qemu/madvise.h
index e155f59a0d..599549d3ca 100644
--- a/include/qemu/madvise.h
+++ b/include/qemu/madvise.h
@@ -60,6 +60,11 @@
 #define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID
 #endif
 
+#ifdef CONFIG_HUGEPAGE_POD
+#define MADV_HUGETLB_ZEROPAGE 0x1110
+#define QEMU_MADV_HUGETLB_ZEROPAGE MADV_HUGETLB_ZEROPAGE
+#endif
+
 #elif defined(CONFIG_POSIX_MADVISE)
 
 #define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED
diff --git a/meson.build b/meson.build
index 6a8410fabb..7323351eb7 100644
--- a/meson.build
+++ b/meson.build
@@ -566,6 +566,13 @@ have_tpm = get_option('tpm') \
   .require(targetos != 'windows', error_message: 'TPM emulation only available on POSIX systems') \
   .allowed()
 
+# hugepage pod
+have_hugepage_pod = get_option('hugepage_pod') \
+  .require(targetos == 'linux', error_message: 'hugepage_pod is supported only on Linux') \
+  .allowed()
+
+config_host_data.set('CONFIG_HUGEPAGE_POD', have_hugepage_pod)
+
 # vhost
 have_vhost_user = get_option('vhost_user') \
   .disable_auto_if(targetos != 'linux') \
@@ -4487,6 +4494,7 @@ summary_info += {'libudev': libudev}
 summary_info += {'FUSE lseek': fuse_lseek.found()}
 summary_info += {'selinux': selinux}
 summary_info += {'libdw': libdw}
+summary_info += {'hugepage pod': have_hugepage_pod}
 summary(summary_info, bool_yn: true, section: 'Dependencies')
 
 if host_arch == 'unknown'
diff --git a/meson_options.txt b/meson_options.txt
index 61996300d5..339fcc193c 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -374,3 +374,6 @@ option('qemu_ga_version', type: 'string', value: '',
 
 option('hexagon_idef_parser', type : 'boolean', value : true,
        description: 'use idef-parser to automatically generate TCG code for the Hexagon frontend')
+
+option('hugepage_pod', type: 'feature', value: 'disabled',
+       description: 'support of hugepage use on demand')
diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh
index 8604fe8ffa..88ebfb3e7a 100644
--- a/scripts/meson-buildoptions.sh
+++ b/scripts/meson-buildoptions.sh
@@ -225,6 +225,7 @@ meson_options_help() {
   printf "%s\n" '  zstd            zstd compression support'
   printf "%s\n" '  qpl             Query Processing Library support'
   printf "%s\n" '  uadk            UADK Library support'
+  printf "%s\n" '  hugepage-pod    support of hugepage use on demand'
 }
 _meson_option_parse() {
   case $1 in
@@ -571,6 +572,8 @@
     --disable-qpl) printf "%s" -Dqpl=disabled ;;
     --enable-uadk) printf "%s" -Duadk=enabled ;;
     --disable-uadk) printf "%s" -Duadk=disabled ;;
+    --enable-hugepage-pod) printf "%s" -Dhugepage_pod=enabled ;;
+    --disable-hugepage-pod) printf "%s" -Dhugepage_pod=disabled ;;
     *) return 1 ;;
   esac
 }
diff --git a/system/memory.c b/system/memory.c
index fa99009701..bf331d0e7b 100644
--- a/system/memory.c
+++ b/system/memory.c
@@ -3786,3 +3786,28 @@ static void memory_register_types(void)
 }
 
 type_init(memory_register_types)
+
+#ifdef CONFIG_HUGEPAGE_POD
+#define HUGEPAGESIZE (1 << 21)
+bool memory_region_is_huge_pod(MemoryRegion *mr)
+{
+    HostMemoryBackend *backend;
+
+    rcu_read_lock();
+    while (mr->alias) {
+        mr = mr->alias;
+    }
+    backend = (HostMemoryBackend *)object_dynamic_cast(mr->owner, TYPE_MEMORY_BACKEND);
+    rcu_read_unlock();
+
+    if (backend == NULL || backend->prealloc) {
+        return false;
+    }
+
+    if (host_memory_backend_pagesize(backend) != HUGEPAGESIZE) {
+        return false;
+    }
+
+    return true;
+}
+#endif
-- 
Gitee


From 1a76538c8f4f3587864db8dd990efbae27919e88 Mon Sep 17 00:00:00 2001
From: leizongkun
Date: Tue, 4 Nov 2025 12:01:14 +0800
Subject: [PATCH 2/2] backends: Add support for one guest NUMA node allocating
 memory from multiple host nodes

This provides QEMU with a more flexible NUMA memory binding approach,
allowing a guest NUMA node to allocate its memory from different host NUMA
nodes according to specified proportions.

Signed-off-by: wangzhigang
Signed-off-by: zhangliang
Signed-off-by: leizongkun
---
 backends/hostmem.c            | 131 ++++++++++++++++++++++++++++++++++
 include/sysemu/hostmem.h      |   3 +
 meson.build                   |   8 +++
 meson_options.txt             |   3 +
 qapi/qom.json                 |   1 +
 scripts/meson-buildoptions.sh |   5 ++
 6 files changed, 151 insertions(+)

diff --git a/backends/hostmem.c b/backends/hostmem.c
index eb4d21dedb..06a744525e 100644
--- a/backends/hostmem.c
+++ b/backends/hostmem.c
@@ -20,6 +20,12 @@
 #include "qom/object_interfaces.h"
 #include "qemu/mmap-alloc.h"
 #include "qemu/madvise.h"
+#ifdef CONFIG_MBIND_PROPORTION
+#include "qemu/option.h"
+#include "sysemu/sysemu.h"
+#include "qemu/log.h"
+#include "qemu/units.h"
+#endif
 
 #ifdef CONFIG_NUMA
 #include <numaif.h>
@@ -32,6 +38,12 @@ QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_DEFAULT != MPOL_DEFAULT);
 QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_PREFERRED != MPOL_PREFERRED);
 QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_BIND != MPOL_BIND);
 QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_INTERLEAVE != MPOL_INTERLEAVE);
+
+#ifdef CONFIG_MBIND_PROPORTION
+#define PROPORTION_MAX_NUM 11
+#define PER_PROPORTION_MAX_LENGTH 32
+#define PROPORTION_MAX_LENGTH 512
+#endif
 #endif
 
 char *
@@ -137,6 +149,22 @@ out:
 #endif
 }
 
+#ifdef CONFIG_MBIND_PROPORTION
+static void
+host_memory_backend_set_propertion(Object *obj, Visitor *v, const char *name,
+                                   void *opaque, Error **errp)
+{
+#ifdef CONFIG_NUMA
+    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
+    char *pro = NULL;
+    visit_type_str(v, name, &pro, errp);
+    backend->propertion = pro;
+#else
+    error_setg(errp, "NUMA node binding is not supported by this QEMU");
+#endif
+}
+#endif
+
 static int
 host_memory_backend_get_policy(Object *obj, Error **errp G_GNUC_UNUSED)
 {
@@ -319,6 +347,95 @@ size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
     return pagesize;
 }
 
+#ifdef CONFIG_MBIND_PROPORTION
+static int mbind_by_proportions(void *ptr, const char *bind_proportions, uint64_t sz)
+{
+    char proportion_str[PROPORTION_MAX_LENGTH];
+    char proportions[PROPORTION_MAX_NUM][PER_PROPORTION_MAX_LENGTH];
+    int proportions_num, i;
+    char *token;
+    uint64_t size = 0;
+    long mbind_ret = -1;
+    uint64_t size_total = 0;
+
+    if (strlen(bind_proportions) >= PROPORTION_MAX_LENGTH) {
+        qemu_log("the length of bind_proportions is too long, max len is %d\n", PROPORTION_MAX_LENGTH);
+        return -1;
+    }
+    if (memcpy(proportion_str, bind_proportions, strlen(bind_proportions) + 1) == NULL) {
+        qemu_log("failed to copy bind_proportions\n");
+        return -1;
+    }
+    proportions_num = 0;
+    token = strtok(proportion_str, ":");
+    while (token != NULL) {
+        if (strlen(token) + 1 >= PER_PROPORTION_MAX_LENGTH) {
+            qemu_log("bind_proportions token is too long, max len is %d\n", PER_PROPORTION_MAX_LENGTH);
+            return -1;
+        }
+        if (memcpy(proportions[proportions_num], token, strlen(token) + 1) == NULL) {
+            qemu_log("failed to copy token\n");
+            return -1;
+        }
+        proportions_num++;
+        if (proportions_num >= PROPORTION_MAX_NUM) {
+            qemu_log("invalid proportions number, max is %d\n", PROPORTION_MAX_NUM);
+            return -1;
+        }
+        token = strtok(NULL, ":");
+    }
+
+    for (i = 0; i < proportions_num; i++) {
+        unsigned long tmp_lastbit, tmp_maxnode;
+        char prop[PROPORTION_MAX_LENGTH];
+        char *end, *prop_token, *pos;
+        long int node_id;
+        long size_token;
+        DECLARE_BITMAP(tmp_host_nodes, MAX_NODES + 1) = {0};
+
+        ptr = (void *)((char *)ptr + size);
+        if (memcpy(prop, proportions[i], strlen(proportions[i]) + 1) == NULL) {
+            qemu_log("failed to copy proportion\n");
+            return -1;
+        }
+        prop_token = strtok(prop, "-");
+        if (prop_token == NULL) {
+            return -1;
+        }
+        size_token = strtol(prop_token, &end, 10);
+        if (*end != '\0') {
+            return -1;
+        }
+        size = size_token * MiB;
+        size_total += size;
+        prop_token = strtok(NULL, "-");
+        pos = strstr(prop_token, "node");
+        pos += strlen("node");
+        node_id = strtol(pos, &end, 10);
+        if (*end != '\0') {
+            perror("failed to convert node_id from string to number");
+            return -1;
+        }
+        bitmap_set(tmp_host_nodes, node_id, 1);
+        tmp_lastbit = find_last_bit(tmp_host_nodes, MAX_NODES);
+        tmp_maxnode = (tmp_lastbit + 1) % (MAX_NODES + 1);
+        qemu_log("mbind args: addr %p, size %" PRIu64 ", node %ld\n", ptr, size, node_id);
+        mbind_ret = mbind(ptr, size, MPOL_BIND, tmp_host_nodes, tmp_maxnode + 1,
+                          MPOL_MF_STRICT | MPOL_MF_MOVE);
+        if (mbind_ret < 0) {
+            perror("failed to mbind address to host node");
+            return -1;
+        }
+    }
+    if (size_total != sz) {
+        qemu_log("invalid proportion config, length %" PRIu64 " is not the same as "
+                 "the sum of all tokens %" PRIu64 "\n", sz, size_total);
+        return -1;
+    }
+    return 0;
+}
+#endif
+
 static void
 host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
 {
@@ -357,6 +474,17 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
          * this doesn't catch hugepage case. */
         unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;
         int mode = backend->policy;
+#ifdef CONFIG_MBIND_PROPORTION
+        const char *proportion = backend->propertion;
+        if (proportion != NULL) {
+            if (mbind_by_proportions(ptr, proportion, sz) < 0) {
+                error_setg(errp, "failed to mbind_by_proportions");
+                return;
+            }
+            g_free(backend->propertion);
+            goto prealloc;
+        }
+#endif
 
         /* check for invalid host-nodes and policies and give more verbose
          * error messages than mbind(). */
@@ -403,6 +531,9 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
          * This is necessary to guarantee memory is allocated with
          * specified NUMA policy in place.
          */
+#ifdef CONFIG_MBIND_PROPORTION
+prealloc:
+#endif
     if (backend->prealloc) {
         qemu_prealloc_mem(memory_region_get_fd(&backend->mr), ptr, sz,
                           backend->prealloc_threads,
diff --git a/include/sysemu/hostmem.h b/include/sysemu/hostmem.h
index 39326f1d4f..83cb9e468d 100644
--- a/include/sysemu/hostmem.h
+++ b/include/sysemu/hostmem.h
@@ -70,6 +70,9 @@ struct HostMemoryBackend {
     ThreadContext *prealloc_context;
    DECLARE_BITMAP(host_nodes, MAX_NODES + 1);
     HostMemPolicy policy;
+#ifdef CONFIG_MBIND_PROPORTION
+    char *propertion;
+#endif
 
     MemoryRegion mr;
 };
diff --git a/meson.build b/meson.build
index 7323351eb7..9e8099ece4 100644
--- a/meson.build
+++ b/meson.build
@@ -573,6 +573,13 @@ have_hugepage_pod = get_option('hugepage_pod') \
   .require(targetos == 'linux', error_message: 'hugepage_pod is supported only on Linux') \
   .allowed()
 
 config_host_data.set('CONFIG_HUGEPAGE_POD', have_hugepage_pod)
 
+# mbind_proportion
+have_mbind_proportion = get_option('mbind_by_proportion') \
+  .require(targetos == 'linux', error_message: 'mbind_by_proportion is supported only on Linux') \
+  .allowed()
+
+config_host_data.set('CONFIG_MBIND_PROPORTION', have_mbind_proportion)
+
 # vhost
 have_vhost_user = get_option('vhost_user') \
   .disable_auto_if(targetos != 'linux') \
@@ -4495,6 +4502,7 @@ summary_info += {'FUSE lseek': fuse_lseek.found()}
 summary_info += {'selinux': selinux}
 summary_info += {'libdw': libdw}
 summary_info += {'hugepage pod': have_hugepage_pod}
+summary_info += {'mbind proportion': have_mbind_proportion}
 summary(summary_info, bool_yn: true, section: 'Dependencies')
 
 if host_arch == 'unknown'
diff --git a/meson_options.txt b/meson_options.txt
index 339fcc193c..5b28afa5c9 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -377,3 +377,6 @@ option('hexagon_idef_parser', type : 'boolean', value : true,
        description: 'use idef-parser to automatically generate TCG code for the Hexagon frontend')
 
 option('hugepage_pod', type: 'feature', value: 'disabled',
        description: 'support of hugepage use on demand')
+
+option('mbind_by_proportion', type: 'feature', value: 'disabled',
+       description: 'support for allocating memory of one guest NUMA node from multiple host nodes')
diff --git a/qapi/qom.json b/qapi/qom.json
index e0590a6019..d1fbe2b0a2 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -624,6 +624,7 @@
 { 'struct': 'MemoryBackendProperties',
   'data': { '*dump': 'bool',
             '*host-nodes': ['uint16'],
+            '*host-nodes-propertion': 'str',
             '*merge': 'bool',
             '*policy': 'HostMemPolicy',
             '*prealloc': 'bool',
diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh
index 88ebfb3e7a..6e90582b65 100644
--- a/scripts/meson-buildoptions.sh
+++ b/scripts/meson-buildoptions.sh
@@ -226,6 +226,9 @@ meson_options_help() {
   printf "%s\n" '  qpl             Query Processing Library support'
   printf "%s\n" '  uadk            UADK Library support'
   printf "%s\n" '  hugepage-pod    support of hugepage use on demand'
+  printf "%s\n" '  mbind-by-proportion'
+  printf "%s\n" '                  support for allocating memory of one guest NUMA node'
+  printf "%s\n" '                  from multiple host nodes'
 }
 _meson_option_parse() {
   case $1 in
@@ -574,6 +577,8 @@
     --disable-uadk) printf "%s" -Duadk=disabled ;;
     --enable-hugepage-pod) printf "%s" -Dhugepage_pod=enabled ;;
     --disable-hugepage-pod) printf "%s" -Dhugepage_pod=disabled ;;
+    --enable-mbind-by-proportion) printf "%s" -Dmbind_by_proportion=enabled ;;
+    --disable-mbind-by-proportion) printf "%s" -Dmbind_by_proportion=disabled ;;
     *) return 1 ;;
   esac
 }
-- 
Gitee
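
Usage sketch (illustrative only, not part of the patches): the configure
switches below are the ones added by this series; the command line is an
assumption based on the code above. It relies on a 2 MiB hugetlbfs backend
that is not preallocated (so memory_region_is_huge_pod() matches), on
virtio-balloon free page reporting, and on the "host-nodes-propertion"
property, whose value mbind_by_proportions() parses as colon-separated
"<size-in-MiB>-node<N>" entries that must add up to the backend size. Note
that QEMU_MADV_HUGETLB_ZEROPAGE is not an upstream Linux madvise value, so a
kernel implementing it is also assumed.

  ./configure --enable-hugepage-pod --enable-mbind-by-proportion

  qemu-system-x86_64 -m 4G \
      -object memory-backend-file,id=mem0,mem-path=/dev/hugepages,size=4G,prealloc=off,host-nodes-propertion=2048-node0:2048-node1 \
      -numa node,nodeid=0,memdev=mem0 \
      -device virtio-balloon,free-page-reporting=on \
      ...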