xc_ia64_linux_restore.c
/******************************************************************************
 * xc_ia64_linux_restore.c
 *
 * Restore the state of a Linux session.
 *
 * Copyright (c) 2003, K A Fraser.
 *   Rewritten for ia64 by Tristan Gingold <tristan.gingold@bull.net>
 *
 * Copyright (c) 2007 Isaku Yamahata <yamahata@valinux.co.jp>
 *   Use foreign p2m exposure.
 *   VTi domain support
 */

#include <stdlib.h>
#include <unistd.h>

#include "xg_private.h"
#include "xc_ia64_save_restore.h"
#include "xc_ia64.h"
#include "xc_efi.h"
#include "xen/hvm/params.h"

#define PFN_TO_KB(_pfn) ((_pfn) << (PAGE_SHIFT - 10))

/* number of pfns this guest has (i.e. number of entries in the P2M) */
static unsigned long p2m_size;

/* number of 'in use' pfns in the guest (i.e. #P2M entries with a valid mfn) */
static unsigned long nr_pfns;

/* Allocate a backing page for gmfn if the p2m table says it is not yet
   present. */
static int
populate_page_if_necessary(int xc_handle, uint32_t dom, unsigned long gmfn,
                           struct xen_ia64_p2m_table *p2m_table)
{
    if (xc_ia64_p2m_present(p2m_table, gmfn))
        return 0;

    return xc_domain_memory_populate_physmap(xc_handle, dom, 1, 0, 0, &gmfn);
}

/* Read one page from the state file directly into the guest frame pfn. */
static int
read_page(int xc_handle, int io_fd, uint32_t dom, unsigned long pfn)
{
    void *mem;

    mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                               PROT_READ|PROT_WRITE, pfn);
    if (mem == NULL) {
        ERROR("cannot map page");
        return -1;
    }
    if (read_exact(io_fd, mem, PAGE_SIZE)) {
        ERROR("Error when reading from state file (5)");
        munmap(mem, PAGE_SIZE);
        return -1;
    }
    munmap(mem, PAGE_SIZE);
    return 0;
}

/*
 * Get the list of PFNs that are not in the pseudo-phys map.
 * Although we allocate pages on demand, the balloon driver may have
 * decreased the reservation simultaneously, so we have to free the
 * freed pages here.
 */
static int
xc_ia64_recv_unallocated_list(int xc_handle, int io_fd, uint32_t dom,
                              struct xen_ia64_p2m_table *p2m_table)
{
    int rc = -1;
    unsigned int i;
    unsigned int count;
    unsigned long *pfntab = NULL;
    unsigned int nr_frees;

    if (read_exact(io_fd, &count, sizeof(count))) {
        ERROR("Error when reading pfn count");
        goto out;
    }

    pfntab = malloc(sizeof(unsigned long) * count);
    if (pfntab == NULL) {
        ERROR("Out of memory");
        goto out;
    }

    if (read_exact(io_fd, pfntab, sizeof(unsigned long)*count)) {
        ERROR("Error when reading pfntab");
        goto out;
    }

    nr_frees = 0;
    for (i = 0; i < count; i++) {
        if (xc_ia64_p2m_allocated(p2m_table, pfntab[i])) {
            pfntab[nr_frees] = pfntab[i];
            nr_frees++;
        }
    }
    if (nr_frees > 0) {
        if (xc_domain_memory_decrease_reservation(xc_handle, dom, nr_frees,
                                                  0, pfntab) < 0) {
            PERROR("Could not decrease reservation");
            goto out;
        } else
            DPRINTF("Decreased reservation by %d / %d pages\n",
                    nr_frees, count);
    }
    rc = 0;

 out:
    if (pfntab != NULL)
        free(pfntab);
    return rc;
}

/* Read one vcpu's saved context from the state file and install it. */
static int
xc_ia64_recv_vcpu_context(int xc_handle, int io_fd, uint32_t dom,
                          uint32_t vcpu, vcpu_guest_context_t *ctxt)
{
    if (read_exact(io_fd, ctxt, sizeof(*ctxt))) {
        ERROR("Error when reading ctxt");
        return -1;
    }

    fprintf(stderr, "ip=%016lx, b0=%016lx\n", ctxt->regs.ip, ctxt->regs.b[0]);

    /* Initialize and set registers.  */
    ctxt->flags = VGCF_EXTRA_REGS | VGCF_SET_CR_IRR;
    if (xc_vcpu_setcontext(xc_handle, dom, vcpu, ctxt) != 0) {
        ERROR("Couldn't set vcpu context");
        return -1;
    }

    /* Just a check.  */
    ctxt->flags = 0;
    if (xc_vcpu_getcontext(xc_handle, dom, vcpu, ctxt)) {
        ERROR("Could not get vcpu context");
        return -1;
    }

    return 0;
}

/* Read shared info.  */
static int
xc_ia64_recv_shared_info(int xc_handle, int io_fd, uint32_t dom,
                         unsigned long shared_info_frame,
                         unsigned long *start_info_pfn)
{
    unsigned int i;

    /* The new domain's shared-info frame. */
    shared_info_t *shared_info;

    /* Read shared info.  */
    shared_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                       PROT_READ|PROT_WRITE,
                                       shared_info_frame);
    if (shared_info == NULL) {
        ERROR("cannot map page");
        return -1;
    }

    if (read_exact(io_fd, shared_info, PAGE_SIZE)) {
        ERROR("Error when reading shared_info page");
        munmap(shared_info, PAGE_SIZE);
        return -1;
    }

    /* clear any pending events and the selector */
    memset(&(shared_info->evtchn_pending[0]), 0,
           sizeof(shared_info->evtchn_pending));
    for (i = 0; i < MAX_VIRT_CPUS; i++)
        shared_info->vcpu_info[i].evtchn_pending_sel = 0;

    if (start_info_pfn != NULL)
        *start_info_pfn = shared_info->arch.start_info_pfn;

    munmap(shared_info, PAGE_SIZE);

    return 0;
}

/* Receive the PV-specific part of the save image: vcpu context, privregs
   page, shared info and the start_info page. */
static int
xc_ia64_pv_recv_context(int xc_handle, int io_fd, uint32_t dom,
                        unsigned long shared_info_frame,
                        struct xen_ia64_p2m_table *p2m_table,
                        unsigned int store_evtchn, unsigned long *store_mfn,
                        unsigned int console_evtchn,
                        unsigned long *console_mfn)
{
    int rc = -1;
    unsigned long gmfn;

    /* A copy of the CPU context of the guest. */
    vcpu_guest_context_t ctxt;

    /* A temporary mapping of the guest's start_info page. */
    start_info_t *start_info;

    if (lock_pages(&ctxt, sizeof(ctxt))) {
        /* needed for build domctl, but might as well do early */
        ERROR("Unable to lock_pages ctxt");
        return -1;
    }

    if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, 0, &ctxt))
        goto out;

    /* Then get privreg page.  */
    if (read_page(xc_handle, io_fd, dom, ctxt.privregs_pfn) < 0) {
        ERROR("Could not read vcpu privregs");
        goto out;
    }

    /* Read shared info.  */
    if (xc_ia64_recv_shared_info(xc_handle, io_fd, dom,
                                 shared_info_frame, &gmfn))
        goto out;

    /* Uncanonicalise the suspend-record frame number and poke resume rec. */
    if (populate_page_if_necessary(xc_handle, dom, gmfn, p2m_table)) {
        ERROR("cannot populate page 0x%lx", gmfn);
        goto out;
    }
    start_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                      PROT_READ | PROT_WRITE, gmfn);
    if (start_info == NULL) {
        ERROR("cannot map start_info page");
        goto out;
    }
    start_info->nr_pages = p2m_size;
    start_info->shared_info = shared_info_frame << PAGE_SHIFT;
    start_info->flags = 0;
    *store_mfn = start_info->store_mfn;
    start_info->store_evtchn = store_evtchn;
    *console_mfn = start_info->console.domU.mfn;
    start_info->console.domU.evtchn = console_evtchn;
    munmap(start_info, PAGE_SIZE);

    rc = 0;

 out:
    unlock_pages(&ctxt, sizeof(ctxt));
    return rc;
}

/* Receive the HVM (VTi) specific part of the save image. */
static int
xc_ia64_hvm_recv_context(int xc_handle, int io_fd, uint32_t dom,
                         unsigned long shared_info_frame,
                         struct xen_ia64_p2m_table *p2m_table,
                         unsigned int store_evtchn, unsigned long *store_mfn,
                         unsigned int console_evtchn,
                         unsigned long *console_mfn)
{
    int rc = -1;
    xc_dominfo_t info;
    unsigned int i;

    /* cpu */
    uint64_t max_virt_cpus;
    unsigned long vcpumap_size;
    uint64_t *vcpumap = NULL;

    /* HVM: magic frames for ioreqs and xenstore comms */
    const int hvm_params[] = {
        HVM_PARAM_STORE_PFN,
        HVM_PARAM_IOREQ_PFN,
        HVM_PARAM_BUFIOREQ_PFN,
        HVM_PARAM_BUFPIOREQ_PFN,
    };
    const int NR_PARAMS = sizeof(hvm_params) / sizeof(hvm_params[0]);
    /* ioreq_pfn, bufioreq_pfn, store_pfn */
    uint64_t magic_pfns[NR_PARAMS];

    /* HVM: a buffer for holding HVM context */
    uint64_t rec_size = 0;
    uint8_t *hvm_buf = NULL;

    /* Read shared info.  */
    if (xc_ia64_recv_shared_info(xc_handle, io_fd, dom,
                                 shared_info_frame, NULL))
        goto out;

    /* vcpu map */
    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
        ERROR("Could not get domain info");
        goto out;
    }
    if (read_exact(io_fd, &max_virt_cpus, sizeof(max_virt_cpus))) {
        ERROR("error reading max_virt_cpus");
        goto out;
    }
    if (max_virt_cpus < info.max_vcpu_id) {
        ERROR("too large max_virt_cpus %i < %i\n",
              max_virt_cpus, info.max_vcpu_id);
        goto out;