📄 dom_fw_dom0.c
字号:
	/*
	 * NOTE(review): this chunk begins mid-function.  The opening of the
	 * enclosing function (presumably setup_dom0_memmap_info(), which is
	 * called at the bottom of this file — confirm against the full file)
	 * is above the visible region.  The visible tail selects the last
	 * large WB memory descriptor, carves the memmap-info pages out of
	 * its end, appends a descriptor for them, and publishes the result
	 * to the domain via shared_info.
	 */
	    md->num_pages > ((num_pages + 1) << (PAGE_SHIFT - EFI_PAGE_SHIFT))) {
			last_mem_md = md;
			break;
		}
	}

	if (last_mem_md == NULL) {
		printk("%s: warning: "
		       "no dom0 contiguous memory to hold memory map\n",
		       __func__);
		return;
	}

	/* Steal the top (num_pages << PAGE_SHIFT) bytes of the chosen
	 * descriptor, rounded down to a xen page boundary, and shrink the
	 * donor descriptor accordingly (num_pages is in EFI 4k pages). */
	paddr_end = last_mem_md->phys_addr +
		(last_mem_md->num_pages << EFI_PAGE_SHIFT);
	paddr_start = (paddr_end - (num_pages << PAGE_SHIFT)) & PAGE_MASK;
	last_mem_md->num_pages -= (paddr_end - paddr_start) >> EFI_PAGE_SHIFT;

	/* Append a runtime-services-data descriptor covering the stolen
	 * range so the guest sees it as firmware-reserved memory. */
	md = &tables->efi_memmap[tables->num_mds];
	tables->num_mds++;
	md->type = EFI_RUNTIME_SERVICES_DATA;
	md->phys_addr = paddr_start;
	md->virt_addr = 0;
	md->num_pages = num_pages << (PAGE_SHIFT - EFI_PAGE_SHIFT);
	md->attribute = EFI_MEMORY_WB;

	BUG_ON(tables->fw_tables_size <
	       sizeof(*tables) +
	       sizeof(tables->efi_memmap[0]) * tables->num_mds);
	/* After this sort, md no longer points at the entry just filled in —
	 * the array has been reordered, so md must not be dereferenced. */
	sort(tables->efi_memmap, tables->num_mds, sizeof(efi_memory_desc_t),
	     efi_mdt_cmp, NULL);

	/* Fill in the xen_ia64_memmap_info header in the stolen pages and
	 * copy the descriptor array after it, then publish its location. */
	memmap_info = domain_mpa_to_imva(d, paddr_start);
	memmap_info->efi_memdesc_size = sizeof(md[0]);
	memmap_info->efi_memdesc_version = EFI_MEMORY_DESCRIPTOR_VERSION;
	memmap_info->efi_memmap_size = tables->num_mds * sizeof(md[0]);
	dom_fw_copy_to(d,
		       paddr_start + offsetof(xen_ia64_memmap_info_t, memdesc),
		       &tables->efi_memmap[0], memmap_info->efi_memmap_size);
	d->shared_info->arch.memmap_info_num_pages = num_pages;
	d->shared_info->arch.memmap_info_pfn = paddr_start >> PAGE_SHIFT;
}

/*
 * Allocate fresh dom0 pages covering the range described by @md.
 *
 * setup_guest() @ libxc/xc_linux_build() arranges memory for domU;
 * however, no one arranges memory for dom0, so we allocate its pages
 * manually here.  Only PAL code, runtime-services data and conventional
 * memory descriptors are backed with real pages; other types are
 * handled elsewhere (identity/MMIO mappings).
 */
static void
assign_new_domain0_range(struct domain *d, const efi_memory_desc_t * md)
{
	if (md->type == EFI_PAL_CODE ||
	    md->type == EFI_RUNTIME_SERVICES_DATA ||
	    md->type == EFI_CONVENTIONAL_MEMORY) {
		/* start is rounded down to a xen page; num_pages counts
		 * EFI 4k pages, hence the EFI_PAGE_SHIFT conversion. */
		unsigned long start = md->phys_addr & PAGE_MASK;
		unsigned long end = md->phys_addr +
			(md->num_pages << EFI_PAGE_SHIFT);
		unsigned long addr;

		if (end == start) {
			/* md->num_pages = 0 is allowed. */
			return;
		}
		for (addr = start; addr < end; addr += PAGE_SIZE)
			assign_new_domain0_page(d, addr);
	}
}

/*
 * Complete the dom0 memmap.
 *
 * Backs the firmware-built descriptors with pages, then walks the boot
 * EFI memory map twice, copying/translating interesting descriptors into
 * tables->efi_memmap and establishing dom0 mappings.  Finally maps
 * legacy low-memory MMIO holes and publishes the finished map via
 * setup_dom0_memmap_info().
 *
 * Returns the final number of memory descriptors in the table.
 */
int __init
complete_dom0_memmap(struct domain *d, struct fw_tables *tables)
{
	u64 addr;
	void *efi_map_start, *efi_map_end, *p;
	u64 efi_desc_size;
	int i;

	/* Back the descriptors built so far with freshly allocated pages. */
	for (i = 0; i < tables->num_mds; i++)
		assign_new_domain0_range(d, &tables->efi_memmap[i]);

	/* Walk through all MDT entries.  Copy all interesting entries. */
	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	/*
	 * EFI memory descriptors use 4k pages, while xen uses 16k pages.
	 * To avoid the identity mappings for EFI_ACPI_RECLAIM_MEMORY etc.
	 * being blocked by a WB mapping, scan the memory descriptors twice:
	 * First: set up identity mappings for EFI_ACPI_RECLAIM_MEMORY etc.
	 * Second: set up mappings for EFI_CONVENTIONAL_MEMORY etc.
	 */

	/* First scan: set up identity mappings for EFI_ACPI_RECLAIM_MEMORY
	 * etc. */
	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		const efi_memory_desc_t *md = p;
		/* dom_md points at the next free slot; only the cases that
		 * actually emit a descriptor bump tables->num_mds. */
		efi_memory_desc_t *dom_md =
			&tables->efi_memmap[tables->num_mds];
		u64 start = md->phys_addr;
		u64 size = md->num_pages << EFI_PAGE_SHIFT;
		u64 end = start + size;
		u64 mpaddr;
		unsigned long flags;

		switch (md->type) {
		case EFI_RUNTIME_SERVICES_CODE:
		case EFI_RUNTIME_SERVICES_DATA:
		case EFI_ACPI_RECLAIM_MEMORY:
		case EFI_ACPI_MEMORY_NVS:
		case EFI_RESERVED_TYPE:
			/*
			 * Map into dom0 - We must respect protection
			 * and cache attributes.  Not all of these pages
			 * are writable!!!
			 */
			flags = ASSIGN_writable;	/* dummy - zero */
			if (md->attribute & EFI_MEMORY_WP)
				flags |= ASSIGN_readonly;
			if ((md->attribute & EFI_MEMORY_UC) &&
			    !(md->attribute & EFI_MEMORY_WB))
				flags |= ASSIGN_nocache;

			assign_domain_mach_page(d, start, size, flags);

			/* Fall-through. */
		case EFI_MEMORY_MAPPED_IO:
			/* Will be mapped with ioremap. */

			/* Copy descriptor. */
			*dom_md = *md;
			dom_md->virt_addr = 0;
			tables->num_mds++;
			break;

		case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
			flags = ASSIGN_writable;	/* dummy - zero */
			if (md->attribute & EFI_MEMORY_UC)
				flags |= ASSIGN_nocache;

			/* Relocate an I/O-port range living above the
			 * guest-visible physical address space down below
			 * it; the descriptor is rewritten to match. */
			if (start > 0x1ffffffff0000000UL) {
				mpaddr = 0x4000000000000UL - size;
				printk(XENLOG_INFO "Remapping IO ports from "
				       "%lx to %lx\n", start, mpaddr);
			} else
				mpaddr = start;

			/* Map into dom0. */
			assign_domain_mmio_page(d, mpaddr, start, size, flags);
			/* Copy descriptor. */
			*dom_md = *md;
			dom_md->phys_addr = mpaddr;
			dom_md->virt_addr = 0;
			tables->num_mds++;
			break;

		case EFI_CONVENTIONAL_MEMORY:
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
			/* Deferred to the second scan below. */
			break;

		case EFI_UNUSABLE_MEMORY:
		case EFI_PAL_CODE:
			/*
			 * We don't really need these, but holes in the
			 * memory map may cause Linux to assume there are
			 * uncacheable ranges within a granule.
			 */
			dom_md->type = EFI_UNUSABLE_MEMORY;
			dom_md->phys_addr = start;
			dom_md->virt_addr = 0;
			dom_md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
			dom_md->attribute = EFI_MEMORY_WB;
			tables->num_mds++;
			break;

		default:
			/* Print a warning but continue. */
			printk("complete_dom0_memmap: warning: "
			       "unhandled MDT entry type %u\n", md->type);
		}
	}

	/* Second scan: set up mappings for EFI_CONVENTIONAL_MEMORY etc. */
	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		const efi_memory_desc_t *md = p;
		efi_memory_desc_t *dom_md =
			&tables->efi_memmap[tables->num_mds];
		u64 start = md->phys_addr;
		u64 size = md->num_pages << EFI_PAGE_SHIFT;
		u64 end = start + size;

		switch (md->type) {
		case EFI_CONVENTIONAL_MEMORY:
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA: {
			u64 dom_md_start;
			u64 dom_md_end;
			/* Remaining allocation budget for the domain,
			 * in bytes. */
			unsigned long left_mem =
				(unsigned long)(d->max_pages -
						d->tot_pages) << PAGE_SHIFT;

			if (!(md->attribute & EFI_MEMORY_WB))
				break;

			/* Never hand out memory below the firmware tables. */
			dom_md_start = max(tables->fw_end_paddr, start);
			dom_md_end = dom_md_start;
			do {
				/* Grow the range by at most the remaining
				 * budget, clipped to the descriptor end;
				 * stop if less than one xen page remains. */
				dom_md_end = min(dom_md_end + left_mem, end);
				if (dom_md_end < dom_md_start + PAGE_SIZE)
					break;

				dom_md->type = EFI_CONVENTIONAL_MEMORY;
				dom_md->phys_addr = dom_md_start;
				dom_md->virt_addr = 0;
				dom_md->num_pages =
					(dom_md_end - dom_md_start) >>
					EFI_PAGE_SHIFT;
				dom_md->attribute = EFI_MEMORY_WB;

				assign_new_domain0_range(d, dom_md);
				/*
				 * Recalculate left_mem: we might have
				 * already allocated memory in this region
				 * because of the kernel loader, so we may
				 * have consumed less than
				 * (dom_md_end - dom_md_start) above.
				 */
				left_mem = (unsigned long)
					(d->max_pages -
					 d->tot_pages) << PAGE_SHIFT;
			} while (left_mem > 0 && dom_md_end < end);

			/* Only commit the descriptor if it covers at least
			 * one full xen page. */
			if (!(dom_md_end < dom_md_start + PAGE_SIZE))
				tables->num_mds++;
			break;
		}

		default:
			break;
		}
	}
	BUG_ON(tables->fw_tables_size <
	       sizeof(*tables) +
	       sizeof(tables->efi_memmap[0]) * tables->num_mds);

	sort(tables->efi_memmap, tables->num_mds, sizeof(efi_memory_desc_t),
	     efi_mdt_cmp, NULL);

	// Map low-memory holes & unmapped MMIO for legacy drivers
	for (addr = 0; addr < ONE_MB; addr += PAGE_SIZE) {
		if (domain_page_mapped(d, addr))
			continue;

		if (efi_mmio(addr, PAGE_SIZE)) {
			unsigned long flags;
			flags = ASSIGN_writable | ASSIGN_nocache;
			assign_domain_mmio_page(d, addr, addr,
						PAGE_SIZE, flags);
		}
	}
	setup_dom0_memmap_info(d, tables);
	return tables->num_mds;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "linux"
 * c-basic-offset: 8
 * tab-width: 8
 * indent-tabs-mode: t
 * End:
 */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -