
📄 intel-iommu.c

📁 Linux kernel source
💻 C
📖 Page 1 of 4
}

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad);
	spin_lock_init(&domain->mapping_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain->iommu;
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~PAGE_MASK_4K);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_domain(domain);
	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain,
		u8 bus, u8 devfn)
{
	struct context_entry *context;
	struct intel_iommu *iommu = domain->iommu;
	unsigned long flags;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	BUG_ON(!domain->pgd);
	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(*context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	context_set_domain_id(*context, domain->id);
	context_set_address_width(*context, domain->agaw);
	context_set_address_root(*context, virt_to_phys(domain->pgd));
	context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(*context);
	context_set_present(*context);
	__iommu_flush_cache(iommu, context, sizeof(*context));

	/* it's a non-present to present mapping */
	if (iommu_flush_context_device(iommu, domain->id,
			(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1))
		iommu_flush_write_buffer(iommu);
	else
		iommu_flush_iotlb_dsi(iommu, 0, 0);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pdev->bus->number,
		pdev->devfn);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain, parent->bus->number,
			parent->devfn);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
			tmp->subordinate->number, 0);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
			tmp->bus->number, tmp->devfn);
}
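/*
 * Worked example for the AGAW selection in domain_init() above (a sketch;
 * guestwidth_to_adjustwidth() and width_to_agaw() are defined elsewhere in
 * this file, and are assumed here to round up to the 9-bit page-table
 * stride above the 30-bit two-level base):
 *
 *	guest_width  = 48                  48-bit guest address space
 *	adjust_width = 48                  (48 - 12) % 9 == 0, no rounding
 *	agaw = (48 - 30) / 9 = 2           i.e. a 4-level page table
 *
 * If bit 2 of cap_sagaw() is clear, the hardware cannot walk 4-level
 * tables and find_next_bit() falls forward to the next larger supported
 * width; with no supported width at all, domain_init() returns -ENODEV.
 */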
static int domain_context_mapped(struct dmar_domain *domain,
	struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = device_context_mapped(domain->iommu,
		pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(domain->iommu, parent->bus->number,
			parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(domain->iommu,
			tmp->subordinate->number, 0);
	else
		return device_context_mapped(domain->iommu,
			tmp->bus->number, tmp->devfn);
}

static int
domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
			u64 hpa, size_t size, int prot)
{
	u64 start_pfn, end_pfn;
	struct dma_pte *pte;
	int index;

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;
	iova &= PAGE_MASK_4K;
	start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K;
	end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K;
	index = 0;
	while (start_pfn < end_pfn) {
		pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index);
		if (!pte)
			return -ENOMEM;
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		BUG_ON(dma_pte_addr(*pte));
		dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
		dma_set_pte_prot(*pte, prot);
		__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
		start_pfn++;
		index++;
	}
	return 0;
}

static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
{
	clear_context_table(domain->iommu, bus, devfn);
	iommu_flush_context_global(domain->iommu, 0);
	iommu_flush_iotlb_global(domain->iommu, 0);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		detach_domain_for_dev(info->domain, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: the domain info is stored in struct pci_dev->dev.archdata.iommu
 */
struct dmar_domain *find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
     struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

static struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd = NULL;

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
						drhd->devices_cnt, dev))
			return drhd;
	}

	return NULL;
}
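/*
 * Usage sketch for domain_page_mapping() above (hypothetical values,
 * mirroring the identity-map callers below): a 1:1 ("identity") mapping
 * simply passes the same address as both iova and hpa, e.g.
 *
 *	ret = domain_page_mapping(domain, 0xa0000, 0xa0000, 0x20000,
 *				  DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * installs 0x20000 >> PAGE_SHIFT_4K = 32 PTEs, one per 4K page, flushing
 * each PTE so the IOMMU observes the update even without coherent
 * page-walk accesses.
 */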
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	domain = iommu_alloc_domain(iommu);
	if (!domain)
		goto error;

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
{
	struct dmar_domain *domain;
	unsigned long size;
	u64 base;
	int ret;

	printk(KERN_INFO
		"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
		pci_name(pdev), start, end);
	/* page table init */
	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* The address might not be aligned */
	base = start & PAGE_MASK_4K;
	size = end - base;
	size = PAGE_ALIGN_4K(size);
	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
			IOVA_PFN(base + size) - 1)) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		ret = -ENOMEM;
		goto error;
	}

	pr_debug("Mapping reserved region %lx@%llx for %s\n",
		size, base, pci_name(pdev));
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, base, base + size);

	ret = domain_page_mapping(domain, base, base, size,
		DMA_PTE_READ|DMA_PTE_WRITE);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev);
	if (!ret)
		return 0;
error:
	domain_exit(domain);
	return ret;
}
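/*
 * Worked example for the alignment in iommu_prepare_identity_map() above
 * (hypothetical RMRR values): for start = 0xe8500, end = 0xec000:
 *
 *	base = 0xe8500 & PAGE_MASK_4K           = 0xe8000
 *	size = PAGE_ALIGN_4K(0xec000 - 0xe8000) = 0x4000
 *
 * so page frames 0xe8..0xeb (IOVA_PFN(base) .. IOVA_PFN(base + size) - 1)
 * are reserved in the domain's IOVA allocator before being identity
 * mapped, keeping the DMA allocator from ever handing out that range.
 */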
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
	struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
		rmrr->end_address + 1);
}

#ifdef CONFIG_DMAR_GFX_WA
extern int arch_get_ram_range(int slot, u64 *addr, u64 *size);
static void __init iommu_prepare_gfx_mapping(void)
{
	struct pci_dev *pdev = NULL;
	u64 base, size;
	int slot;
	int ret;

	for_each_pci_dev(pdev) {
		if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
				!IS_GFX_DEVICE(pdev))
			continue;
		printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
			pci_name(pdev));
		slot = arch_get_ram_range(0, &base, &size);
		while (slot >= 0) {
			ret = iommu_prepare_identity_map(pdev,
					base, base + size);
			if (ret)
				goto error;
			slot = arch_get_ram_range(slot, &base, &size);
		}
		continue;
error:
		printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
	}
}
#endif

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk("IOMMU: Failed to create 0-16M identity map, "
			"floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int ret, unit = 0;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = alloc_iommu(drhd);
		if (!iommu) {
			ret = -ENOMEM;
			goto error;
		}

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
	}

	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	for_each_rmrr_units(rmrr) {
		int i;
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/* some BIOSes list non-existent devices in the DMAR table */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				 "IOMMU: mapping reserved region failed\n");
		}
	}
