
📄 sworks-agp.c

📁 linux-2.6.15.6
💻 C
📖 Page 1 of 2
/*
 * Serverworks AGPGART routines.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/agp_backend.h>
#include "agp.h"

#define SVWRKS_COMMAND		0x04
#define SVWRKS_APSIZE		0x10
#define SVWRKS_MMBASE		0x14
#define SVWRKS_CACHING		0x4b
#define SVWRKS_AGP_ENABLE	0x60
#define SVWRKS_FEATURE		0x68

#define SVWRKS_SIZE_MASK	0xfe000000

/* Memory mapped registers */
#define SVWRKS_GART_CACHE	0x02
#define SVWRKS_GATTBASE		0x04
#define SVWRKS_TLBFLUSH		0x10
#define SVWRKS_POSTFLUSH	0x14
#define SVWRKS_DIRFLUSH		0x0c

struct serverworks_page_map {
	unsigned long *real;
	unsigned long __iomem *remapped;
};

static struct _serverworks_private {
	struct pci_dev *svrwrks_dev;	/* device one */
	volatile u8 __iomem *registers;
	struct serverworks_page_map **gatt_pages;
	int num_tables;
	struct serverworks_page_map scratch_dir;

	int gart_addr_ofs;
	int mm_addr_ofs;
} serverworks_private;

static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}
	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
					    PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
		writel(agp_bridge->scratch_page, page_map->remapped+i);

	return 0;
}

static void serverworks_free_page_map(struct serverworks_page_map *page_map)
{
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}

static void serverworks_free_gatt_pages(void)
{
	int i;
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;

	tables = serverworks_private.gatt_pages;
	for(i = 0; i < serverworks_private.num_tables; i++) {
		entry = tables[i];
		if (entry != NULL) {
			if (entry->real != NULL) {
				serverworks_free_page_map(entry);
			}
			kfree(entry);
		}
	}
	kfree(tables);
}

static int serverworks_create_gatt_pages(int nr_tables)
{
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;
	int retval = 0;
	int i;

	tables = kzalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
			 GFP_KERNEL);
	if (tables == NULL)
		return -ENOMEM;

	for (i = 0; i < nr_tables; i++) {
		entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		tables[i] = entry;
		retval = serverworks_create_page_map(entry);
		if (retval != 0) break;
	}
	serverworks_private.num_tables = nr_tables;
	serverworks_private.gatt_pages = tables;

	if (retval != 0) serverworks_free_gatt_pages();

	return retval;
}

#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)

#ifndef GET_PAGE_DIR_OFF
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#endif

#ifndef GET_PAGE_DIR_IDX
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#endif

#ifndef GET_GATT_OFF
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#endif

static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct serverworks_page_map page_dir;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = serverworks_create_page_map(&page_dir);
	if (retval != 0) {
		return retval;
	}
	retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		return retval;
	}
	/* Create a fake scratch directory */
	for(i = 0; i < 1024; i++) {
		writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
		writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
	}

	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		serverworks_free_page_map(&serverworks_private.scratch_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c its
	 * used to program the agp master not the cpu
	 */
	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* Calculate the agp offset */
	for(i = 0; i < value->num_entries / 1024; i++)
		writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);

	return 0;
}

static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
{
	struct serverworks_page_map page_dir;

	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

	serverworks_free_gatt_pages();
	serverworks_free_page_map(&page_dir);
	serverworks_free_page_map(&serverworks_private.scratch_dir);
	return 0;
}

static int serverworks_fetch_size(void)
{
	int i;
	u32 temp;
	u32 temp2;
	struct aper_size_info_lvl2 *values;

	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
	pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,
					SVWRKS_SIZE_MASK);
	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2);
	pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp);
	temp2 &= SVWRKS_SIZE_MASK;

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp2 == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}

/*
 * This routine could be implemented by taking the addresses
 * written to the GATT, and flushing them individually.  However
 * currently it just flushes the whole table.  Which is probably
 * more efficent, since agp_memory blocks can be a large number of
 * entries.
 */
static void serverworks_tlbflush(struct agp_memory *temp)
{
	unsigned long timeout;

	writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
	timeout = jiffies + 3*HZ;
	while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR PFX "TLB post flush took more than 3 seconds\n");
			break;
		}
	}

	writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
	timeout = jiffies + 3*HZ;
	while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR PFX "TLB Dir flush took more than 3 seconds\n");
			break;
		}
	}
}

static int serverworks_configure(void)
{
	struct aper_size_info_lvl2 *current_size;
	u32 temp;
	u8 enable_reg;
	u16 cap_reg;

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
	if (!serverworks_private.registers) {
		printk (KERN_ERR PFX "Unable to ioremap() memory.\n");
		return -ENOMEM;
	}

	writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
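
The GET_PAGE_DIR_OFF / GET_PAGE_DIR_IDX / GET_GATT_OFF macros defined earlier in this file split an AGP bus address into a page-directory index (one directory entry per 4 MB of aperture, i.e. 1024 GATT entries of 4 KB each) and a GATT entry index within that directory page. The standalone sketch below is not part of the driver; it only prints what those macros compute, with a hypothetical aperture base and address chosen for illustration, and with the aperture base passed in explicitly instead of being read from agp_bridge->gart_bus_addr.

/*
 * Minimal standalone sketch (not part of sworks-agp.c): demonstrates the
 * address split performed by the driver's two-level lookup macros.
 * The aperture base and mapped address are hypothetical example values.
 */
#include <stdio.h>

/* Copies of the driver's macros, with the base made an explicit argument. */
#define GET_PAGE_DIR_OFF(addr)		((addr) >> 22)
#define GET_PAGE_DIR_IDX(addr, base)	(GET_PAGE_DIR_OFF(addr) - GET_PAGE_DIR_OFF(base))
#define GET_GATT_OFF(addr)		(((addr) & 0x003ff000) >> 12)

int main(void)
{
	unsigned long gart_bus_addr = 0xd0000000UL;	/* hypothetical aperture base */
	unsigned long addr = 0xd0805000UL;		/* hypothetical AGP bus address */

	/* Each page-directory entry covers 4 MB (1024 GATT entries x 4 KB pages). */
	printf("page dir index: %lu\n", GET_PAGE_DIR_IDX(addr, gart_bus_addr));	/* prints 2 */
	printf("GATT offset:    %lu\n", GET_GATT_OFF(addr));			/* prints 5 */
	return 0;
}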
