⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 agpgart_be.c

📁 内核linux2.4.20,可跟rtlinux3.2打补丁 组成实时linux系统,编译内核
💻 C
📖 第 1 页 / 共 5 页
字号:
/* * AGPGART module version 0.99 * Copyright (C) 1999 Jeff Hartmann * Copyright (C) 1999 Precision Insight, Inc. * Copyright (C) 1999 Xi Graphics, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE  * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */

#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/page.h>
#include <linux/agp_backend.h>
#include "agp.h"

MODULE_AUTHOR("Jeff Hartmann <jhartmann@precisioninsight.com>");
MODULE_PARM(agp_try_unsupported, "1i");
MODULE_LICENSE("GPL and additional rights");

/* Public entry points of the AGP backend, used by DRM and agpgart frontend. */
EXPORT_SYMBOL(agp_free_memory);
EXPORT_SYMBOL(agp_allocate_memory);
EXPORT_SYMBOL(agp_copy_info);
EXPORT_SYMBOL(agp_bind_memory);
EXPORT_SYMBOL(agp_unbind_memory);
EXPORT_SYMBOL(agp_enable);
EXPORT_SYMBOL(agp_backend_acquire);
EXPORT_SYMBOL(agp_backend_release);

static void flush_cache(void);

/* Single global bridge descriptor: this driver supports exactly one
 * AGP bridge per system; all routines below operate on this instance. */
static struct agp_bridge_data agp_bridge;
static int agp_try_unsupported __initdata = 0;

/*
 * Flush CPU caches to memory on the local CPU, so the GART sees
 * up-to-date page contents before remapping.
 */
static inline void flush_cache(void)
{
#if defined(__i386__) || defined(__x86_64__)
	/* wbinvd writes back and invalidates all cache lines */
	asm volatile ("wbinvd":::"memory");
#elif defined(__alpha__) || defined(__ia64__) || defined(__sparc__)
	/* ??? I wonder if we'll really need to flush caches, or if the
	   core logic can manage to keep the system coherent.  The ARM
	   speaks only of using `cflush' to get things in memory in
	   preparation for power failure.

	   If we do need to call `cflush', we'll need a target page,
	   as we can only flush one page at a time.

	   Ditto for IA-64.  --davidm 00/08/07 */
	mb();
#else
#error "Please define flush_cache."
#endif
}

#ifdef CONFIG_SMP
/* Count of CPUs that still have to complete their cache flush. */
static atomic_t cpus_waiting;

/* IPI target: flush the local cache, then spin until every CPU is done
 * so all processors leave the flush at the same time. */
static void ipi_handler(void *null)
{
	flush_cache();
	atomic_dec(&cpus_waiting);
	while (atomic_read(&cpus_waiting) > 0)
		barrier();
}

/* Flush caches on every CPU: IPI the other CPUs, flush locally, and
 * wait for all of them to acknowledge. */
static void smp_flush_cache(void)
{
	atomic_set(&cpus_waiting, smp_num_cpus - 1);
	if (smp_call_function(ipi_handler, NULL, 1, 0) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
	flush_cache();
	while (atomic_read(&cpus_waiting) > 0)
		barrier();
}
#define global_cache_flush smp_flush_cache
#else				/* CONFIG_SMP */
#define global_cache_flush flush_cache
#endif				/* CONFIG_SMP */

/*
 * agp_backend_acquire - claim exclusive use of the AGP backend.
 *
 * Returns 0 on success, -EINVAL if no supported bridge was found,
 * -EBUSY if another user already holds the backend.  On success the
 * module use count is raised; pair with agp_backend_release().
 */
int agp_backend_acquire(void)
{
	if (agp_bridge.type == NOT_SUPPORTED) {
		return -EINVAL;
	}
	/* inc-then-check: if we were not the first user, back out */
	atomic_inc(&agp_bridge.agp_in_use);

	if (atomic_read(&agp_bridge.agp_in_use) != 1) {
		atomic_dec(&agp_bridge.agp_in_use);
		return -EBUSY;
	}
	MOD_INC_USE_COUNT;
	return 0;
}

/*
 * agp_backend_release - drop the exclusive claim taken by
 * agp_backend_acquire() and release the module use count.
 */
void agp_backend_release(void)
{
	if (agp_bridge.type == NOT_SUPPORTED) {
		return;
	}
	atomic_dec(&agp_bridge.agp_in_use);
	MOD_DEC_USE_COUNT;
}

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the
 * brunt of the work.
*/

/* Return a key to the bridge's key bitmap.  Negative keys (allocation
 * failures) are ignored. */
static void agp_free_key(int key)
{
	if (key < 0) {
		return;
	}
	if (key < MAXKEY) {
		clear_bit(key, agp_bridge.key_list);
	}
}

/* Allocate the lowest free key from the bridge's key bitmap.
 * Returns the key, or -1 if all MAXKEY keys are in use. */
static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge.key_list);
		return bit;
	}
	return -1;
}

/*
 * Allocate and zero an agp_memory descriptor with room for
 * scratch_pages worth of GATT entries in its ->memory array.
 * Returns NULL on any allocation failure (all partial state is undone).
 */
static agp_memory *agp_create_memory(int scratch_pages)
{
	agp_memory *new;

	new = kmalloc(sizeof(agp_memory), GFP_KERNEL);

	if (new == NULL) {
		return NULL;
	}
	memset(new, 0, sizeof(agp_memory));
	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}
	new->memory = vmalloc(PAGE_SIZE * scratch_pages);

	if (new->memory == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	return new;
}

/*
 * agp_free_memory - release an agp_memory block and its pages.
 *
 * Unbinds the block first if it is still bound.  Non-zero ->type blocks
 * are handed to the chipset-specific free_by_type() hook; type 0 blocks
 * have each backing page destroyed here.  Drops the module use count
 * taken by agp_allocate_memory().  NULL curr is a no-op.
 */
void agp_free_memory(agp_memory * curr)
{
	int i;

	if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) {
		return;
	}
	if (curr->is_bound == TRUE) {
		agp_unbind_memory(curr);
	}
	if (curr->type != 0) {
		agp_bridge.free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		for (i = 0; i < curr->page_count; i++) {
			/* strip the low 12 mask bits to recover the
			 * physical page address stored by mask_memory() */
			curr->memory[i] &= ~(0x00000fff);
			agp_bridge.agp_destroy_page((unsigned long)
					 phys_to_virt(curr->memory[i]));
		}
	}
	agp_free_key(curr->key);
	vfree(curr->memory);
	kfree(curr);
	MOD_DEC_USE_COUNT;
}

/* GATT entries that fit in one scratch page of the ->memory array */
#define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))

/*
 * agp_allocate_memory - allocate page_count pages of AGP memory.
 *
 * Non-zero type requests are delegated to the chipset-specific
 * alloc_by_type() hook.  For type 0, each page is allocated via
 * agp_alloc_page() and its masked physical address stored in
 * ->memory[].  Returns NULL on failure or when the bridge-wide
 * max_memory_agp limit would be exceeded.  The returned block is
 * freed with agp_free_memory().
 */
agp_memory *agp_allocate_memory(size_t page_count, u32 type)
{
	int scratch_pages;
	agp_memory *new;
	int i;

	if (agp_bridge.type == NOT_SUPPORTED) {
		return NULL;
	}
	/* enforce the global AGP memory budget */
	if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) >
	    agp_bridge.max_memory_agp) {
		return NULL;
	}

	if (type != 0) {
		new = agp_bridge.alloc_by_type(page_count, type);
		return new;
	}
      	/* We always increase the module count, since free auto-decrements
	 * it
	 */
      	MOD_INC_USE_COUNT;

	/* round up to whole scratch pages of GATT entries */
	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL) {
	      	MOD_DEC_USE_COUNT;
		return NULL;
	}
	for (i = 0; i < page_count; i++) {
		new->memory[i] = agp_bridge.agp_alloc_page();

		if (new->memory[i] == 0) {
			/* Free this structure */
			agp_free_memory(new);
			return NULL;
		}
		/* store the chipset-masked physical address of the page */
		new->memory[i] =
		    agp_bridge.mask_memory(
				   virt_to_phys((void *) new->memory[i]),
						  type);
		new->page_count++;
	}
	return new;
}

/* End - Generic routines for handling agp_memory structures */

/* Decode the current aperture size (in the driver's size units) from
 * whichever aperture-size representation this chipset uses. */
static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge.current_size;

	switch (agp_bridge.size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	return current_size;
}

/* Routine to copy over information structure */

/*
 * agp_copy_info - fill *info with a snapshot of the bridge state
 * (version, chipset type, mode, aperture base/size, memory usage and
 * the page mask derived from the chipset's mask_memory() hook).
 * If no supported bridge is present, only ->chipset is meaningful.
 */
void agp_copy_info(agp_kern_info * info)
{
	unsigned long page_mask = 0;
	int i;

	memset(info, 0, sizeof(agp_kern_info));

	if (agp_bridge.type == NOT_SUPPORTED) {
		info->chipset = agp_bridge.type;
		return;
	}
	info->version.major = agp_bridge.version->major;
	info->version.minor = agp_bridge.version->minor;
	info->device = agp_bridge.dev;
	info->chipset = agp_bridge.type;
	info->mode = agp_bridge.mode;
	info->aper_base = agp_bridge.gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = agp_bridge.max_memory_agp;
	info->current_memory = atomic_read(&agp_bridge.current_memory_agp);
	info->cant_use_aperture = agp_bridge.cant_use_aperture;

	/* accumulate every mask the chipset can apply, then invert to
	 * obtain the usable page-address mask */
	for(i = 0; i < agp_bridge.num_of_masks; i++)
		page_mask |= agp_bridge.mask_memory(page_mask, i);

	info->page_mask = ~page_mask;
}

/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
*/

/*
 * agp_bind_memory - map curr into the GATT starting at page pg_start.
 *
 * Flushes CPU caches (once per block, tracked by ->is_flushed) before
 * the chipset-specific insert_memory() hook writes the GATT entries.
 * Returns 0 on success, -EINVAL for a missing bridge / NULL block /
 * already-bound block, or the hook's error code.
 */
int agp_bind_memory(agp_memory * curr, off_t pg_start)
{
	int ret_val;

	if ((agp_bridge.type == NOT_SUPPORTED) ||
	    (curr == NULL) || (curr->is_bound == TRUE)) {
		return -EINVAL;
	}
	if (curr->is_flushed == FALSE) {
		CACHE_FLUSH();
		curr->is_flushed = TRUE;
	}
	ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0) {
		return ret_val;
	}
	curr->is_bound = TRUE;
	curr->pg_start = pg_start;
	return 0;
}

/*
 * agp_unbind_memory - remove curr from the GATT (inverse of
 * agp_bind_memory).  Returns 0 on success, -EINVAL for a missing
 * bridge / NULL block / block that is not bound, or the
 * chipset hook's error code.
 */
int agp_unbind_memory(agp_memory * curr)
{
	int ret_val;

	if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) {
		return -EINVAL;
	}
	if (curr->is_bound != TRUE) {
		return -EINVAL;
	}
	ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0) {
		return ret_val;
	}
	curr->is_bound = FALSE;
	curr->pg_start = 0;
	return 0;
}

/* End - Routines for handling swapping of agp_memory into the GATT */

/*
 * Driver routines - start
 * Currently this module supports the following chipsets:
 * i810, i815, 440lx, 440bx, 440gx, i830, i840, i845, i850, i860, via vp3,
 * via mvp3, via kx133, via kt133, amd irongate, amd 761, amd 762, ALi M1541,
 * and generic support for the SiS chipsets.
 */

/* Generic Agp routines - Start */

/*
 * agp_generic_agp_enable - negotiate a common AGP mode between the
 * bridge and every AGP-capable device, starting from the bridge's
 * status register (at capndx + 4) and the caller-requested mode.
 */
static void agp_generic_agp_enable(u32 mode)
{
	struct pci_dev *device = NULL;
	u32 command, scratch, cap_id;
	u8 cap_ptr;

	pci_read_config_dword(agp_bridge.dev,
			      agp_bridge.capndx + 4,
			      &command);

	/*
	 * PASS1: go throu all devices that claim to be
	 *        AGP devices and collect their data.
	 */
	pci_for_each_dev(device)
	{
		/*
		 *	Enable AGP devices. Most will be VGA display but
		 *	some may be coprocessors on non VGA devices too
		 */
		 
		if((((device->class >> 16) & 0xFF) != PCI_BASE_CLASS_DISPLAY) &&
			(device->class != (PCI_CLASS_PROCESSOR_CO << 8)))
			continue;

		/* config dword 0x04 holds command (low word) and status
		 * (high word); 0x00100000 is the status capabilities-list
		 * bit — skip devices with no capability list */
		pci_read_config_dword(device, 0x04, &scratch);

		if (!(scratch & 0x00100000))
			continue;

		/* 0x34 = capability list head pointer; walk the list
		 * looking for capability ID 0x02 (AGP) */
		pci_read_config_byte(device, 0x34, &cap_ptr);

		if (cap_ptr != 0x00) {
			do {
				pci_read_config_dword(device,
						      cap_ptr, &cap_id);

				if ((cap_id & 0xff) != 0x02)
					cap_ptr = (cap_id >> 8) & 0xff;
			}
			while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
		}
		if (cap_ptr != 0x00) {
			/*
			 * Ok, here we have a AGP device. Disable impossible 
			 * settings, and adjust the readqueue to the minimum.
			 */

			pci_read_config_dword(device, cap_ptr + 4, &scratch);

			/* adjust RQ depth */
			command =
			    ((command & ~0xff000000) |
			     min_t(u32, (mode & 0xff000000),
				 min_t(u32, (command & 0xff000000),
				     (scratch & 0xff000000))));

			/* disable SBA if it's not supported */
			if (!((command & 0x00000200) &&
			      (scratch & 0x00000200) &&
			      (mode & 0x00000200)))
				command &= ~0x00000200;

			/* disable FW if it's not supported */
			if (!((command & 0x00000010) &&
			      (scratch & 0x00000010) &&
			      (mode & 0x00000010)))
				command &= ~0x00000010;

			/* keep a data rate bit (4X/2X/1X) only if bridge,
			 * device and requested mode all support it */
			if (!((command & 4) &&
			      (scratch & 4) &&
			      (mode & 4)))
				command &= ~0x00000004;

			if (!((command & 2) &&
			      (scratch & 2) &&
			      (mode & 2)))
				command &= ~0x00000002;

			if (!((command & 1) &&
			      (scratch & 1) &&
			      (mode & 1)))
				command &= ~0x00000001;
		}
	}
	/*
	 * PASS2: Figure out the 4X/2X/1X setting and enable the
	 *        target (our motherboard chipset).
	 */

	/* select the single highest rate that survived PASS1 */
	if (command & 4) {
		command &= ~3;	/* 4X */
	}
	if (command & 2) {
		command &= ~5;	/* 2X */
	}
	if (command & 1) {
		command &= ~6;	/* 1X */
	}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -