📄 agpgart_be.c
    command |= 0x00000100;
    pci_write_config_dword(agp_bridge.dev, agp_bridge.capndx + 8, command);

    /*
     * PASS3: Go through all AGP devices and update the
     * command registers.
     */
    while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, device)) != NULL) {
        pci_read_config_dword(device, 0x04, &scratch);

        if (!(scratch & 0x00100000))
            continue;

        pci_read_config_byte(device, 0x34, &cap_ptr);

        if (cap_ptr != 0x00) {
            do {
                pci_read_config_dword(device, cap_ptr, &cap_id);

                if ((cap_id & 0xff) != 0x02)
                    cap_ptr = (cap_id >> 8) & 0xff;
            }
            while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
        }
        if (cap_ptr != 0x00)
            pci_write_config_dword(device, cap_ptr + 8, command);
    }
}
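/*
 * The loop above walks each VGA-class device's PCI capability list
 * (pointer at config offset 0x34) until it finds the AGP capability
 * (ID 0x02), then writes the negotiated command word to that device's
 * AGP command register at cap_ptr + 8, mirroring the value just written
 * to the bridge.  Devices whose PCI status word does not advertise a
 * capability list are skipped.
 */

/*
 * Generic GATT handling.  agp_generic_create_gatt_table() retries with
 * the next aperture size in the table until __get_free_pages() can
 * satisfy the page-table allocation, marks the backing pages reserved,
 * maps the table uncached with ioremap_nocache(), and points every
 * entry at the scratch page.
 */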
static int agp_generic_create_gatt_table(void)
{
    char *table;
    char *table_end;
    int size;
    int page_order;
    int num_entries;
    int i;
    void *temp;
    struct page *page;

    /* The generic routines can't handle 2 level gatt's */
    if (agp_bridge.size_type == LVL2_APER_SIZE) {
        return -EINVAL;
    }

    table = NULL;
    i = agp_bridge.aperture_size_idx;
    temp = agp_bridge.current_size;
    size = page_order = num_entries = 0;

    if (agp_bridge.size_type != FIXED_APER_SIZE) {
        do {
            switch (agp_bridge.size_type) {
            case U8_APER_SIZE:
                size = A_SIZE_8(temp)->size;
                page_order = A_SIZE_8(temp)->page_order;
                num_entries = A_SIZE_8(temp)->num_entries;
                break;
            case U16_APER_SIZE:
                size = A_SIZE_16(temp)->size;
                page_order = A_SIZE_16(temp)->page_order;
                num_entries = A_SIZE_16(temp)->num_entries;
                break;
            case U32_APER_SIZE:
                size = A_SIZE_32(temp)->size;
                page_order = A_SIZE_32(temp)->page_order;
                num_entries = A_SIZE_32(temp)->num_entries;
                break;
            /* This case will never really happen. */
            case FIXED_APER_SIZE:
            case LVL2_APER_SIZE:
            default:
                size = page_order = num_entries = 0;
                break;
            }

            table = (char *) __get_free_pages(GFP_KERNEL, page_order);

            if (table == NULL) {
                i++;
                switch (agp_bridge.size_type) {
                case U8_APER_SIZE:
                    agp_bridge.current_size = A_IDX8();
                    break;
                case U16_APER_SIZE:
                    agp_bridge.current_size = A_IDX16();
                    break;
                case U32_APER_SIZE:
                    agp_bridge.current_size = A_IDX32();
                    break;
                /* This case will never really happen. */
                case FIXED_APER_SIZE:
                case LVL2_APER_SIZE:
                default:
                    agp_bridge.current_size = agp_bridge.current_size;
                    break;
                }
            } else {
                agp_bridge.aperture_size_idx = i;
            }
        } while ((table == NULL) && (i < agp_bridge.num_aperture_sizes));
    } else {
        size = ((aper_size_info_fixed *) temp)->size;
        page_order = ((aper_size_info_fixed *) temp)->page_order;
        num_entries = ((aper_size_info_fixed *) temp)->num_entries;
        table = (char *) __get_free_pages(GFP_KERNEL, page_order);
    }

    if (table == NULL) {
        return -ENOMEM;
    }
    table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

    for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
        set_bit(PG_reserved, &page->flags);

    agp_bridge.gatt_table_real = (unsigned long *) table;
    CACHE_FLUSH();
    agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
                                            (PAGE_SIZE * (1 << page_order)));
    CACHE_FLUSH();

    if (agp_bridge.gatt_table == NULL) {
        for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
            clear_bit(PG_reserved, &page->flags);

        free_pages((unsigned long) table, page_order);

        return -ENOMEM;
    }
    agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);

    for (i = 0; i < num_entries; i++) {
        agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page;
    }

    return 0;
}

static int agp_generic_suspend(void)
{
    return 0;
}

static void agp_generic_resume(void)
{
    return;
}
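/*
 * Teardown and the generic mapping routines.
 * agp_generic_free_gatt_table() undoes the work above: it unmaps the
 * table, clears PG_reserved on the backing pages and returns them with
 * free_pages().  agp_generic_insert_memory() copies the caller-provided
 * entries (mem->memory[i]) into free GATT slots and then calls the
 * chipset's tlb_flush hook; agp_generic_remove_memory() simply
 * re-points the affected slots at the scratch page.
 */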
static int agp_generic_free_gatt_table(void)
{
    int page_order;
    char *table, *table_end;
    void *temp;
    struct page *page;

    temp = agp_bridge.current_size;

    switch (agp_bridge.size_type) {
    case U8_APER_SIZE:
        page_order = A_SIZE_8(temp)->page_order;
        break;
    case U16_APER_SIZE:
        page_order = A_SIZE_16(temp)->page_order;
        break;
    case U32_APER_SIZE:
        page_order = A_SIZE_32(temp)->page_order;
        break;
    case FIXED_APER_SIZE:
        page_order = A_SIZE_FIX(temp)->page_order;
        break;
    case LVL2_APER_SIZE:
        /* The generic routines can't deal with 2 level gatt's */
        return -EINVAL;
        break;
    default:
        page_order = 0;
        break;
    }

    /* Do not worry about freeing memory, because if this is
     * called, then all agp memory is deallocated and removed
     * from the table.
     */
    iounmap(agp_bridge.gatt_table);
    table = (char *) agp_bridge.gatt_table_real;
    table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

    for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
        clear_bit(PG_reserved, &page->flags);

    free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
    return 0;
}

static int agp_generic_insert_memory(agp_memory * mem, off_t pg_start, int type)
{
    int i, j, num_entries;
    void *temp;

    temp = agp_bridge.current_size;

    switch (agp_bridge.size_type) {
    case U8_APER_SIZE:
        num_entries = A_SIZE_8(temp)->num_entries;
        break;
    case U16_APER_SIZE:
        num_entries = A_SIZE_16(temp)->num_entries;
        break;
    case U32_APER_SIZE:
        num_entries = A_SIZE_32(temp)->num_entries;
        break;
    case FIXED_APER_SIZE:
        num_entries = A_SIZE_FIX(temp)->num_entries;
        break;
    case LVL2_APER_SIZE:
        /* The generic routines can't deal with 2 level gatt's */
        return -EINVAL;
        break;
    default:
        num_entries = 0;
        break;
    }

    if (type != 0 || mem->type != 0) {
        /* The generic routines know nothing of memory types */
        return -EINVAL;
    }
    if ((pg_start + mem->page_count) > num_entries) {
        return -EINVAL;
    }
    j = pg_start;

    while (j < (pg_start + mem->page_count)) {
        if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
            return -EBUSY;
        }
        j++;
    }

    if (mem->is_flushed == FALSE) {
        CACHE_FLUSH();
        mem->is_flushed = TRUE;
    }
    for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
        agp_bridge.gatt_table[j] = mem->memory[i];
    }

    agp_bridge.tlb_flush(mem);
    return 0;
}

static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start, int type)
{
    int i;

    if (type != 0 || mem->type != 0) {
        /* The generic routines know nothing of memory types */
        return -EINVAL;
    }
    for (i = pg_start; i < (mem->page_count + pg_start); i++) {
        agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page;
    }

    agp_bridge.tlb_flush(mem);
    return 0;
}

static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
    return NULL;
}

static void agp_generic_free_by_type(agp_memory * curr)
{
    if (curr->memory != NULL) {
        vfree(curr->memory);
    }
    agp_free_key(curr->key);
    kfree(curr);
}
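/*
 * Illustrative sketch (not part of the original file): the generic
 * routines above are not called directly; a chipset-specific setup
 * routine is expected to hang them off the global agp_bridge structure,
 * roughly as below.  The hook names here are assumptions based on how
 * the chipset setup code elsewhere in this driver fills in agp_bridge;
 * treat this as a sketch, not a drop-in implementation.
 */
#if 0
static int example_chipset_setup(struct pci_dev *pdev)
{
    /* Chipset-specific hooks (fetch_size, configure, cleanup,
     * tlb_flush, mask_memory, ...) would be filled in here. */
    agp_bridge.dev = pdev;
    agp_bridge.needs_scratch_page = FALSE;

    /* Reuse the generic single-level GATT management. */
    agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
    agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
    agp_bridge.insert_memory = agp_generic_insert_memory;
    agp_bridge.remove_memory = agp_generic_remove_memory;
    agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
    agp_bridge.free_by_type = agp_generic_free_by_type;
    agp_bridge.agp_alloc_page = agp_generic_alloc_page;
    agp_bridge.agp_destroy_page = agp_generic_destroy_page;
    agp_bridge.suspend = agp_generic_suspend;
    agp_bridge.resume = agp_generic_resume;

    return 0;
}
#endif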
/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation
 * and by default they reserve the allocated
 * memory.  They also handle incrementing the
 * current_memory_agp value, which is checked
 * against a maximum value.
 */

static unsigned long agp_generic_alloc_page(void)
{
    struct page * page;

    page = alloc_page(GFP_KERNEL);
    if (page == NULL) {
        return 0;
    }
    atomic_inc(&page->count);
    set_bit(PG_locked, &page->flags);
    atomic_inc(&agp_bridge.current_memory_agp);
    return (unsigned long)page_address(page);
}

static void agp_generic_destroy_page(unsigned long addr)
{
    void *pt = (void *) addr;
    struct page *page;

    if (pt == NULL) {
        return;
    }
    page = virt_to_page(pt);
    atomic_dec(&page->count);
    clear_bit(PG_locked, &page->flags);
    wake_up(&page->wait);
    free_page((unsigned long) pt);
    atomic_dec(&agp_bridge.current_memory_agp);
}

/* End Basic Page Allocation Routines */

void agp_enable(u32 mode)
{
    if (agp_bridge.type == NOT_SUPPORTED)
        return;
    agp_bridge.agp_enable(mode);
}

/* End - Generic Agp routines */

#ifdef CONFIG_AGP_I810
static aper_size_info_fixed intel_i810_sizes[] =
{
    {64, 16384, 4},
    /* The 32M mode still requires a 64k gatt */
    {32, 8192, 4}
};

#define AGP_DCACHE_MEMORY 1
#define AGP_PHYS_MEMORY   2

static gatt_mask intel_i810_masks[] =
{
    {I810_PTE_VALID, 0},
    {(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY},
    {I810_PTE_VALID, 0}
};

static struct _intel_i810_private {
    struct pci_dev *i810_dev;   /* device one */
    volatile u8 *registers;
    int num_dcache_entries;
} intel_i810_private;

static int intel_i810_fetch_size(void)
{
    u32 smram_miscc;
    aper_size_info_fixed *values;

    pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc);
    values = A_SIZE_FIX(agp_bridge.aperture_sizes);

    if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
        printk(KERN_WARNING PFX "i810 is disabled\n");
        return 0;
    }
    if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
        agp_bridge.previous_size =
            agp_bridge.current_size = (void *) (values + 1);
        agp_bridge.aperture_size_idx = 1;
        return values[1].size;
    } else {
        agp_bridge.previous_size =
            agp_bridge.current_size = (void *) (values);
        agp_bridge.aperture_size_idx = 0;
        return values[0].size;
    }

    return 0;
}

static int intel_i810_configure(void)
{
    aper_size_info_fixed *current_size;
    u32 temp;
    int i;

    current_size = A_SIZE_FIX(agp_bridge.current_size);

    pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
    temp &= 0xfff80000;
    intel_i810_private.registers =
        (volatile u8 *) ioremap(temp, 128 * 4096);

    if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL)
         & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
        /* This will need to be dynamically assigned */
        printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n");
        intel_i810_private.num_dcache_entries = 1024;
    }
    pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
    agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
    OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL,
             agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED);
    CACHE_FLUSH();

    if (agp_bridge.needs_scratch_page == TRUE) {
        for (i = 0; i < current_size->num_entries; i++) {
            OUTREG32(intel_i810_private.registers,
                     I810_PTE_BASE + (i * 4),
                     agp_bridge.scratch_page);
        }
    }
    return 0;
}

static void intel_i810_cleanup(void)
{
    OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0);
    iounmap((void *) intel_i810_private.registers);
}

static void intel_i810_tlbflush(agp_memory * mem)
{
    return;
}

static void intel_i810_agp_enable(u32 mode)
{
    return;
}
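/*
 * i810 insert path.  AGP_DCACHE_MEMORY requests map the dedicated
 * video RAM (the display cache) by writing local, valid PTEs through
 * the MMIO window at I810_PTE_BASE - the same path
 * intel_i810_configure() uses to point every entry at the scratch
 * page.  AGP_PHYS_MEMORY and ordinary requests fall through to the
 * common insert loop below.
 */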
static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start, int type)
{
    int i, j, num_entries;
    void *temp;

    temp = agp_bridge.current_size;
    num_entries = A_SIZE_FIX(temp)->num_entries;

    if ((pg_start + mem->page_count) > num_entries) {
        return -EINVAL;
    }
    for (j = pg_start; j < (pg_start + mem->page_count); j++) {
        if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
            return -EBUSY;
        }
    }

    if (type != 0 || mem->type != 0) {
        if ((type == AGP_DCACHE_MEMORY) &&
            (mem->type == AGP_DCACHE_MEMORY)) {
            /* special insert */
            CACHE_FLUSH();
            for (i = pg_start; i < (pg_start + mem->page_count); i++) {
                OUTREG32(intel_i810_private.registers,
                         I810_PTE_BASE + (i * 4),
                         (i * 4096) | I810_PTE_LOCAL | I810_PTE_VALID);
            }
            CACHE_FLUSH();
            agp_bridge.tlb_flush(mem);
            return 0;
        }
        if ((type == AGP_PHYS_MEMORY) &&
            (mem->type == AGP_PHYS_MEMORY)) {
            goto insert;
        }
        return -EINVAL;
    }

insert:
    CACHE_FLUSH();
    for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {