📄 sun4c.c
字号:
/* NOTE(review): the statements down to the first closing brace are the
 * tail of a function whose start lies before this view (the ring labels
 * suggest the MMU-entry allocation strategy routine) -- left as found.
 */
	remove_lru(entry);
	sun4c_set_context(savectx);
	return entry;

unlink_out:
	remove_ring(&sun4c_ufree_ring, entry);
	return entry;

kunlink_out:
	remove_ring(&sun4c_kfree_ring, entry);
	return entry;
}

/* Move one free MMU entry from the user free ring onto the kernel free
 * ring, bounded by max_user_taken_entries.
 * NOTE: Must be called with interrupts disabled.
 */
void sun4c_grow_kernel_ring(void)
{
	struct sun4c_mmu_entry *entry;

	/* Prevent deadlock condition. */
	if (sun4c_user_taken_entries >= max_user_taken_entries)
		return;

	if (sun4c_ufree_ring.num_entries) {
		entry = sun4c_ufree_ring.ringhd.next;
		remove_ring(&sun4c_ufree_ring, entry);
		add_ring(&sun4c_kfree_ring, entry);
		sun4c_user_taken_entries++;
	}
}

/* 2 page buckets for task struct and kernel stack allocation.
 *
 * TASK_STACK_BEGIN
 * bucket[0]
 * bucket[1]
 * [ ... ]
 * bucket[NR_TASK_BUCKETS-1]
 * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASK_BUCKETS)
 *
 * Each slot looks like:
 *
 * page 1 -- task struct + beginning of kernel stack
 * page 2 -- rest of kernel stack
 */
union task_union *sun4c_bucket[NR_TASK_BUCKETS];

/* Lowest index that might be free; a hint to shorten the linear scan
 * in sun4c_alloc_task_struct().
 */
static int sun4c_lowbucket_avail;

#define BUCKET_EMPTY ((union task_union *) 0)
#define BUCKET_SHIFT (PAGE_SHIFT + 1)        /* log2(sizeof(struct task_bucket)) */
#define BUCKET_SIZE (1 << BUCKET_SHIFT)
#define BUCKET_NUM(addr) ((((addr) - SUN4C_LOCK_VADDR) >> BUCKET_SHIFT))
#define BUCKET_ADDR(num) (((num) << BUCKET_SHIFT) + SUN4C_LOCK_VADDR)
/* Build a kernel PTE for the physical page backing 'page'. */
#define BUCKET_PTE(page) \
	((((page) - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(SUN4C_PAGE_KERNEL))
/* Inverse of BUCKET_PTE: recover the kernel virtual page from a PTE. */
#define BUCKET_PTE_PAGE(pte) \
	(PAGE_OFFSET + (((pte) & SUN4C_PFN_MASK) << PAGE_SHIFT))

/* Steal an MMU segment from user space and pin it at 'addr' for
 * kernel use (task-stack buckets / iobuffer area below).
 */
static void get_locked_segment(unsigned long addr)
{
	struct sun4c_mmu_entry *stolen;
	unsigned long flags;

	save_and_cli(flags);
	addr &= SUN4C_REAL_PGDIR_MASK;
	stolen = sun4c_user_strategy();
	max_user_taken_entries--;
	stolen->vaddr = addr;
	flush_user_windows();
	sun4c_kernel_map(stolen);
	restore_flags(flags);
}

/* Give the locked segment covering 'addr' back to the user free ring.
 * Counterpart of get_locked_segment(); continues on the next line.
 */
static void free_locked_segment(unsigned long addr)
{
	struct sun4c_mmu_entry *entry;
	unsigned long flags;
	unsigned char pseg;

	save_and_cli(flags);
	addr &= SUN4C_REAL_PGDIR_MASK;
	pseg = sun4c_get_segmap(addr);
	/* free_locked_segment() continued: flush, unmap and release. */
	entry = &mmu_entry_pool[pseg];

	flush_user_windows();
	if (sun4c_vacinfo.do_hwflushes)
		sun4c_flush_segment_hw(addr);
	else
		sun4c_flush_segment_sw(addr);

	sun4c_kernel_unmap(entry);
	add_ring(&sun4c_ufree_ring, entry);
	max_user_taken_entries++;
	restore_flags(flags);
}

/* If every bucket in the segment containing 'entry' is now empty,
 * release the locked segment back to the user pool.
 */
static inline void garbage_collect(int entry)
{
	int start, end;

	/* 32 buckets per segment... */
	entry &= ~31;
	start = entry;
	for (end = (start + 32); start < end; start++)
		if (sun4c_bucket[start] != BUCKET_EMPTY)
			return;

	/* Entire segment empty, release it. */
	free_locked_segment(BUCKET_ADDR(entry));
}

/* One page per task union on sun4 (8K pages), two on sun4c. */
#ifdef CONFIG_SUN4
#define TASK_STRUCT_ORDER	0
#else
#define TASK_STRUCT_ORDER	1
#endif

/* Allocate a task struct + kernel stack and map it into a fixed
 * bucket slot in the locked-segment area.  Returns NULL if out of
 * pages or out of bucket slots.
 */
static struct task_struct *sun4c_alloc_task_struct(void)
{
	unsigned long addr, pages;
	int entry;

	pages = __get_free_pages(GFP_KERNEL, TASK_STRUCT_ORDER);
	if (!pages)
		return (struct task_struct *) 0;

	/* Linear scan for a free bucket, starting from the hint. */
	for (entry = sun4c_lowbucket_avail; entry < NR_TASK_BUCKETS; entry++)
		if (sun4c_bucket[entry] == BUCKET_EMPTY)
			break;
	if (entry == NR_TASK_BUCKETS) {
		free_pages(pages, TASK_STRUCT_ORDER);
		return (struct task_struct *) 0;
	}
	if (entry >= sun4c_lowbucket_avail)
		sun4c_lowbucket_avail = entry + 1;

	addr = BUCKET_ADDR(entry);
	sun4c_bucket[entry] = (union task_union *) addr;
	if(sun4c_get_segmap(addr) == invalid_segment)
		get_locked_segment(addr);

	/* We are changing the virtual color of the page(s)
	 * so we must flush the cache to guarantee consistency.
	 * (Flush both pages of the bucket, then install the PTEs.)
	 */
	if (sun4c_vacinfo.do_hwflushes) {
		sun4c_flush_page_hw(pages);
#ifndef CONFIG_SUN4
		sun4c_flush_page_hw(pages + PAGE_SIZE);
#endif
	} else {
		sun4c_flush_page_sw(pages);
#ifndef CONFIG_SUN4
		sun4c_flush_page_sw(pages + PAGE_SIZE);
#endif
	}

	sun4c_put_pte(addr, BUCKET_PTE(pages));
#ifndef CONFIG_SUN4
	sun4c_put_pte(addr + PAGE_SIZE, BUCKET_PTE(pages + PAGE_SIZE));
#endif
	return (struct task_struct *) addr;
}

/* Drop a reference; on the last one, unmap the bucket, free the
 * backing pages and try to garbage-collect the segment.
 * Hardware-cache-flush variant.
 */
static void sun4c_free_task_struct_hw(struct task_struct *tsk)
{
	unsigned long tsaddr = (unsigned long) tsk;
	unsigned long pages = BUCKET_PTE_PAGE(sun4c_get_pte(tsaddr));
	int entry = BUCKET_NUM(tsaddr);

	if (atomic_dec_and_test(&(tsk)->thread.refcount)) {
		/* We are deleting a mapping, so the flush here is mandatory. */
		sun4c_flush_page_hw(tsaddr);
#ifndef CONFIG_SUN4
		sun4c_flush_page_hw(tsaddr + PAGE_SIZE);
#endif
		sun4c_put_pte(tsaddr, 0);
#ifndef CONFIG_SUN4
		sun4c_put_pte(tsaddr + PAGE_SIZE, 0);
#endif
		sun4c_bucket[entry] = BUCKET_EMPTY;
		if (entry < sun4c_lowbucket_avail)
			sun4c_lowbucket_avail = entry;

		free_pages(pages, TASK_STRUCT_ORDER);
		garbage_collect(entry);
	}
}

/* Software-flush twin of sun4c_free_task_struct_hw() above. */
static void sun4c_free_task_struct_sw(struct task_struct *tsk)
{
	unsigned long tsaddr = (unsigned long) tsk;
	unsigned long pages = BUCKET_PTE_PAGE(sun4c_get_pte(tsaddr));
	int entry = BUCKET_NUM(tsaddr);

	if (atomic_dec_and_test(&(tsk)->thread.refcount)) {
		/* We are deleting a mapping, so the flush here is mandatory.
		 */
		sun4c_flush_page_sw(tsaddr);
#ifndef CONFIG_SUN4
		sun4c_flush_page_sw(tsaddr + PAGE_SIZE);
#endif
		sun4c_put_pte(tsaddr, 0);
#ifndef CONFIG_SUN4
		sun4c_put_pte(tsaddr + PAGE_SIZE, 0);
#endif
		sun4c_bucket[entry] = BUCKET_EMPTY;
		if (entry < sun4c_lowbucket_avail)
			sun4c_lowbucket_avail = entry;

		free_pages(pages, TASK_STRUCT_ORDER);
		garbage_collect(entry);
	}
}

/* Take a reference on the task struct. */
static void sun4c_get_task_struct(struct task_struct *tsk)
{
	atomic_inc(&(tsk)->thread.refcount);
}

/* Boot-time setup: sanity-check the task union size against
 * TASK_STRUCT_ORDER, then mark every bucket slot empty.
 */
static void __init sun4c_init_buckets(void)
{
	int entry;

	if (sizeof(union task_union) != (PAGE_SIZE << TASK_STRUCT_ORDER)) {
		prom_printf("task union not %d page(s)!\n", 1 << TASK_STRUCT_ORDER);
	}
	for (entry = 0; entry < NR_TASK_BUCKETS; entry++)
		sun4c_bucket[entry] = BUCKET_EMPTY;
	sun4c_lowbucket_avail = 0;
}

/* State for the DMA iobuffer area: [start, end) virtual range, the
 * high-water mark of locked segments, and a page-granular use bitmap.
 */
static unsigned long sun4c_iobuffer_start;
static unsigned long sun4c_iobuffer_end;
static unsigned long sun4c_iobuffer_high;
static unsigned long *sun4c_iobuffer_map;
static int iobuffer_map_size;

/*
 * Alias our pages so they do not cause a trap.
 * Also one page may be aliased into several I/O areas and we may
 * finish these I/O separately.
 */
static char *sun4c_lockarea(char *vaddr, unsigned long size)
{
	unsigned long base, scan;
	unsigned long npages;
	unsigned long vpage;
	unsigned long pte;
	unsigned long apage;
	unsigned long high;
	unsigned long flags;

	/* Page count covering [vaddr, vaddr + size), rounded up. */
	npages = (((unsigned long)vaddr & ~PAGE_MASK) +
		  size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	scan = 0;
	save_and_cli(flags);
	/* First-fit search for npages consecutive clear bits. */
	for (;;) {
		scan = find_next_zero_bit(sun4c_iobuffer_map,
					  iobuffer_map_size, scan);
		if ((base = scan) + npages > iobuffer_map_size)
			goto abend;
		for (;;) {
			if (scan >= base + npages)
				goto found;
			if (test_bit(scan, sun4c_iobuffer_map))
				break;
			scan++;
		}
	}

found:
	/* Grow the locked-segment high-water mark to cover the window. */
	high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
	high = SUN4C_REAL_PGDIR_ALIGN(high);
	while (high > sun4c_iobuffer_high) {
		get_locked_segment(sun4c_iobuffer_high);
		sun4c_iobuffer_high += SUN4C_REAL_PGDIR_SIZE;
	}

	/* Install non-cacheable alias PTEs, one per page. */
	vpage = ((unsigned long) vaddr) & PAGE_MASK;
	for (scan = base; scan < base+npages; scan++) {
		pte = ((vpage-PAGE_OFFSET) >> PAGE_SHIFT);
		pte |= pgprot_val(SUN4C_PAGE_KERNEL);
		pte |= _SUN4C_PAGE_NOCACHE;
		set_bit(scan, sun4c_iobuffer_map);
		apage = (scan << PAGE_SHIFT) + sun4c_iobuffer_start;

		/* Flush original mapping so we see the right things later. */
		sun4c_flush_page(vpage);

		sun4c_put_pte(apage, pte);
		vpage += PAGE_SIZE;
	}
	restore_flags(flags);
	/* Return alias address with the original intra-page offset. */
	return (char *) ((base << PAGE_SHIFT) + sun4c_iobuffer_start +
			 (((unsigned long) vaddr) & ~PAGE_MASK));

abend:
	restore_flags(flags);
	printk("DMA vaddr=0x%p size=%08lx\n", vaddr, size);
	panic("Out of iobuffer table");
	return 0;
}

/* Tear down an alias window built by sun4c_lockarea() and shrink the
 * locked-segment high-water mark if the tail of the map went idle.
 */
static void sun4c_unlockarea(char *vaddr, unsigned long size)
{
	unsigned long vpage, npages;
	unsigned long flags;
	int scan, high;

	vpage = (unsigned long)vaddr & PAGE_MASK;
	npages = (((unsigned long)vaddr & ~PAGE_MASK) +
		  size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	save_and_cli(flags);
	while (npages != 0) {
		--npages;

		/* This mapping is marked non-cachable, no flush necessary.
		 */
		sun4c_put_pte(vpage, 0);
		clear_bit((vpage - sun4c_iobuffer_start) >> PAGE_SHIFT,
			  sun4c_iobuffer_map);
		vpage += PAGE_SIZE;
	}

	/* garbage collect: walk down 32 bits (one long) at a time while
	 * whole map words are clear, then release trailing segments.
	 */
	scan = (sun4c_iobuffer_high - sun4c_iobuffer_start) >> PAGE_SHIFT;
	while (scan >= 0 && !sun4c_iobuffer_map[scan >> 5])
		scan -= 32;
	scan += 32;
	high = sun4c_iobuffer_start + (scan << PAGE_SHIFT);
	high = SUN4C_REAL_PGDIR_ALIGN(high) + SUN4C_REAL_PGDIR_SIZE;
	while (high < sun4c_iobuffer_high) {
		sun4c_iobuffer_high -= SUN4C_REAL_PGDIR_SIZE;
		free_locked_segment(sun4c_iobuffer_high);
	}
	restore_flags(flags);
}

/* Note the scsi code at init time passes to here buffers
 * which sit on the kernel stack, those are already locked
 * by implication and fool the page locking code above
 * if passed to by mistake.
 */
static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long page;

	page = ((unsigned long)bufptr) & PAGE_MASK;
	if (!VALID_PAGE(virt_to_page(page))) {
		sun4c_flush_page(page);
		return (__u32)bufptr; /* already locked */
	}
	return (__u32)sun4c_lockarea(bufptr, len);
}

/* Map each scatterlist entry through sun4c_lockarea(); walks the list
 * from index sz down to 0 inclusive.
 */
static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	while (sz >= 0) {
		sg[sz].dvma_address = (__u32)sun4c_lockarea(sg[sz].address, sg[sz].length);
		sg[sz].dvma_length = sg[sz].length;
		sz--;
	}
}

static void sun4c_release_scsi_one(__u32 bufptr, unsigned long len, struct sbus_bus *sbus)
{
	if (bufptr < sun4c_iobuffer_start)
		return; /* On kernel stack or similar, see above */
	sun4c_unlockarea((char *)bufptr, len);
}

static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	while (sz >= 0) {
		sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length);
		sz--;
	}
}

#define TASK_ENTRY_SIZE BUCKET_SIZE	/* see above */
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

struct vm_area_struct sun4c_kstack_vma;

/* Boot-time layout of the locked area: task-stack buckets first, the
 * DMA iobuffer region after; continues on the next line.
 */
static void __init sun4c_init_lock_areas(void)
{
	unsigned long sun4c_taskstack_start;
	unsigned long sun4c_taskstack_end;
	int bitmap_size;

	sun4c_init_buckets();
	/* sun4c_init_lock_areas() continued: compute the region bounds. */
	sun4c_taskstack_start = SUN4C_LOCK_VADDR;
	sun4c_taskstack_end = (sun4c_taskstack_start +
			       (TASK_ENTRY_SIZE * NR_TASK_BUCKETS));
	if (sun4c_taskstack_end >= SUN4C_LOCK_END) {
		prom_printf("Too many tasks, decrease NR_TASK_BUCKETS please.\n");
		prom_halt();
	}

	/* The iobuffer area takes the rest of the locked window; size a
	 * one-bit-per-page bitmap for it (rounded up to whole longs).
	 */
	sun4c_iobuffer_start = sun4c_iobuffer_high =
				SUN4C_REAL_PGDIR_ALIGN(sun4c_taskstack_end);
	sun4c_iobuffer_end = SUN4C_LOCK_END;
	bitmap_size = (sun4c_iobuffer_end - sun4c_iobuffer_start) >> PAGE_SHIFT;
	bitmap_size = (bitmap_size + 7) >> 3;
	bitmap_size = LONG_ALIGN(bitmap_size);
	iobuffer_map_size = bitmap_size << 3;
	sun4c_iobuffer_map = __alloc_bootmem(bitmap_size, SMP_CACHE_BYTES, 0UL);
	memset((void *) sun4c_iobuffer_map, 0, bitmap_size);

	/* Register the kernel-stack range as a VMA of init_mm. */
	sun4c_kstack_vma.vm_mm = &init_mm;
	sun4c_kstack_vma.vm_start = sun4c_taskstack_start;
	sun4c_kstack_vma.vm_end = sun4c_taskstack_end;
	sun4c_kstack_vma.vm_page_prot = PAGE_SHARED;
	sun4c_kstack_vma.vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	insert_vm_struct(&init_mm, &sun4c_kstack_vma);
}

/* Cache flushing on the sun4c.
 */
/* Flush the entire virtually-addressed cache by reading one word per
 * cache line across a VAC-sized kernel window.  Two unrolled variants:
 * 32-byte and (presumably 16-byte -- the loads step by 0x10) lines.
 */
static void sun4c_flush_cache_all(void)
{
	unsigned long begin, end;

	flush_user_windows();
	begin = (KERNBASE + SUN4C_REAL_PGDIR_SIZE);
	end = (begin + SUN4C_VAC_SIZE);

	if (sun4c_vacinfo.linesize == 32) {
		while (begin < end) {
			/* Touch 16 lines (512 bytes) per iteration. */
			__asm__ __volatile__(
			"ld [%0 + 0x00], %%g0\n\t"
			"ld [%0 + 0x20], %%g0\n\t"
			"ld [%0 + 0x40], %%g0\n\t"
			"ld [%0 + 0x60], %%g0\n\t"
			"ld [%0 + 0x80], %%g0\n\t"
			"ld [%0 + 0xa0], %%g0\n\t"
			"ld [%0 + 0xc0], %%g0\n\t"
			"ld [%0 + 0xe0], %%g0\n\t"
			"ld [%0 + 0x100], %%g0\n\t"
			"ld [%0 + 0x120], %%g0\n\t"
			"ld [%0 + 0x140], %%g0\n\t"
			"ld [%0 + 0x160], %%g0\n\t"
			"ld [%0 + 0x180], %%g0\n\t"
			"ld [%0 + 0x1a0], %%g0\n\t"
			"ld [%0 + 0x1c0], %%g0\n\t"
			"ld [%0 + 0x1e0], %%g0\n"
			: : "r" (begin));
			begin += 512;
		}
	} else {
		while (begin < end) {
			/* Touch 16 lines (256 bytes) per iteration. */
			__asm__ __volatile__(
			"ld [%0 + 0x00], %%g0\n\t"
			"ld [%0 + 0x10], %%g0\n\t"
			"ld [%0 + 0x20], %%g0\n\t"
			"ld [%0 + 0x30], %%g0\n\t"
			"ld [%0 + 0x40], %%g0\n\t"
			"ld [%0 + 0x50], %%g0\n\t"
			"ld [%0 + 0x60], %%g0\n\t"
			"ld [%0 + 0x70], %%g0\n\t"
			"ld [%0 + 0x80], %%g0\n\t"
			"ld [%0 + 0x90], %%g0\n\t"
			"ld [%0 + 0xa0], %%g0\n\t"
			"ld [%0 + 0xb0], %%g0\n\t"
			"ld [%0 + 0xc0], %%g0\n\t"
			"ld [%0 + 0xd0], %%g0\n\t"
			"ld [%0 + 0xe0], %%g0\n\t"
			"ld [%0 + 0xf0], %%g0\n"
			: : "r" (begin));
			begin += 256;
		}
	}
}

/* NOTE(review): sun4c_flush_cache_mm_hw() below is cut off at the end
 * of this view (the do-loop body and everything after it are missing);
 * the visible head is left byte-for-byte as found.
 */
static void sun4c_flush_cache_mm_hw(struct mm_struct *mm)
{
	int new_ctx = mm->context;

	if (new_ctx != NO_CONTEXT) {
		flush_user_windows();

		if (sun4c_context_ring[new_ctx].num_entries) {
			struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
			unsigned long flags;

			save_and_cli(flags);
			if (head->next != head) {
				struct sun4c_mmu_entry *entry = head->next;
				int savectx = sun4c_get_context();

				sun4c_set_context(new_ctx);
				sun4c_flush_context_hw();
				do {
					struct sun4c_mmu_entry *next = entry->next;

					sun4c_user_unmap(entry);
					free_user_entry(new_ctx, entry);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -