fasttrap.c
		mutex_enter(&bucket->ftb_mtx);
		fpp = (fasttrap_provider_t **)&bucket->ftb_data;

		while ((fp = *fpp) != NULL) {
			/*
			 * Acquire and release the lock as a simple way of
			 * waiting for any other consumer to finish with
			 * this provider. A thread must first acquire the
			 * bucket lock so there's no chance of another thread
			 * blocking on the provider's lock.
			 */
			mutex_enter(&fp->ftp_mtx);
			mutex_exit(&fp->ftp_mtx);

			if (dtrace_unregister(fp->ftp_provid) != 0) {
				fail = 1;
				fpp = &fp->ftp_next;
			} else {
				*fpp = fp->ftp_next;
				fasttrap_provider_free(fp);
			}
		}

		mutex_exit(&bucket->ftb_mtx);
	}

	if (fail || dtrace_unregister(fasttrap_id) != 0) {
		uint_t work;
		/*
		 * If we're failing to detach, we need to unblock timeouts
		 * and start a new timeout if any work has accumulated while
		 * we've been unsuccessfully trying to detach.
		 */
		mutex_enter(&fasttrap_cleanup_mtx);
		fasttrap_timeout = 0;
		work = fasttrap_cleanup_work;
		mutex_exit(&fasttrap_cleanup_mtx);

		if (work)
			fasttrap_pid_cleanup();

		(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
		    &fasttrap_meta_id);

		return (DDI_FAILURE);
	}

#ifdef DEBUG
	mutex_enter(&fasttrap_count_mtx);
	ASSERT(fasttrap_count == 0);
	mutex_exit(&fasttrap_count_mtx);
#endif

	kmem_free(fasttrap_tpoints.fth_table,
	    fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_tpoints.fth_nent = 0;

	kmem_free(fasttrap_provs.fth_table,
	    fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_provs.fth_nent = 0;

	/*
	 * We know there are no tracepoints in any process anywhere in
	 * the system so there is no process which has its p_dtrace_count
	 * greater than zero; therefore we know that no thread can actively
	 * be executing code in fasttrap_fork(). Similarly for p_dtrace_probes
	 * and fasttrap_exec() and fasttrap_exit().
	 */
	ASSERT(dtrace_fasttrap_fork_ptr == &fasttrap_fork);
	dtrace_fasttrap_fork_ptr = NULL;

	ASSERT(dtrace_fasttrap_exec_ptr == &fasttrap_exec_exit);
	dtrace_fasttrap_exec_ptr = NULL;

	ASSERT(dtrace_fasttrap_exit_ptr == &fasttrap_exec_exit);
	dtrace_fasttrap_exit_ptr = NULL;

	ddi_remove_minor_node(devi, NULL);

	return (DDI_SUCCESS);
}

static struct dev_ops fasttrap_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	fasttrap_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	fasttrap_attach,	/* attach */
	fasttrap_detach,	/* detach */
	nodev,			/* reset */
	&fasttrap_cb_ops,	/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

/*
 * Module linkage information for the kernel.
 */
static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Fasttrap Tracing",	/* name of module */
	&fasttrap_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
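/*
 * Illustrative sketch, not part of the original driver: the
 * acquire-and-release idiom used in the detach code above.  Every
 * consumer takes the bucket lock before the per-provider lock, so
 * briefly acquiring and releasing each provider's lock while holding
 * the bucket lock is enough to drain any consumer still inside it.
 * The my_bucket_t/my_provider_t names below are hypothetical.
 */
#ifdef FASTTRAP_EXAMPLE
typedef struct my_provider {
	kmutex_t		mp_mtx;		/* per-provider lock */
	struct my_provider	*mp_next;
} my_provider_t;

typedef struct my_bucket {
	kmutex_t	mb_mtx;			/* bucket lock */
	my_provider_t	*mb_list;
} my_bucket_t;

static void
drain_consumers(my_bucket_t *b)
{
	my_provider_t *p;

	mutex_enter(&b->mb_mtx);	/* no new consumer can look up p */
	for (p = b->mb_list; p != NULL; p = p->mp_next) {
		mutex_enter(&p->mp_mtx);	/* waits out current holder */
		mutex_exit(&p->mp_mtx);		/* provider now quiescent */
	}
	mutex_exit(&b->mb_mtx);
}
#endif	/* FASTTRAP_EXAMPLE */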
/*
 * All of this is stolen lock, stock, and barrel from procfs.
 */
extern struct seg_ops segdev_ops;	/* needs a header file */
extern struct seg_ops segspt_shmops;	/* needs a header file */

static int
fasttrap_page_exists(struct seg *seg, caddr_t addr)
{
	struct segvn_data *svd;
	vnode_t *vp;
	vattr_t vattr;

	/*
	 * Fail if the page doesn't map to a page in the underlying
	 * mapped file, if an underlying mapped file exists.
	 */
	vattr.va_mask = AT_SIZE;
	if (seg->s_ops == &segvn_ops &&
	    SEGOP_GETVP(seg, addr, &vp) == 0 &&
	    vp != NULL && vp->v_type == VREG &&
	    VOP_GETATTR(vp, &vattr, 0, CRED()) == 0) {
		u_offset_t size = roundup(vattr.va_size, (u_offset_t)PAGESIZE);
		u_offset_t offset = SEGOP_GETOFFSET(seg, addr);

		if (offset >= size)
			return (0);
	}

	/*
	 * Fail if this is an ISM shared segment and the address is
	 * not within the real size of the spt segment that backs it.
	 */
	if (seg->s_ops == &segspt_shmops &&
	    addr >= seg->s_base + spt_realsize(seg))
		return (0);

	/*
	 * Fail if the segment is mapped from /dev/null.
	 * The key is that the mapping comes from segdev and the
	 * type is neither MAP_SHARED nor MAP_PRIVATE.
	 */
	if (seg->s_ops == &segdev_ops &&
	    ((SEGOP_GETTYPE(seg, addr) & (MAP_SHARED | MAP_PRIVATE)) == 0))
		return (0);

	/*
	 * Fail if the page is a MAP_NORESERVE page that has not
	 * actually materialized.  We cheat by knowing that segvn is
	 * the only segment driver that supports MAP_NORESERVE.
	 */
	if (seg->s_ops == &segvn_ops &&
	    (svd = (struct segvn_data *)seg->s_data) != NULL &&
	    (svd->vp == NULL || svd->vp->v_type != VREG) &&
	    (svd->flags & MAP_NORESERVE)) {
		/*
		 * Guilty knowledge here.  We know that segvn_incore
		 * returns more than just the low-order bit that
		 * indicates the page is actually in memory.  If any
		 * bits are set, then there is backing store for the
		 * page.
		 */
		char incore = 0;

		(void) SEGOP_INCORE(seg, addr, PAGESIZE, &incore);
		if (incore == 0)
			return (0);
	}
	return (1);
}

/*
 * Use physmax to determine the highest physical page of DRAM memory.
 * It is assumed that any physical address above physmax is in IO space.
 * We don't bother checking the low end because we assume that memory
 * space begins at physical page frame 0.
 *
 * Return 1 if the page frame is onboard DRAM memory, else 0.
 * Returns 0 for nvram so it won't be cached.
 */
int
fasttrap_pf_is_memory(pfn_t pf)
{
	/* We must be IO space. */
	if (pf > physmax)
		return (0);

	/* We must be memory space. */
	return (1);
}
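/*
 * Illustrative sketch, not original code: the PAGEMASK/PAGEOFFSET
 * arithmetic that the mapping routines below rely on.  An address is
 * split into its page base (used for locking and mapping) and its
 * in-page offset (re-applied to the per-page kernel mapping).  The
 * helper name is hypothetical.
 */
#ifdef FASTTRAP_EXAMPLE
static caddr_t
split_page_addr(caddr_t addr, uintptr_t *offp)
{
	*offp = (uintptr_t)addr & PAGEOFFSET;	/* byte within the page */
	return ((caddr_t)((uintptr_t)addr & PAGEMASK));	/* page base */
}
#endif	/* FASTTRAP_EXAMPLE */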
/*
 * Map address "addr" in address space "as" into a kernel virtual address.
 * The memory is guaranteed to be resident and locked down.
 */
caddr_t
fasttrap_mapin(struct as *as, caddr_t addr, int writing)
{
	page_t *pp;
	caddr_t kaddr;
	pfn_t pfnum;

	/*
	 * NB: Because of past mistakes, we have bits being returned
	 * by getpfnum that are actually the page type bits of the pte.
	 * When the object we are trying to map is a memory page with
	 * a page structure everything is ok and we can use the optimal
	 * method, ppmapin.  Otherwise, we have to do something special.
	 */
	pfnum = hat_getpfnum(as->a_hat, addr);
	if (fasttrap_pf_is_memory(pfnum)) {
		pp = page_numtopp_nolock(pfnum);
		if (pp != NULL) {
			ASSERT(PAGE_LOCKED(pp));
			kaddr = ppmapin(pp, writing ?
			    (PROT_READ | PROT_WRITE) : PROT_READ,
			    (caddr_t)-1);
			return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
		}
	}

	/*
	 * Oh well, we didn't have a page struct for the object we were
	 * trying to map in; ppmapin doesn't handle devices, but allocating
	 * a heap address allows ppmapout to free virtual space when done.
	 */
	kaddr = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

	hat_devload(kas.a_hat, kaddr, PAGESIZE, pfnum, writing ?
	    (PROT_READ | PROT_WRITE) : PROT_READ, HAT_LOAD_LOCK);

	return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
}

/*ARGSUSED*/
void
fasttrap_mapout(struct as *as, caddr_t addr, caddr_t vaddr, int writing)
{
	vaddr = (caddr_t)(uintptr_t)((uintptr_t)vaddr & PAGEMASK);
	ppmapout(vaddr);
}

int
fasttrap_uread(proc_t *p, void *buf, size_t len, uintptr_t a)
{
	caddr_t addr = (caddr_t)a;
	caddr_t page;
	caddr_t vaddr;
	struct seg *seg;
	int error = 0;
	int err = 0;
	uint_t prot;
	int protchanged;
	on_trap_data_t otd;
	int retrycnt;
	struct as *as = p->p_as;

	/*
	 * Locate segment containing address of interest.
	 */
	page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK);
	retrycnt = 0;
	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
retry:
	if ((seg = as_segat(as, page)) == NULL ||
	    !fasttrap_page_exists(seg, page)) {
		AS_LOCK_EXIT(as, &as->a_lock);
		err = error = EIO;
		goto err;
	}
	SEGOP_GETPROT(seg, page, 0, &prot);

	protchanged = 0;
	if ((prot & PROT_READ) == 0) {
		protchanged = 1;
		err = SEGOP_SETPROT(seg, page, PAGESIZE, prot | PROT_READ);
		if (err == IE_RETRY) {
			err = 0;
			protchanged = 0;
			ASSERT(retrycnt == 0);
			retrycnt++;
			goto retry;
		}
		if (err != 0) {
			AS_LOCK_EXIT(as, &as->a_lock);
			err = error = EIO;
			goto err;
		}
	}

	if (SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE,
	    F_SOFTLOCK, S_READ)) {
		if (protchanged)
			(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
		AS_LOCK_EXIT(as, &as->a_lock);
		err = error = EIO;
		goto err;
	}
	CPU_STATS_ADD_K(vm, softlock, 1);

	/*
	 * Make sure we're not trying to read off the end of the page.
	 */
	ASSERT(len <= page + PAGESIZE - addr);

	/*
	 * Map in the locked page, copy to our local buffer,
	 * then map the page out and unlock it.
	 */
	vaddr = fasttrap_mapin(as, addr, 0);

	/*
	 * Since we are copying memory on behalf of the user process,
	 * protect against memory error correction faults.  If we get
	 * one, set error only: we *always* return EIO in this case.
	 */
	if (!on_trap(&otd, OT_DATA_EC)) {
		if (seg->s_ops == &segdev_ops) {
			/*
			 * Device memory can behave strangely; invoke
			 * a segdev-specific copy operation instead.
			 */
			if (segdev_copyfrom(seg, addr, vaddr, buf, len))
				err = error = EIO;
		} else {
			bcopy(vaddr, buf, len);
		}
	} else {
		error = EIO;
	}
	no_trap();

	fasttrap_mapout(as, addr, vaddr, 0);
	(void) SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE,
	    F_SOFTUNLOCK, S_READ);
	if (protchanged)
		(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
	AS_LOCK_EXIT(as, &as->a_lock);
err:
	return (error);
}
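/*
 * Illustrative usage sketch, not original code: reading one traced
 * instruction from a stopped process with fasttrap_uread().  The
 * ASSERT in fasttrap_uread() limits a single call to one page, which
 * an aligned 4-byte read can never violate.  The helper name and the
 * 4-byte instruction size are assumptions.
 */
#ifdef FASTTRAP_EXAMPLE
static int
read_instr(proc_t *p, uintptr_t pc, uint32_t *instr)
{
	ASSERT((pc & 0x3) == 0);	/* aligned: cannot cross a page */
	return (fasttrap_uread(p, instr, sizeof (*instr), pc));
}
#endif	/* FASTTRAP_EXAMPLE */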
int
fasttrap_uwrite(proc_t *p, void *buf, size_t len, uintptr_t a)
{
	caddr_t addr = (caddr_t)a;
	caddr_t page;
	caddr_t vaddr;
	struct seg *seg;
	int error = 0;
	int err = 0;
	uint_t prot;
	int protchanged;
	on_trap_data_t otd;
	int retrycnt;
	struct as *as = p->p_as;

	/*
	 * Locate segment containing address of interest.
	 */
	page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK);
	retrycnt = 0;
	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
retry:
	if ((seg = as_segat(as, page)) == NULL ||
	    !fasttrap_page_exists(seg, page)) {
		AS_LOCK_EXIT(as, &as->a_lock);
		err = error = EIO;
		goto err;
	}
	SEGOP_GETPROT(seg, page, 0, &prot);

	protchanged = 0;
	if ((prot & PROT_WRITE) == 0) {
		protchanged = 1;
		err = SEGOP_SETPROT(seg, page, PAGESIZE, prot | PROT_WRITE);
		if (err == IE_RETRY) {
			err = 0;
			protchanged = 0;
			ASSERT(retrycnt == 0);
			retrycnt++;
			goto retry;
		}
		if (err != 0) {
			AS_LOCK_EXIT(as, &as->a_lock);
			err = error = EIO;
			goto err;
		}
	}

	if (SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE,
	    F_SOFTLOCK, S_WRITE)) {
		if (protchanged)
			(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
		AS_LOCK_EXIT(as, &as->a_lock);
		err = error = EIO;
		goto err;
	}
	CPU_STATS_ADD_K(vm, softlock, 1);

	/*
	 * Make sure we're not trying to write off the end of the page.
	 */
	ASSERT(len <= page + PAGESIZE - addr);

	/*
	 * Map in the locked page, copy from our local buffer,
	 * then map the page out and unlock it.
	 */
	vaddr = fasttrap_mapin(as, addr, 1);

	/*
	 * Since we are copying memory on behalf of the user process,
	 * protect against memory error correction faults.  If we get
	 * one, set error only: we *always* return EIO in this case.
	 */
	if (!on_trap(&otd, OT_DATA_EC)) {
		if (seg->s_ops == &segdev_ops) {
			/*
			 * Device memory can behave strangely; invoke
			 * a segdev-specific copy operation instead.
			 */
			if (segdev_copyto(seg, addr, buf, vaddr, len))
				err = error = EIO;
		} else {
			bcopy(buf, vaddr, len);
		}
	} else {
		error = EIO;
	}
	no_trap();

	if (prot & PROT_EXEC)
		sync_icache(vaddr, (uint_t)len);

	fasttrap_mapout(as, addr, vaddr, 1);
	(void) SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE,
	    F_SOFTUNLOCK, S_WRITE);
	if (protchanged)
		(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
	AS_LOCK_EXIT(as, &as->a_lock);
err:
	return (error);
}
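/*
 * Illustrative usage sketch, not original code: planting a tracepoint
 * by overwriting one instruction with fasttrap_uwrite().  When the
 * target page is executable, fasttrap_uwrite() performs the
 * sync_icache() itself, so the caller needs no extra cache
 * maintenance.  The helper name and the opcode value are hypothetical.
 */
#ifdef FASTTRAP_EXAMPLE
#define	EXAMPLE_TRAP_INSTR	0x91d02038	/* hypothetical trap opcode */

static int
plant_tracepoint(proc_t *p, uintptr_t pc)
{
	uint32_t instr = EXAMPLE_TRAP_INSTR;

	return (fasttrap_uwrite(p, &instr, sizeof (instr), pc));
}
#endif	/* FASTTRAP_EXAMPLE */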