
📄 vectors.c

📁 Source code of Digital's VAX 4.2 Unix operating system
💻 C
📖 Page 1 of 4
                         *         (p->p_vpcontext->vpc_error &= ~VPC_ERROR_PMF)
                         */
                                panic ("vp_contextrest(): VPC_ERROR_PMF, now what ?!");
                }
        }
        vpd->vpd_in_kernel = ~ VPD_IN_KERNEL;
        if (vpc->vpc_error & VPC_ERROR_IMP)
                return (VP_FAILURE);
        else
                return (VP_SUCCESS);
}

vp_cleanup (p)
struct  proc    *p;
{
/*
 * input:       proc struct pointer
 * output:      none
 * description: make sure usage of the vp is complete.  this includes
 *              releasing the KM_ALLOC'd memory.  leave the vp in a disabled
 *              state.
 * assumption:  test for a vp_context area has already been made.
 */
int     saveaffinity;
struct  cpudata         *pcpu;
struct  vpdata          *vpd;
struct  vpcontext       *vpc;

        pcpu = CURRENT_CPUDATA;
        vpd = pcpu->cpu_vpdata;
        vpc = p->p_vpcontext;

        /* debby:  if the vpcontext state is VPC_SAVED, then there is no
         *         need to wait on the vp, return now.
         */
        if (p != vpd->vpd_proc) {
                /*
                 * if this process does not own the VP then don't touch the
                 * hardware!
                 */
                goto resume_scalar;
        }

        /*
         * wait for the VP to go idle, all memory references to complete,
         * and all errors to be reported
         */
        while (mfpr(VPSR) & VPSR_BSY) ;
        mfpr (VMAC);
        mfpr (VMAC);

        /*
         * debby: is a RST really necessary ???
         * clear the VPSR (vector processor status register) and the VAER
         * (vector arithmetic exception register).  This will also clear all
         * the exceptions in the VP.  Leave the VP disabled.
         * Also re-enable memory mapping for the vector processor (the reset
         * disabled it).
         */
        mtpr (VPSR, VPSR_RST);
        mfpr (VPSR);

        if (cpu == VAX_6400) {
                /* Enable mapping */
                mtpr (VIADR, 0x509);
                mtpr (VIDLO, 1);

                /* Initialize cache controller: Enable memory transactions,
                 * enable cache, enable error reporting, clear errors,
                 * and flush cache.  Do this by writing a 0x138e00 to the
                 * VIR_LSX_CCSR.
                 */
                mtpr (VIADR, 0x520);
                mtpr (VIDLO, 0x138e00);

                /* Initialize vector controller status register; enable
                 * hard and soft error reporting.
                 */
                mtpr (VIADR, 0x489);
                mtpr (VIDLO, 0xc0000);
        }

        /*
         * clear the vp_proc pointer in the cpudata struct, and set the
         * vpdata state to VPD_ALIVE | VPD_DISABLED (VP is alive and
         * operational and disabled)
         */
        if (p == vpd->vpd_proc) {
                vpd->vpd_proc = NULL;
                vpd->vpd_state &= ~VPD_ENABLED;
                vpd->vpd_state |= VPD_DISABLED;
                set_bit_atomic (pcpu->cpu_num, &vpfree);
        }

resume_scalar:
        /* sanity check */
        if ((u.u_acflag & AVP) != AVP)
                uprintf ("u_acflag error, proc #%d\n", u.u_procp->p_pid);

        /* save the old affinity before deallocating the vpcontext area */
        saveaffinity = vpc->vpc_affinity;

        /* deallocate the vpcontext area */
        KM_FREE (vpc->vpc_vregs, KM_VECTOR);
        vpc->vpc_vregs = 0;
        KM_FREE (vpc, KM_VECTOR);
        p->p_vpcontext = 0;

        /* clear the vector process flag */
        u.u_acflag &= ~AVP;
        --num_vec_procs;

        /*
         * restore the original process affinity.
         */
        p->p_affinity = saveaffinity;
}

vp_contextlimbo (cpudata_ptr)
struct  cpudata *cpudata_ptr;
{
int     before, after;
int     new_affinity;
/*
 * input:       cpudata struct pointer
 * output:      none
 * description:
 *              - in most cases, this routine will leave the context area of a
 *                vector process in a state of limbo.  The vector context will
 *                still be in the vector processor, but it will no longer be
 *                the process running on the scalar processor.
 *              - this routine will leave the vector processor in an idle and
 *                disabled state.
 *              - this routine may decide to context save the vector context.
 *                - if this process has an affinity to more than one
 *                  processor.
 *                - if this process is being debugged (ptrace'd)
 */
struct  vpdata          *vpd;
struct  vpcontext       *vpc;
struct  proc            *vproc;

        vpd = cpudata_ptr->cpu_vpdata;
        vproc = vpd->vpd_proc;
        vpc = vproc->p_vpcontext;

        if (vpc->vpc_state == VPC_LOAD) {
                /*
                 * wait for the VP to be idle
                 */
                while (mfpr(VPSR) & VPSR_BSY) ;

                /*
                 * wait for completion of vector memory access, and all
                 * errors to be reported.
                 */
                mfpr (VMAC);
                mfpr (VMAC);

                /*
                 * update the state of this process - record the fact that
                 * there is a valid vector context in the vector processor -
                 * this will remain true even if there is another process's
                 * context in the scalar processor.  note: the disabled fault
                 * handler may decide to context save this vector process at a
                 * later time.
                 */
                vpc->vpc_state = VPC_LIMBO;
        }

        /*
         * if the process's affinity is not limited to one (this) processor,
         * then do a vp_contextsave().  this will prevent this process from
         * being scheduled on some other scalar processor while its vector
         * context is still stored in this scalar-vector pair.
         * Also, if this process is being debugged, then do a vp_contextsave()
         * so that the debugger has access to the vector registers and state.
         * Also, if this processor is being stopped, then do a
         * vp_contextsave().
         */
        if ((vproc->p_affinity != cpudata_ptr->cpu_mask) ||
            (cpudata_ptr->cpu_stops)) {
                /*
                 * save the process's context; set VPD_IN_KERNEL flag before
                 * call, and clear it after.  This tells the Disabled Fault
                 * handler that we know that we are executing vector
                 * instructions in kernel mode.
                 * debby: may be able to avoid this vp_contextsave() if the
                 *        vpc_state is VPC_SAVED.
                 */
                vpd->vpd_in_kernel = VPD_IN_KERNEL;
                if (vp_contextsave (vproc) == VP_FAILURE) {
                        /*
                         * vp_contextsave() will only fail
                         * if it gets a vector IMP error.  When this
                         * happens the system panics, so the following
                         * signal will never be reached.  However... Some
                         * future vector implementation may allow a
                         * recovery from an IMP error.  If that ever
                         * happens then the following psignal should be
                         * reconsidered.
                         */
                        psignal (vproc, SIGKILL);
                }
                vpd->vpd_in_kernel = ~ VPD_IN_KERNEL;

                /*
                 * update the state of this process - record the fact that
                 * this process does not have its context stored in any VP
                 */
                vpc->vpc_state = VPC_SAVED;

                /*
                 * update the process's affinity so that it can run on any
                 * CPU which has an attached vector processor.  Remember to
                 * account for any pre-vector process affinity.
                 * note: do not let the affinity become zero.  testing has
                 *       shown that this may happen if the stopcpu system
                 *       call is made when the boot cpu does not have a
                 *       vector processor.
                 */
                new_affinity = vpmask & vpc->vpc_affinity;
                if (new_affinity)
                        vproc->p_affinity = new_affinity;

                /*
                 * update the vpfree mask
                 * note: if this cpu is being stopped, then leave vpfree
                 *       alone.  stopcpu() has already updated vpfree.
                 */
                if (! (cpudata_ptr->cpu_stops)) {
                        set_bit_atomic (cpudata_ptr->cpu_num, &vpfree);
                }

                /*
                 * mark the vector processor as free by clearing the vpd_proc
                 * pointer in the cpudata structure of this cpu
                 */
                vproc = NULL;
        }

        /*
         * disable the vector processor.  this is done by writing a 0 to the
         * VPSR followed by a read of the VPSR.
         * note:  this will leave the exception states intact so that a
         * later call of the vector processor disabled fault handler can
         * examine the exception states.
         */
        mtpr (VPSR, 0);
        mfpr (VPSR);
        vpd->vpd_state &= ~VPD_ENABLED;
        vpd->vpd_state |= VPD_DISABLED;
}

#define LS_VINTSR_SET   (VINTSR_BUS_TIMEOUT      | VINTSR_VECTOR_UNIT_HERR)
#define LS_VINTSR_CLR   (VINTSR_VECTOR_UNIT_SERR | VINTSR_VECTL_VIB_SERR | \
                         VINTSR_VECTL_VIB_HERR   | VINTSR_CCHIP_VIB_SERR | \
                         VINTSR_CCHIP_VIB_HERR)

#define VIR_MOD_REV             0x48A
#define VIR_VCTL_CSR            0x489
#define VIR_LSX_CCSR            0x520
#define VIR_ALU_DIAG_CTRL       0x45C

#define VCTL_CSR_LSS            0x1     /* Load store soft error */
#define VIR_VCTL_CSR_CDS        0x4     /* Soft internal bus error */
#define VIR_VCTL_CSR_VIS        0x10    /* VIB bus soft error */
#define VIR_VCTL_CSR_VIH        0x20    /* VIB bus hard error */
#define VIR_VCTL_CSR_ISE        0x40    /* Illegal sequence error */
#define VIR_VCTL_CSR_VHE        0x800   /* Verse hard error */
#define LS_VCTL_CSR_CLR ( VCTL_CSR_LSS | VIR_VCTL_CSR_CDS |     \
                          VIR_VCTL_CSR_VIS | VIR_VCTL_CSR_VIH | \
                          VIR_VCTL_CSR_ISE | VIR_VCTL_CSR_VHE )

#define VIR_LSX_CCSR_CPE        0x200   /* Cache parity error */
#define VIR_LSX_CCSR_XSE        0x400   /* XMI interface soft error */
#define VIR_LSX_CCSR_XHE        0x800   /* XMI interface hard error */
#define LS_LSX_CCSR_CLR ( VIR_LSX_CCSR_CPE | VIR_LSX_CCSR_XSE | \
                          VIR_LSX_CCSR_XHE )

#define VIR_ALU_DIAG_CTRL_ABE           0x100   /* AB bus parity error */
#define VIR_ALU_DIAG_CTRL_CPE           0x200   /* CP bus parity error */
#define VIR_ALU_DIAG_CTRL_IFO           0x400   /* Illegal FAVOR opcode */
#define LSX_ALU_DIAG_CTRL_CLR   ( VIR_ALU_DIAG_CTRL_ABE |   \
                                  VIR_ALU_DIAG_CTRL_CPE |   \
                                  VIR_ALU_DIAG_CTRL_IFO )

#ifdef  TRUE
#undef  TRUE
#endif  TRUE
#define TRUE    1

#ifdef  FALSE
#undef  FALSE
#endif  FALSE
#define FALSE   0

vp_ls_bug(ls_vintsr)
long    ls_vintsr;
{
long    ls_mod_rev;
long    ls_vctl_csr;
long    ls_lsx_ccsr;
long    ls_alu_diag_ctrl;
long    tmp_vintsr;

        if (cpu != VAX_6400) {
                return (FALSE);
        } else {
                /*
                 * Disable the vector processor by clearing the present
                 * bit in the ACCS and by setting the disabled bit in the
                 * VINTSR.  Save a copy of the VINTSR.
                 */
                mtpr (ACCS, mfpr(ACCS) & ~1);
                ls_vintsr = mfpr (VINTSR);
                mtpr (VINTSR, ls_vintsr | VINTSR_DISABLE_VECT_INTF);

                /*
                 * Unlock the VINTSR by clearing all the error bits.
                 * This is done by writing a one to the error bits.
                 * Enable the vector processor by writing a 0 to the
                 * disabled bit in the VINTSR and setting the present bit
                 * in the ACCS.
                 */
                mtpr (VINTSR, ( VINTSR_VECTOR_UNIT_SERR |
                                VINTSR_VECTOR_UNIT_HERR |
                                VINTSR_VECTL_VIB_SERR |
                                VINTSR_VECTL_VIB_HERR |
                                VINTSR_CCHIP_VIB_SERR |
                                VINTSR_CCHIP_VIB_HERR |
                                VINTSR_BUS_TIMEOUT ) );
                mtpr (ACCS, (mfpr(ACCS) | 1));

                /*
                 * Wait for not busy
                 */
                while (mfpr(VPSR) & VPSR_VEN) ;

                /* VMS does another disable - enable&unlock here */

                /*
                 * Get a copy of the MOD_REV register.  This register
                 * can only be accessed indirectly through the VIADR and
                 * VIDLO registers.
                 */
                mtpr (VIADR, VIR_MOD_REV);
                ls_mod_rev = mfpr (VIDLO);

                /* VMS does another disable - enable&unlock here */

                /*
                 * Get a copy of the VCTL CSR register.  This register
                 * can only be accessed indirectly through the VIADR and
                 * VIDLO registers.
                 */
                mtpr (VIADR, VIR_VCTL_CSR);
                ls_vctl_csr = mfpr (VIDLO);

                /* VMS does another disable - enable&unlock here */

                /*
                 * Get a copy of the LSX_CCSR register.  This register
                 * can only be accessed indirectly through the VIADR and
                 * VIDLO registers.
                 */
                mtpr (VIADR, VIR_LSX_CCSR);
                ls_lsx_ccsr = mfpr (VIDLO);

                /* VMS does another disable - enable&unlock here */

                /*
                 * Get a copy of the DIAG_CTRL register.  This register
                 * can only be accessed indirectly through the VIADR and
                 * VIDLO registers.
                 */
                mtpr (VIADR, VIR_ALU_DIAG_CTRL);
                ls_alu_diag_ctrl = mfpr (VIDLO);

                /* VMS does a disable here */

                /*
                 * If the vpd_in_kernel flag is set, then the mchk
                 * probably happened while executing the disabled fault
                 * handler.  In this case the vector processor should be
                 * left in an enabled state.  This way, when the
                 * instruction is re-started and control is returned to
                 * the disabled fault handler, it will resume where it left
                 * off.  If the vpd_in_kernel flag is not set then the
                 * vector instruction causing the mchk was in a user
                 * program, so leave the processor disabled so that when
                 * the instruction is restarted, it will hit the disabled
                 * fault handler.
                 */
                if ((CURRENT_CPUDATA->cpu_vpdata->vpd_in_kernel | VPD_IN_KERNEL)
                        == VPD_IN_KERNEL) {
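
The listing breaks off here and continues on the next page. Throughout the VAX 6400 paths above, the load/store controller's internal registers (MOD_REV, VCTL_CSR, LSX_CCSR, ALU_DIAG_CTRL) are reached indirectly: the register number is written to VIADR and the data is then moved through VIDLO, as the comments in vp_cleanup() and vp_ls_bug() describe. The sketch below is not part of the original file; it only illustrates that access pattern with two hypothetical helpers. mtpr(), mfpr(), VIADR, VIDLO and the VIR_* constants come from the listing, everything else is assumed.

/*
 * Sketch only: vir_read()/vir_write() do not appear in the original source.
 * They wrap the indirect-access protocol used above: select an internal
 * register by writing its number to VIADR, then move data through VIDLO.
 */
long
vir_read (reg)
long    reg;
{
        mtpr (VIADR, reg);              /* select the internal register */
        return (mfpr (VIDLO));          /* read it back through VIDLO */
}

vir_write (reg, value)
long    reg, value;
{
        mtpr (VIADR, reg);              /* select the internal register */
        mtpr (VIDLO, value);            /* write the new value through VIDLO */
}

With such helpers, the 6400 cache-controller setup in vp_cleanup() would read as vir_write (VIR_LSX_CCSR, 0x138e00), and the machine-check path in vp_ls_bug() would collect its register snapshot with calls like ls_vctl_csr = vir_read (VIR_VCTL_CSR).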
