/* ka9000.c */
/*
 * RXFCT interrupt service routine.  Services requests from
 * the SPU.
 */
ka9000_rxfct_isr()
{
    u_long rxfct;
    u_long rxprm;
    int func;
    int sparam;
    int status;
    struct dmd *dmd;
    int s;

    /* Run at spl */
    s = spl5();

    rxprm = mfpr(RXPRM);
    rxfct = mfpr(RXFCT);

    /* Extract fields of interest */
    sparam = RXFCT_GET_SPARAM(rxfct);
    func = rxfct & RXFCT_FUNCT;

    /* Perform the function */
    switch (func) {

    case RXFCT_RM_CPU:
        /* We don't support this since we already have
         * a command to do it.  Print a warning message
         * and fail the request.
         */
        ka9000_rmcpu_reqs++;
        printf("SPU CPU removal not supported.\n");
        printf("Use /etc/stopcpu instead.\n");
        RXFCT_SET_STATUS(rxfct, 0);
        RXFCT_SET_SPARAM(rxfct, 0);
        rxfct |= RXFCT_VALID;
        mtpr(RXFCT, rxfct);
        break;

    case RXFCT_ADD_CPU:
        /* We don't support this since we already have
         * a command to do it.  Print a warning message
         * and fail the request.
         */
        ka9000_addcpu_reqs++;
        printf("SPU CPU addition not supported.\n");
        printf("Use /etc/startcpu instead.\n");
        RXFCT_SET_STATUS(rxfct, 0);
        RXFCT_SET_SPARAM(rxfct, 0);
        rxfct |= RXFCT_VALID;
        mtpr(RXFCT, rxfct);
        break;

    case RXFCT_MEM_BAD:
        /* We only support this request to the extent
         * that we print a warning message to the
         * console.  Future implementations may wish
         * to add a hook to VM to disable a bad page.
         */
        printf("VM: SPU reported bad page @0x%x\n", rxprm);
        /* No response expected */
        break;

    case RXFCT_SEND_ERRLOG:
        /* Find the virtual address of the
         * dmd that we got... (rxprm contains the
         * physical address...)
         */
        for (dmd = spu_dmdlist; dmd; dmd = (struct dmd *)dmd->dmd_link) {
            if (rxprm == dmd->dmd_paddr) {
                /* Found it! */
                break;
            }
        }
        if (!dmd) {
            RXFCT_SET_STATUS(rxfct, 0);
            RXFCT_SET_SPARAM(rxfct, 0);
            rxfct |= RXFCT_VALID;
            mtpr(RXFCT, rxfct);
            break;
        }

        /* Process the error log message... */
        v9000err_spu(dmd->dmd_bufaddr, dmd->dmd_buflen);

        /* Acknowledge the datagram */
        RXFCT_SET_STATUS(rxfct, 1);
        RXFCT_SET_SPARAM(rxfct, 1);
        rxfct |= RXFCT_VALID;
        mtpr(RXFCT, rxfct);
        ka9000_spu_request(TXFCT_RETURN_DG_STATUS, SPU_SUCCESS, rxprm, 0);

        /* Free up the datagram */
        ka9000_dgfree(rxprm);
        break;

    case RXFCT_SEND_OPCOM:
        ka9000_opcom_reqs++;
        {
            char *tmpbuf;
            struct dmd *dp;

            /* OPCOM is a VMS-ism, but we may have a use for
             * it anyway.  If we get an OPCOM message, just
             * print it to the console.  The printf's will
             * in turn get error-logged...
             */

            /* Find the dmd */
            for (dp = spu_dmdlist; dp; dp = (struct dmd *)dp->dmd_link) {
                if (dp->dmd_paddr == rxprm)
                    break;
            }
            if (!dp) {
                RXFCT_SET_STATUS(rxfct, 0);
                RXFCT_SET_SPARAM(rxfct, 0);
                rxfct |= RXFCT_VALID;
                mtpr(RXFCT, rxfct);
                break;
            }

            /* Allocate a temporary buffer to hold the
             * message...
             */
            KM_ALLOC(tmpbuf, char *, dp->dmd_buflen+1, KM_SPU, KM_NOWAIT);
            if (tmpbuf) {
                /* If we got the buffer, then copy the
                 * message over, null-terminate it, and
                 * print it out.  Otherwise, drop it.
                 */
                bcopy(dp->dmd_bufaddr, tmpbuf, dp->dmd_buflen);
                tmpbuf[dp->dmd_buflen] = '\0';
                printf("OPCOM: %s\n", tmpbuf);
                KM_FREE(tmpbuf, KM_SPU);
            }

            /* Acknowledge the datagram */
            RXFCT_SET_STATUS(rxfct, 1);
            RXFCT_SET_SPARAM(rxfct, 1);
            rxfct |= RXFCT_VALID;
            mtpr(RXFCT, rxfct);
            ka9000_spu_request(TXFCT_RETURN_DG_STATUS, SPU_SUCCESS, rxprm, 0);
        }
        /* In any case, free up the datagram */
        ka9000_dgfree(rxprm);
        break;

    case RXFCT_GET_DG:
        /* Allocate datagram... */
        status = ka9000_dgalloc(sparam, &rxprm);

        /* and report our results back to the SPU */
        RXFCT_SET_STATUS(rxfct, status);
        RXFCT_SET_SPARAM(rxfct, status);
        mtpr(RXPRM, rxprm);
        rxfct |= RXFCT_VALID;
        mtpr(RXFCT, rxfct);
        break;

    case RXFCT_SEND_DG:
        /* We shouldn't see any of these... just
         * quietly acknowledge them...
         */
        ka9000_spu_request(TXFCT_RETURN_DG_STATUS, SPU_SUCCESS, rxprm, 0);

        /* Free up the datagram */
        ka9000_dgfree(rxprm);
        break;

    case RXFCT_RETURN_DG_STATUS:
        /* SPU is done with this datagram and so we
         * may reallocate it.
         */
        ka9000_dgfree(rxprm);
        break;

    case RXFCT_SET_KEEPALIVE:
        ka9000_keepalive_enabled = sparam;
        break;

    case RXFCT_ABORT_DATALINK:
        /* We should never see this! */
        RXFCT_SET_STATUS(rxfct, 1);
        RXFCT_SET_SPARAM(rxfct, 1);
        rxfct |= RXFCT_VALID;
        mtpr(RXFCT, rxfct);
        break;

    default:
        printf("Unsupported SPU function 0x%x\n", func);
        RXFCT_SET_STATUS(rxfct, 0);
        RXFCT_SET_SPARAM(rxfct, 0);
        rxfct |= RXFCT_VALID;
        mtpr(RXFCT, rxfct);
    }
    splx(s);
}
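/*
 * Illustrative sketch (not part of the original source): every RXFCT
 * response in the ISR above follows the same sequence -- put a status
 * into the STATUS and SPARAM fields, set the VALID bit, and write the
 * result back to the RXFCT register with mtpr().  If that sequence were
 * ever factored out, it might look like the hypothetical helper below.
 * The helper name is an assumption; the macros and mtpr() call are the
 * ones already used above.
 */
#ifdef notdef
ka9000_rxfct_reply(rxfct, status)
u_long rxfct;
int status;
{
    RXFCT_SET_STATUS(rxfct, status);    /* report success (1) or failure (0) */
    RXFCT_SET_SPARAM(rxfct, status);    /* mirror it in the SPARAM field */
    rxfct |= RXFCT_VALID;               /* mark the response as valid */
    mtpr(RXFCT, rxfct);                 /* hand it back to the SPU */
}
#endif /* notdef */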
/*** Local functions used by ka9000_rxfct_isr() ***/

/* Allocate and initialize a datagram.  NOTE: This routine assumes that
 * we're using 32-bit physical addresses.  This will have to change if/when
 * we go to Aquarius 34-bit physical addresses...
 */
ka9000_dgalloc(nbytes, dmdaddrp)
int nbytes;
caddr_t *dmdaddrp;
{
    int allocpages;
    int allocbytes;
    int npfn, i;
    caddr_t buf_vaddr;
    struct dmd *dmd;

    /* Round up the request to a page-sized boundary... */
    allocbytes = roundup(nbytes, NBPG);
    allocpages = allocbytes / NBPG;

    /* Allocate a page of memory for the dmd header */
    KM_ALLOC(dmd, struct dmd *, NBPG, KM_SPU, KM_NOWAIT);

    /* If we fail, return failure status */
    if (dmd == (struct dmd *)NULL) {
        *dmdaddrp = (caddr_t)NULL;
        return(0);
    }

    /* Allocate page(s) of memory for the datagram */
    KM_ALLOC(buf_vaddr, caddr_t, allocbytes, KM_SPU, KM_NOWAIT);

    /* If we fail, then back out the previous allocation and
     * return failure status.
     */
    if (buf_vaddr == (caddr_t)NULL) {
        KM_FREE(dmd, KM_SPU);
        *dmdaddrp = (caddr_t)NULL;
        return(0);
    }

    /* Link this dmd onto the dmd list */
    dmd->dmd_link = (long)spu_dmdlist;
    spu_dmdlist = dmd;

    /* Initialize the dmd... */
    dmd->dmd_paddr = svtophy(dmd) >> 2;
    dmd->dmd_buflen = nbytes;
    dmd->dmd_pfn_count = allocpages;
    dmd->dmd_bufaddr = (long)buf_vaddr;
    dmd->dmd_pfn[0] = svtophy(buf_vaddr)/NBPG;
    for (i = 1; i < allocpages; i++) {
        dmd->dmd_pfn[i] = dmd->dmd_pfn[i-1] + 1;
    }
    *dmdaddrp = (caddr_t)dmd->dmd_paddr;
    return(1);
}

/* Inverse of the previous routine; deallocates a previously-allocated
 * datagram.
 */
ka9000_dgfree(paddr)
caddr_t paddr;
{
    struct dmd *dmd_prev;
    struct dmd *dmd;

    /* Walk down the list until we find the DMD
     * we're looking for...
     */
    dmd_prev = (struct dmd *)NULL;
    dmd = spu_dmdlist;
    while (dmd) {
        if ((caddr_t)dmd->dmd_paddr == paddr) {
            break;
        }
        dmd_prev = dmd;
        dmd = (struct dmd *)dmd->dmd_link;
    }
    if (!dmd) {
        return;
    }

    /* Unlink this DG from the system list */
    if (dmd_prev) {
        dmd_prev->dmd_link = dmd->dmd_link;
    } else {
        /* This is the first (or only) DG on the list */
        spu_dmdlist = (struct dmd *)dmd->dmd_link;
    }

    /* Free up the memory. */
    KM_FREE(dmd->dmd_bufaddr, KM_SPU);
    KM_FREE(dmd, KM_SPU);
}
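/*
 * Illustrative sketch (not part of the original source): how the two
 * routines above pair up.  ka9000_dgalloc() returns 1 on success and
 * hands back, via its second argument, the SPU-visible handle that the
 * ISR writes to RXPRM; the same handle later comes back from the SPU
 * and is passed to ka9000_dgfree().  The variable name and the 512-byte
 * size below are hypothetical; the calls are the ones defined above.
 */
#ifdef notdef
    {
        caddr_t dg_handle;

        /* Ask for a 512-byte datagram (rounded up to whole pages inside) */
        if (ka9000_dgalloc(512, &dg_handle)) {
            /* ... hand dg_handle to the SPU, e.g. via RXPRM ... */

            /* Later, once the SPU has returned the datagram, release it */
            ka9000_dgfree(dg_handle);
        }
    }
#endif /* notdef */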
/*** Utility routines for systems programmers ***/

/* Clear warmstart flag */
ka9000_clear_warmstart()
{
    ka9000_spu_request(TXFCT_CLEAR_WS, boot_cpu_num, 0, 1);
}

/* Clear coldstart flag */
ka9000_clear_coldstart()
{
    ka9000_spu_request(TXFCT_CLEAR_CS, boot_cpu_num, 0, 1);
}

/* Reset XJA adapter(s) */
ka9000_io_reset(xjamask)
int xjamask;
{
    /* Do we need to wait for this function to complete? */
    ka9000_spu_request(TXFCT_IO_RESET, xjamask, 0, 1);
}

/* Disable VBOX */
ka9000_disable_vbox(vbmask)
int vbmask;
{
    /* Do we need to wait for this function to complete? */
    ka9000_spu_request(TXFCT_DISABLE_VBOX, vbmask, 0, 1);
}

/* Error logging function */

#define OFFSET_SID        0
#define OFFSET_SYSTYPE    6
#define OFFSET_ENTRYCODE  36
#define SIZE_SID          4
#define SIZE_SYSTYPE      4
#define SIZE_ENTRYCODE    2
#define SIZE_HEADER       48

/* returns 1 if ealloc() returns full, 0 otherwise */
v9000err_spu(ptr, len)
caddr_t ptr;
int len;
{
    long sid, systype, entrycode;
    int len_b;
    struct el_rec *elp;

    if (len < SIZE_HEADER) {
        return(0);
    }
    len_b = len - SIZE_HEADER;
    sid = *(long *)(ptr + OFFSET_SID);
    systype = *(long *)(ptr + OFFSET_SYSTYPE);
    entrycode = *(short *)(ptr + OFFSET_ENTRYCODE);

    /* Timestamp messages from the SPU risk filling up the error log
     * buffer in cases where the host has been down for an extended
     * period of time, and then the host reboots and messages arrive
     * from the SPU before elcsd is started and can empty the error
     * log buffer.  Besides, timestamps don't really belong in the
     * error log buffer anyway.
     */
#define SPU_TIMESTAMP_MSG 38
    if (entrycode == SPU_TIMESTAMP_MSG)
        return(0);

    if ((elp = ealloc(len_b, EL_PRIHIGH)) == EL_FULL)
        return(1);

    bcopy(ptr + SIZE_HEADER, &elp->el_body, len_b);
    elp->elrhdr.rhdr_sid = sid;
    elp->elrhdr.rhdr_systype = systype;

    switch (entrycode) {
    case ELSPU_MC:
        LSUBID(elp, ELCT_MCK, ELMCKT_9000SPU, EL_UNDEF, EL_UNDEF,
            EL_UNDEF, EL_UNDEF);
        break;
    case ELSPU_SYNDROME:
        LSUBID(elp, ELCT_9000_SYNDROME, EL_UNDEF, EL_UNDEF, EL_UNDEF,
            EL_UNDEF, EL_UNDEF);
        break;
    case ELSPU_SE:
        LSUBID(elp, ELCT_MEM, EL_UNDEF, ELMCNTR_9000_SE, EL_UNDEF,
            EL_UNDEF, EL_UNDEF);
        break;
    case ELSPU_HE:
        LSUBID(elp, ELCT_MEM, EL_UNDEF, ELMCNTR_9000_HE, EL_UNDEF,
            EL_UNDEF, EL_UNDEF);
        break;
    case ELSPU_EMM:
        LSUBID(elp, ELCT_NMIEMM, EL_UNDEF, EL_UNDEF, EL_UNDEF,
            EL_UNDEF, EL_UNDEF);
        break;
    case ELSPU_HLT:
        LSUBID(elp, ELCT_9000_KAF, ELMCKT_9000SPU, EL_UNDEF, EL_UNDEF,
            EL_UNDEF, EL_UNDEF);
        break;
    case ELSPU_BIADPERR:
        LSUBID(elp, ELCT_ADPTR, ELADP_SJASCM, EL_UNDEF, EL_UNDEF,
            EL_UNDEF, EL_UNDEF);
        break;
    case ELSPU_CLKERR:
        LSUBID(elp, ELCT_9000_CLK, EL_UNDEF, EL_UNDEF, EL_UNDEF,
            EL_UNDEF, EL_UNDEF);
        break;
    case ELSPU_SCAN:
        LSUBID(elp, ELCT_9000_SCAN, EL_UNDEF, EL_UNDEF, EL_UNDEF,
            EL_UNDEF, EL_UNDEF);
        break;
    case ELSPU_CONFIG:
        LSUBID(elp, ELCT_9000_CONFIG, EL_UNDEF, EL_UNDEF, EL_UNDEF,
            EL_UNDEF, EL_UNDEF);
        break;
    default:
        LSUBID(elp, EL_UNDEF, entrycode, EL_UNDEF, EL_UNDEF,
            EL_UNDEF, EL_UNDEF);
        break;
    }
    EVALID(elp);
    return(0);
}

ka9000_enable_errlog()
{
    /* Only do this once!! */
    if (!ka9000_errlog_enabled) {
        ka9000_errlog_enabled = 1;
        /* Enable error log transmission */
        ka9000_spu_request(TXFCT_ENABLE_ERRLOG, 1, 0, 0);
    }
}
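/*
 * Informational sketch (not part of the original source): the SPU
 * error-log datagram layout that v9000err_spu() above assumes, as
 * implied by the OFFSET_xxx / SIZE_xxx constants:
 *
 *    byte offset   field          size
 *    0             SID            4 bytes
 *    6             system type    4 bytes
 *    36            entry code     2 bytes
 *    0..47         header         48 bytes total
 *    48...         entry body     (len - 48) bytes, copied into the
 *                                 error-log record by bcopy()
 */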
/* handles error from XJA (XMI to Jbox adapter) */
ka9000xja_err(xjanum)
int xjanum;
{
    register struct el_rec *elp;
    register struct xmi_reg *nxv;          /* virtual pointer to XMI node */
    register struct xja_node_regs *nodep;  /* pointer to XJA node */
    register struct xja_regs *xja;         /* pointer to xja private space */
    register int xja_node, pri, len_b;

    /* compute address of xja private space */
    xja = (struct xja_regs *)(((char *)xja_mem) + xjanum*NBPG);

    /* compute address of xja nodespace */
    xja_node = (xja->xja_cnf >> 12) & 0xf;
    nxv = xmi_start + (xjanum * 16) + xja_node;
    nodep = (struct xja_node_regs *)nxv;

    /* For all errors except the three parity errors, we will panic. */

    /* For ERRS, check all errors other than: (XJA_ERRS_JXDI_PE) */
    pri = EL_PRIHIGH;
    if (xja->xja_errs & (XJA_ERRS_JXDI_MPE |
        XJA_ERRS_XCE_TE |
        XJA_ERRS_ICU_BC |
        XJA_ERRS_CPU_RO |
        XJA_ERRS_RBO |
        XJA_ERRS_CLE |
        XJA_ERRS_CBI_PE |
        XJA_ERRS_XMI_ATO |
        XJA_ERRS_XMI_PF)) {
        pri = EL_PRISEVERE;
    }

    /* For BER, check all errors other than: (XMI_CC|XMI_PE) */
    if (nodep->xja_node_ber & (XMI_NRST | XMI_NHALT | XMI_XFAULT |
        XMI_WEI | XMI_WSE | XMI_RIDNAK | XMI_WDNAK | XMI_CRD |
        XMI_NRR | XMI_RSE | XMI_RER | XMI_CNAK | XMI_TTO)) {
        pri = EL_PRISEVERE;
    }

    if ((elp = ealloc(sizeof(struct el_xja), pri)) != EL_FULL) {
        LSUBID(elp, ELCT_ADPTR, ELADP_XJA, EL_UNDEF, xjanum,
            EL_UNDEF, EL_UNDEF);

        /* Struct xmi_reg has registers present in all nodes on the XMI.
         * It does not have registers specific to XJA nodes on the XMI.
         * Struct xja_node_regs is used to reference these regs.
         */
        elp->el_body.el_xja.el_xja_xdev = nodep->xja_node_dev;
        elp->el_body.el_xja.el_xja_xber = nodep->xja_node_ber;
        elp->el_body.el_xja.el_xja_xfadra = nodep->xja_node_xfadra;
        elp->el_body.el_xja.el_xja_xfadrb = nodep->xja_node_xfadrb;
        elp->el_body.el_xja.el_xja_aosts = nodep->xja_node_aosts;
        elp->el_body.el_xja.el_xja_sernum = nodep->xja_node_sernum;
        elp->el_body.el_xja.el_xja_errs = xja->xja_errs;
        elp->el_body.el_xja.el_xja_fcmd = xja->xja_fcmd;
        elp->el_body.el_xja.el_xja_ipintrsrc = xja->xja_ipint;
        elp->el_body.el_xja.el_xja_diag = xja->xja_diag;
        elp->el_body.el_xja.el_xja_dmafaddr = xja->xja_dmafaddr;
        elp->el_body.el_xja.el_xja_dmafcmd = xja->xja_dmafcmd;
        elp->el_body.el_xja.el_xja_errintr = xja->xja_errintr;
        elp->el_body.el_xja.el_xja_cnf = xja->xja_cnf;
        elp->el_body.el_xja.el_xja_xbiida = xja->xja_xbiida;
        elp->el_body.el_xja.el_xja_xbiidb = xja->xja_xbiidb;
        elp->el_body.el_xja.el_xja_errscb = xja->xja_errscb;
        EVALID(elp);
    }

    if (pri == EL_PRISEVERE)
        panic("xja error");

    /* We panic on all errors except for: XJA_ERRS_JXDI_PE, XMI_CC,
     * XMI_PE.  So, we need to clear only these specific errors.
     */
    xja->xja_errs = XJA_ERRS_JXDI_PE;
    nodep->xja_node_ber = XMI_CC | XMI_PE;
}
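/*
 * Informational sketch (not part of the original source): a worked
 * instance of the address arithmetic at the top of ka9000xja_err(),
 * using hypothetical values.  For xjanum == 1, the XJA private space
 * sits one page (NBPG bytes) past xja_mem; if bits <15:12> of that
 * XJA's CNF register read 3, its node registers are at slot
 * (1 * 16) + 3 relative to xmi_start (pointer arithmetic in units of
 * struct xmi_reg).
 */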