
📄 safe.c

📁 Linux encryption device based on a cryptographic chip
💻 C
📖 Page 1 of 4
	} else
		ret = EINVAL;
	return (ret);
}

static int
safe_process(void *arg, struct cryptop *crp, int hint)
{
	int err = 0, i, nicealign, uniform;
	struct safe_softc *sc = arg;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int bypass, oplen, ivsize;
	caddr_t iv;
	int16_t coffset;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec;
	int flags;

	DPRINTF("%s()\n", __FUNCTION__);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		safestats.st_invalid++;
		return (EINVAL);
	}
	if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		safestats.st_badsession++;
		return (EINVAL);
	}

	spin_lock_irqsave(&sc->sc_ringmtx, flags);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
		return (ERESTART);
	}
	re = sc->sc_front;

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_crp = crp;
	re->re_sesn = SAFE_SESSION(crp->crp_sid);

	re->re_src.nsegs = 0;
	re->re_dst.nsegs = 0;

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		printk("safe: no CRYPTO_F_IMBUF support");
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		re->re_src_io = (struct uio *)crp->crp_buf;
		re->re_dst_io = (struct uio *)crp->crp_buf;
	} else {
		safestats.st_badflags++;
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	sa = &re->re_sa;
	ses = &sc->sc_sessions[re->re_sesn];

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		safestats.st_nodesc++;
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	cmd0 = SAFE_SA_CMD0_BASIC;		/* basic group operation */
	cmd1 = 0;
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_NULL_HMAC) {
			maccrd = crd1;
			enccrd = NULL;
			cmd0 |= SAFE_SA_CMD0_OP_HASH;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_NULL_CBC) {
			maccrd = NULL;
			enccrd = crd1;
			cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
		} else {
			safestats.st_badalg++;
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		     crd1->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		     crd2->crd_alg == CRYPTO_3DES_CBC ||
		     crd2->crd_alg == CRYPTO_AES_CBC ||
		     crd2->crd_alg == CRYPTO_NULL_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		     crd1->crd_alg == CRYPTO_3DES_CBC ||
		     crd1->crd_alg == CRYPTO_AES_CBC ||
		     crd1->crd_alg == CRYPTO_NULL_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
		     crd2->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			safestats.st_badalg++;
			err = EINVAL;
			goto errout;
		}
		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
	}

	if (enccrd) {
		if (enccrd->crd_alg == CRYPTO_DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_3DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
			cmd0 |= SAFE_SA_CMD0_AES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			if (ses->ses_klen == 128)
				cmd1 |= SAFE_SA_CMD1_AES128;
			else if (ses->ses_klen == 192)
				cmd1 |= SAFE_SA_CMD1_AES192;
			else
				cmd1 |= SAFE_SA_CMD1_AES256;
			ivsize = 4*sizeof(u_int32_t);
		} else {
			cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
			ivsize = 0;
		}

		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			cmd0 |= SAFE_SA_CMD0_OUTBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				iv = enccrd->crd_iv;
			else
				iv = (caddr_t) ses->ses_iv;
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				if (crp->crp_flags & CRYPTO_F_IMBUF)
					printk("safe: no CRYPTO_F_IMBUF support");
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copyback(re->re_src_io,
						enccrd->crd_inject, ivsize, iv);
			}
			bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
			/* make iv LE */
			for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
				re->re_sastate.sa_saved_iv[i] =
					cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
			re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
		} else {
			cmd0 |= SAFE_SA_CMD0_INBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv,
					re->re_sastate.sa_saved_iv, ivsize);
			else if (crp->crp_flags & CRYPTO_F_IMBUF)
				printk("safe: no CRYPTO_F_IMBUF support");
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata(re->re_src_io, enccrd->crd_inject,
					ivsize, (caddr_t)re->re_sastate.sa_saved_iv);
			/* make iv LE */
			for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
				re->re_sastate.sa_saved_iv[i] =
					cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
		}
		/*
		 * For basic encryption use the zero pad algorithm.
		 * This pads results to an 8-byte boundary and
		 * suppresses padding verification for inbound (i.e.
		 * decrypt) operations.
		 *
		 * NB: Not sure if the 8-byte pad boundary is a problem.
		 */
		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

		/* XXX assert key bufs have the same size */
		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
	}

	if (maccrd) {
		if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			cmd0 |= SAFE_SA_CMD0_MD5;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
		} else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
			cmd0 |= SAFE_SA_CMD0_SHA1;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
		} else {
			cmd0 |= SAFE_SA_CMD0_HASH_NULL;
		}
		/*
		 * Digest data is loaded from the SA and the hash
		 * result is saved to the state block where we
		 * retrieve it for return to the caller.
		 */
		/* XXX assert digest bufs have the same size */
		bcopy(ses->ses_hminner, sa->sa_indigest,
			sizeof(sa->sa_indigest));
		bcopy(ses->ses_hmouter, sa->sa_outdigest,
			sizeof(sa->sa_outdigest));

		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
	}

	if (enccrd && maccrd) {
		/*
		 * The offset from hash data to the start of
		 * crypt data is the difference in the skips.
		 */
		bypass = maccrd->crd_skip;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		if (coffset < 0) {
			DPRINTF("%s: hash does not precede crypt; "
				"mac skip %u enc skip %u\n",
				__func__, maccrd->crd_skip, enccrd->crd_skip);
			safestats.st_skipmismatch++;
			err = EINVAL;
			goto errout;
		}
		oplen = enccrd->crd_skip + enccrd->crd_len;
		if (maccrd->crd_skip + maccrd->crd_len != oplen) {
			DPRINTF("%s: hash amount %u != crypt amount %u\n",
				__func__, maccrd->crd_skip + maccrd->crd_len,
				oplen);
			safestats.st_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
#ifdef SAFE_DEBUG
		if (debug) {
			printk("mac: skip %d, len %d, inject %d\n",
			    maccrd->crd_skip, maccrd->crd_len,
			    maccrd->crd_inject);
			printk("enc: skip %d, len %d, inject %d\n",
			    enccrd->crd_skip, enccrd->crd_len,
			    enccrd->crd_inject);
			printk("bypass %d coffset %d oplen %d\n",
				bypass, coffset, oplen);
		}
#endif
		if (coffset & 3) {	/* offset must be 32-bit aligned */
			DPRINTF("%s: coffset %u misaligned\n",
				__func__, coffset);
			safestats.st_coffmisaligned++;
			err = EINVAL;
			goto errout;
		}
		coffset >>= 2;
		if (coffset > 255) {	/* offset must be <256 dwords */
			DPRINTF("%s: coffset %u too big\n",
				__func__, coffset);
			safestats.st_cofftoobig++;
			err = EINVAL;
			goto errout;
		}
		/*
		 * Tell the hardware to copy the header to the output.
		 * The header is defined as the data from the end of
		 * the bypass to the start of data to be encrypted.
		 * Typically this is the inline IV.  Note that you need
		 * to do this even if src+dst are the same; it appears
		 * that w/o this bit the crypted data is written
		 * immediately after the bypass data.
		 */
		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
		/*
		 * Disable IP header mutable bit handling.  This is
		 * needed to get correct HMAC calculations.
		 */
		cmd1 |= SAFE_SA_CMD1_MUTABLE;
	} else {
		if (enccrd) {
			bypass = enccrd->crd_skip;
			oplen = bypass + enccrd->crd_len;
		} else {
			bypass = maccrd->crd_skip;
			oplen = bypass + maccrd->crd_len;
		}
		coffset = 0;
	}
	/* XXX verify multiple of 4 when using s/g */
	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
		DPRINTF("%s: bypass %u too big\n", __func__, bypass);
		safestats.st_bypasstoobig++;
		err = EINVAL;
		goto errout;
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		printk("safe: no CRYPTO_F_IMBUF support");
		err = EINVAL;
		goto errout;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
			safestats.st_noload++;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = safe_dmamap_aligned(&re->re_src);
	uniform = safe_dmamap_uniform(&re->re_src);

	DPRINTF("src nicealign %u uniform %u nsegs %u\n",
		nicealign, uniform, re->re_src.nsegs);
	if (re->re_src.nsegs > 1) {
		re->re_desc.d_src = sc->sc_sp_dma +
			((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
		for (i = 0; i < re->re_src_nsegs; i++) {
			/* NB: no need to check if there's space */
			pd = sc->sc_spfree;
			if (++(sc->sc_spfree) == sc->sc_springtop)
				sc->sc_spfree = sc->sc_spring;

			KASSERT((pd->pd_flags&3) == 0 ||
				(pd->pd_flags&3) == SAFE_PD_DONE,
				("bogus source particle descriptor; flags %x",
				pd->pd_flags));
			pd->pd_addr = re->re_src_segs[i].ds_addr;
			pd->pd_size = re->re_src_segs[i].ds_len;
			pd->pd_flags = SAFE_PD_READY;
		}
		cmd0 |= SAFE_SA_CMD0_IGATHER;
	} else {
		/*
		 * No need for gather, reference the operand directly.
		 */
		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
	}

	if (enccrd == NULL && maccrd != NULL) {
		/*
		 * Hash op; no destination needed.
		 */
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			if (!nicealign) {
				safestats.st_iovmisaligned++;
				err = EINVAL;
				goto errout;
			}
			if (uniform != 1) {
				printk("safe: !uniform source\n");
				if (!uniform) {
					/*
					 * There's no way to handle the DMA
					 * requirements with this uio.  We
					 * could create a separate DMA area for
					 * the result and then copy it back,
					 * but for now we just bail and return
					 * an error.  Note that uio requests
					 * > SAFE_MAX_DSIZE are handled because
					 * the DMA map and segment list for the
					 * destination will result in a
					 * destination particle list that does
					 * the necessary scatter DMA.
					 */
					safestats.st_iovnotuniform++;
					err = EINVAL;
					goto errout;
				}
			} else
				re->re_dst = re->re_src;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			printk("safe: no CRYPTO_F_IMBUF support");
			err = EINVAL;
			goto errout;
		} else {
			safestats.st_badflags++;
			err = EINVAL;
			goto errout;
		}

		if (re->re_dst.nsegs > 1) {
			re->re_desc.d_dst = sc->sc_dp_dma +
			    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
			for (i = 0; i < re->re_dst_nsegs; i++) {
				pd = sc->sc_dpfree;
				KASSERT((pd->pd_flags&3) == 0 ||
					(pd->pd_flags&3) == SAFE_PD_DONE,
					("bogus dest particle descriptor; flags %x",
						pd->pd_flags));
				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
					sc->sc_dpfree = sc->sc_dpring;
				pd->pd_addr = re->re_dst_segs[i].ds_addr;
				pd->pd_flags = SAFE_PD_READY;
			}
			cmd0 |= SAFE_SA_CMD0_OSCATTER;
		} else {
			/*
			 * No need for scatter, reference the operand directly.
			 */
			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
		}
	}

	/*
	 * All done with setup; fill in the SA command words
	 * and the packet engine descriptor.  The operation
	 * is now ready for submission to the hardware.
	 */
	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
	sa->sa_cmd1 = cmd1
		    | (coffset << SAFE_SA_CMD1_OFFSET_S)
		    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
		    | SAFE_SA_CMD1_SRPCI
		    ;
	/*
	 * NB: the order of writes is important here.  In case the
	 * chip is scanning the ring because of an outstanding request
	 * it might nab this one too.  In that case we need to make
	 * sure the setup is complete before we write the length
	 * field of the descriptor as it signals the descriptor is
	 * ready for processing.
	 */
	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
	if (maccrd)
		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
	wmb();
	re->re_desc.d_len = oplen
			  | SAFE_PE_LEN_READY
			  | (bypass << SAFE_PE_LEN_BYPASS_S)
			  ;

	safestats.st_ipackets++;
	safestats.st_ibytes += oplen;

	if (++(sc->sc_front) == sc->sc_ringtop)
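For reference, here is a small stand-alone sketch of the bypass/coffset/oplen arithmetic that the combined cipher+MAC path above performs before filling in the SA command words. The skip and length values are hypothetical (roughly what an ESP-style packet might use), and main() and the variable names are illustrative only; the sketch simply mirrors the same sanity checks and is not part of the driver.

/*
 * Illustrative only -- not driver code.  Mirrors the offset arithmetic
 * used above when a request carries both a MAC and a cipher descriptor:
 * the hash region must start at or before the crypt region, both must
 * end at the same byte, and their distance is handed to the hardware
 * in 32-bit words.
 */
#include <stdio.h>

int main(void)
{
	int mac_skip = 0,  mac_len = 72;	/* hypothetical: hash covers header+IV+payload */
	int enc_skip = 24, enc_len = 48;	/* hypothetical: crypt starts after 24-byte header+IV */

	int bypass  = mac_skip;			/* data before the hash region is bypassed */
	int coffset = enc_skip - mac_skip;	/* hash-to-crypt offset, must be >= 0 */
	int oplen   = enc_skip + enc_len;	/* total bytes handed to the packet engine */

	/* the same sanity checks safe_process() applies */
	if (coffset < 0 || (coffset & 3) != 0 || (coffset >> 2) > 255)
		return 1;			/* misaligned or too far apart */
	if (mac_skip + mac_len != oplen)
		return 1;			/* hash and crypt regions must end together */
	if (bypass > 96)
		return 1;			/* bypass field is limited to 96 bytes */

	printf("bypass %d, coffset %d dwords, oplen %d\n",
	    bypass, coffset >> 2, oplen);
	return 0;
}

With these numbers the chip would see a bypass of 0 bytes, a hash-to-crypt offset of 6 dwords, and an operation length of 72 bytes.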
