dm-crypt.c

From the Linux kernel source tree · C · 1,109 lines in total · page 1 of 2

/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;
	atomic_t pending;
	int error;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	int write;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
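/*
 * A minimal worked sketch of the two sector-counter schemes (an
 * illustration, not part of the original source), assuming a 16-byte IV
 * and a 16-byte cipher block, so benbi_shift = 9 - ilog2(16) = 5; the
 * resulting IV bytes for sector 5 would be:
 *
 *   plain: 05 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 *          (cpu_to_le32(5) in bytes 0..3, zero padding after)
 *
 *   benbi: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 a1
 *          (cpu_to_be64((5 << 5) + 1) = 0xa1 in the last 8 bytes:
 *          the 512-byte sector count converted to a count of 16-byte
 *          narrow blocks, starting at 1)
 */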
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}
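/*
 * In equation form (a hedged summary of the two functions above, with
 * bulk key K, the digest named by "opts" as H, e.g. sha256, and sector
 * number S):
 *
 *	salt = H(K)                           (crypt_iv_essiv_ctr)
 *	IV   = E_salt(le64(S), zero padded)   (crypt_iv_essiv_gen)
 *
 * Because the salt is derived from K, per-sector IVs cannot be
 * predicted without the key, unlike the plain scheme.
 */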
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
	} else {
		if (write)
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
	}

	return r;
}

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector, int write)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	ctx->write = write;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out,
						       ctx->idx_out);
		struct scatterlist sg_in, sg_out;

		sg_init_table(&sg_in, 1);
		sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
			    bv_in->bv_offset + ctx->offset_in);

		sg_init_table(&sg_out, 1);
		sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
			    bv_out->bv_offset + ctx->offset_out);

		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
					      ctx->write, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}
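/*
 * A minimal worked sketch, assuming 4 KiB pages and 512-byte sectors:
 * a bio carrying one full page spans 8 sectors, so the loop above runs
 * 8 times, each pass wrapping one 1 << SECTOR_SHIFT (512-byte) chunk of
 * the input and output bio_vecs into single-entry scatterlists,
 * converting it, and incrementing ctx->sector so that every 512-byte
 * sector gets its own IV.
 */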
static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (error < 0)
		io->error = error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	bio_endio(io->base_bio, io->error);

	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
static void kcryptd_do_work(struct work_struct *work);
static void kcryptd_do_crypt(struct work_struct *work);

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_do_work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_do_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned read_io = bio_data_dir(clone) == READ;

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (!read_io) {
		crypt_free_buffer_pages(cc, clone);
		goto out;
	}

	if (unlikely(error))
		goto out;

	bio_put(clone);
	kcryptd_queue_crypt(io);
	return;

out:
	bio_put(clone);
	crypt_dec_pending(io, error);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void process_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	/*
