dm-crypt.c
来自「Linux Kernel 2.6.9 for OMAP1710」· C语言 代码 · 共 800 行 · 第 1/2 页
C
800 行
		/*
		 * NOTE(review): this chunk starts mid-way through
		 * crypt_encode_key(); the function's opening (and the loop
		 * header these braces close) lies outside the visible chunk.
		 * Each key byte is rendered as two lowercase hex digits.
		 */
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_tfm *tfm;
	char *tmp;
	char *cipher;
	char *mode;
	int crypto_flags;
	int key_size;

	if (argc != 5) {
		ti->error = PFX "Not enough arguments";
		return -EINVAL;
	}

	/*
	 * argv[0] is "cipher-mode"; strsep() splits in place.  A second
	 * dash (and anything after it) is tolerated with a warning only.
	 */
	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	mode = strsep(&tmp, "-");

	if (tmp)
		DMWARN(PFX "Unexpected additional cipher options");

	/* the key arrives hex-encoded: two characters per key byte */
	key_size = strlen(argv[1]) >> 1;

	/*
	 * cc carries a trailing variable-length key area of key_size
	 * bytes, hence the over-allocation beyond sizeof(*cc).
	 */
	cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error = PFX "Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	/* a missing mode defaults to "plain" (sector-number IV) */
	if (!mode || strcmp(mode, "plain") == 0)
		cc->iv_generator = crypt_iv_plain;
	else if (strcmp(mode, "ecb") == 0)
		cc->iv_generator = NULL;
	else {
		ti->error = PFX "Invalid chaining mode";
		goto bad1;
	}

	/* an IV generator implies CBC chaining; otherwise plain ECB */
	if (cc->iv_generator)
		crypto_flags = CRYPTO_TFM_MODE_CBC;
	else
		crypto_flags = CRYPTO_TFM_MODE_ECB;

	tfm = crypto_alloc_tfm(cipher, crypto_flags);
	if (!tfm) {
		ti->error = PFX "Error allocating crypto tfm";
		goto bad1;
	}
	if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) {
		ti->error = PFX "Expected cipher algorithm";
		goto bad2;
	}

	if (tfm->crt_cipher.cit_decrypt_iv && tfm->crt_cipher.cit_encrypt_iv)
		/* at least a 32 bit sector number should fit in our buffer */
		cc->iv_size = max(crypto_tfm_alg_ivsize(tfm),
				  (unsigned int)(sizeof(u32) / sizeof(u8)));
	else {
		/*
		 * cipher cannot take an IV: fall back to IV-less operation
		 * rather than failing the table load.
		 */
		cc->iv_size = 0;
		if (cc->iv_generator) {
			DMWARN(PFX "Selected cipher does not support IVs");
			cc->iv_generator = NULL;
		}
	}

	cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				     mempool_free_slab, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = PFX "Cannot allocate crypt io mempool";
		goto bad2;
	}

	cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page,
				       mempool_free_page, NULL);
	if (!cc->page_pool) {
		ti->error = PFX "Cannot allocate page mempool";
		goto bad3;
	}

	cc->tfm = tfm;
	cc->key_size = key_size;

	/* a key of "-" means "no key" (zero-length); anything else must decode */
	if ((key_size == 0 && strcmp(argv[1], "-") != 0) ||
	    crypt_decode_key(cc->key, argv[1], key_size) < 0) {
		ti->error = PFX "Error decoding key";
		goto bad4;
	}

	if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = PFX "Error setting key";
		goto bad4;
	}

	if (sscanf(argv[2], SECTOR_FORMAT, &cc->iv_offset) != 1) {
		ti->error = PFX "Invalid iv_offset sector";
		goto bad4;
	}

	if (sscanf(argv[4], SECTOR_FORMAT, &cc->start) != 1) {
		ti->error = PFX "Invalid device sector";
		goto bad4;
	}

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = PFX "Device lookup failed";
		goto bad4;
	}

	ti->private = cc;
	return 0;

	/* unwind in strict reverse order of acquisition */
bad4:
	mempool_destroy(cc->page_pool);
bad3:
	mempool_destroy(cc->io_pool);
bad2:
	crypto_free_tfm(tfm);
bad1:
	kfree(cc);
	return -EINVAL;
}

/*
 * Tear down an encryption mapping: release the resources acquired by
 * crypt_ctr() (pools, tfm, underlying device, config struct).
 */
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

	crypto_free_tfm(cc->tfm);
	dm_put_device(ti, cc->dev);
	kfree(cc);
}

/*
 * Completion handler for the cloned bios submitted by crypt_map().
 * May be called multiple times for partial completions (signalled by a
 * non-zero bio->bi_size after accounting).
 */
static int crypt_endio(struct bio *bio, unsigned int done, int error)
{
	struct crypt_io *io = (struct crypt_io *) bio->bi_private;
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (bio_data_dir(bio) == WRITE) {
		/*
		 * free the processed pages, even if
		 * it's only a partially completed write
		 */
		crypt_free_buffer_pages(cc, bio, done);
	}

	/* not done yet: more completions will follow */
	if (bio->bi_size)
		return 1;

	bio_put(bio);

	/*
	 * successful reads are decrypted by the worker thread
	 */
	if ((bio_data_dir(bio) == READ)
	    && bio_flagged(bio, BIO_UPTODATE)) {
		kcryptd_queue_io(io);
		return 0;
	}

	dec_pending(io, error);
	return error;
}

/*
 * Build one clone of (part of) the incoming bio, redirected at the
 * underlying device.  Writes get freshly allocated, already-encrypted
 * pages; reads get a copy of the original bvec array so the data can be
 * decrypted in place after completion.  Returns NULL on allocation or
 * encryption failure.
 */
static inline struct bio *
crypt_clone(struct crypt_config *cc, struct crypt_io *io, struct bio *bio,
            sector_t sector, int *bvec_idx, struct convert_context *ctx)
{
	struct bio *clone;

	if (bio_data_dir(bio) == WRITE) {
		/* encrypt into a newly allocated buffer before submission */
		clone = crypt_alloc_buffer(cc, bio->bi_size,
		                           io->first_clone, bvec_idx);
		if (clone) {
			ctx->bio_out = clone;
			if (crypt_convert(cc, ctx) < 0) {
				crypt_free_buffer_pages(cc, clone,
				                        clone->bi_size);
				bio_put(clone);
				return NULL;
			}
		}
	} else {
		/*
		 * The block layer might modify the bvec array, so always
		 * copy the required bvecs because we need the original
		 * one in order to decrypt the whole bio data *afterwards*.
		 */
		clone = bio_alloc(GFP_NOIO, bio_segments(bio));
		if (clone) {
			clone->bi_idx = 0;
			clone->bi_vcnt = bio_segments(bio);
			clone->bi_size = bio->bi_size;
			memcpy(clone->bi_io_vec, bio_iovec(bio),
			       sizeof(struct bio_vec) * clone->bi_vcnt);
		}
	}

	if (!clone)
		return NULL;

	/* retarget the clone at the backing device, past cc->start */
	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_sector = cc->start + sector;
	clone->bi_rw = bio->bi_rw;

	return clone;
}

/*
 * Map an incoming bio: split it into one or more clones (the crypt
 * buffers can be smaller than the whole bio) and submit them.  A
 * per-io pending count tracks outstanding clones; dec_pending()
 * completes the original bio once all clones have finished.
 */
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	struct crypt_io *io = mempool_alloc(cc->io_pool, GFP_NOIO);
	struct convert_context ctx;
	struct bio *clone;
	unsigned int remaining = bio->bi_size;
	sector_t sector = bio->bi_sector - ti->begin;
	int bvec_idx = 0;

	io->target = ti;
	io->bio = bio;
	io->first_clone = NULL;
	io->error = 0;
	atomic_set(&io->pending, 1); /* hold a reference */

	/* ctx is only used (by crypt_clone) on the write path */
	if (bio_data_dir(bio) == WRITE)
		crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_clone(cc, io, bio, sector, &bvec_idx, &ctx);
		if (!clone)
			goto cleanup;

		if (!io->first_clone) {
			/*
			 * hold a reference to the first clone, because it
			 * holds the bio_vec array and that can't be freed
			 * before all other clones are released
			 */
			bio_get(clone);
			io->first_clone = clone;
		}
		atomic_inc(&io->pending);

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		generic_make_request(clone);

		/* out of memory -> run queues */
		if (remaining)
			blk_congestion_wait(bio_data_dir(clone), HZ/100);
	}

	/* drop reference, clones could have returned before we reach this */
	dec_pending(io, 0);
	return 0;

cleanup:
	/* clones already in flight: let the endio path report the error */
	if (io->first_clone) {
		dec_pending(io, -ENOMEM);
		return 0;
	}

	/* if no bio has been dispatched yet, we can directly return the error */
	mempool_free(io, cc->io_pool);
	return -ENOMEM;
}

/*
 * Report target status: nothing for INFO, and for TABLE the same
 * "<cipher>-<mode> <hexkey|-> <iv_offset> <dev> <start>" line that
 * crypt_ctr() parses.
 */
static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	char buffer[32];
	const char *cipher;
	const char *mode = NULL;
	int offset;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		cipher = crypto_tfm_alg_name(cc->tfm);

		/* reverse of the mapping chosen in crypt_ctr() */
		switch(cc->tfm->crt_cipher.cit_mode) {
		case CRYPTO_TFM_MODE_CBC:
			mode = "plain";
			break;
		case CRYPTO_TFM_MODE_ECB:
			mode = "ecb";
			break;
		default:
			BUG();
		}

		snprintf(result, maxlen, "%s-%s ", cipher, mode);
		offset = strlen(result);

		if (cc->key_size > 0) {
			/* need 2 hex chars per key byte plus the NUL */
			if ((maxlen - offset) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + offset,
					 cc->key, cc->key_size);
			offset += cc->key_size << 1;
		} else {
			if (offset >= maxlen)
				return -ENOMEM;
			result[offset++] = '-';
		}

		format_dev_t(buffer, cc->dev->bdev->bd_dev);
		snprintf(result + offset, maxlen - offset,
			 " " SECTOR_FORMAT " %s " SECTOR_FORMAT,
			 cc->iv_offset, buffer, cc->start);
		break;
	}
	return 0;
}

/* device-mapper target registration record for the "crypt" target */
static struct target_type crypt_target = {
	.name   = "crypt",
	.version= {1, 0, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
};

/*
 * Module init: create the io slab cache and the kcryptd workqueue,
 * then register the target.  Unwinds in reverse order on failure.
 */
static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = kmem_cache_create("dm-crypt_io",
	                                   sizeof(struct crypt_io),
	                                   0, 0, NULL, NULL);
	if (!_crypt_io_pool)
		return -ENOMEM;

	_kcryptd_workqueue = create_workqueue("kcryptd");
	if (!_kcryptd_workqueue) {
		r = -ENOMEM;
		DMERR(PFX "couldn't create kcryptd");
		goto bad1;
	}

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR(PFX "register failed %d", r);
		goto bad2;
	}

	return 0;

bad2:
	destroy_workqueue(_kcryptd_workqueue);
bad1:
	kmem_cache_destroy(_crypt_io_pool);
	return r;
}

/*
 * Module exit: unregister the target and release the workqueue and
 * slab cache created in dm_crypt_init().
 */
static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR(PFX "unregister failed %d", r);

	destroy_workqueue(_kcryptd_workqueue);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?