// dm-target.c — TrueCrypt device-mapper target
// Checks if two regions overlap (borders are parts of regions)
static int RegionsOverlap (u64 start1, u64 end1, u64 start2, u64 end2)
{
	if (start1 < start2)
		return end1 >= start2;   // region 1 begins first: overlap iff it reaches region 2
	return start1 <= end2;       // region 2 begins first (or same): overlap iff region 1 starts inside it
}
// Drops one reference on a bio context; when the last reference goes
// away, completes the original bio and returns the context to its pool.
static void dereference_bio_ctx (struct bio_ctx *bc)
{
	trace (3, "dereference_bio_ctx (%p)\n", bc);
	if (atomic_dec_and_test (&bc->ref_count))
	{
		struct target_ctx *tc = (struct target_ctx *) bc->target->private;
		// Last reference gone: signal completion of the originating bio
		bio_endio (bc->orig_bio, bc->orig_bio->bi_size, bc->error);
		trace (3, "deref: mempool_free (%p)\n", bc);
		mempool_free (bc, tc->bio_ctx_pool);
	}
}
// Work-queue handler: decrypts the payload of a completed READ bio in
// process context, then drops the context reference taken for the I/O.
static void work_process (void *qdata)
{
	struct bio_ctx *bc = (struct bio_ctx *) qdata;
	struct target_ctx *tc = (struct target_ctx *) bc->target->private;
	struct bio_vec *vec;
	u64 sector = bc->crypto_sector;
	int i;
	unsigned long irq_flags;
	trace (3, "work_process (%p)\n", qdata);
	// Decrypt queued data
	bio_for_each_segment (vec, bc->orig_bio, i)
	{
		unsigned int sectors = vec->bv_len / SECTOR_SIZE;
		char *buf = bvec_kmap_irq (vec, &irq_flags);
		trace (2, "DecryptSectors (%llu, %d)\n", (unsigned long long) sector, sectors);
		DecryptSectors ((unsigned __int32 *) buf, sector, sectors, tc->ci);
		sector += sectors;
		flush_dcache_page (vec->bv_page);
		bvec_kunmap_irq (buf, &irq_flags);
		// Yield between segments so large requests do not hog the CPU
		if (i + 1 < bc->orig_bio->bi_vcnt)
			cond_resched ();
	}
	dereference_bio_ctx (bc);
}
static int truecrypt_endio (struct bio *bio, unsigned int bytes_done, int error)
{
struct bio_ctx *bc = (struct bio_ctx *) bio->bi_private;
struct target_ctx *tc = (struct target_ctx *) bc->target->private;
struct bio_vec *bv;
int seg_no;
trace (3, "truecrypt_endio (%p, %d, %d)\n", bio, bytes_done, error);
trace (1, "end: sc=%llu fl=%ld rw=%ld sz=%d ix=%hd vc=%hd dn=%d er=%d\n",
(unsigned long long) bio->bi_sector, bio->bi_flags, bio->bi_rw, bio->bi_size, bio->bi_idx, bio->bi_vcnt, bytes_done, error);
if (error != 0)
bc->error = error;
if (bio->bi_size)
{
trace (2, "Outstanding IO: %d\n", bio->bi_size);
return 1;
}
if (bio_data_dir (bio) == READ)
{
bio_put (bio);
// Queue decryption to leave completion interrupt ASAP
INIT_WORK (&bc->work, work_process, bc);
trace (3, "queue_work (%p)\n", work_queue);
queue_work (work_queue, &bc->work);
return error;
}
// Free pages allocated for encryption
bio_for_each_segment (bv, bio, seg_no)
{
trace (3, "endio: mempool_free (%p)\n", bv->bv_page);
mempool_free (bv->bv_page, tc->pg_pool);
}
bio_put (bio);
dereference_bio_ctx (bc);
return error;
}
static int truecrypt_map (struct dm_target *ti, struct bio *bio, union map_info *map_context)
{
struct target_ctx *tc = (struct target_ctx *) ti->private;
struct bio_ctx *bc;
struct bio *bion;
struct bio_vec *bv;
int seg_no;
trace (3, "truecrypt_map (%p, %p, %p)\n", ti, bio, map_context);
trace (1, "map: sc=%llu fl=%ld rw=%ld sz=%d ix=%hd vc=%hd\n",
(unsigned long long) bio->bi_sector, bio->bi_flags, bio->bi_rw, bio->bi_size, bio->bi_idx, bio->bi_vcnt);
// Write protection
if (bio_data_dir (bio) == WRITE && READ_ONLY (tc))
return -EPERM;
// Validate segment sizes
bio_for_each_segment (bv, bio, seg_no)
{
if (bv->bv_len & (SECTOR_SIZE - 1))
{
error ("unsupported bio segment size %d (%ld %d %hd %hd)\n",
bv->bv_len, bio->bi_rw, bio->bi_size, bio->bi_idx, bio->bi_vcnt);
return -EINVAL;
}
}
// Bio context
bc = malloc_wait (tc->bio_ctx_pool, bio_data_dir (bio));
if (!bc)
{
error ("bio context allocation failed\n");
return -ENOMEM;
}
trace (3, "mempool_alloc bc: %p\n", bc);
atomic_set (&bc->ref_count, 1);
bc->orig_bio = bio;
bc->error = 0;
bc->target = ti;
bc->crypto_sector = tc->start + (bio->bi_sector - ti->begin);
// New bio for encrypted device
trace (3, "bio_alloc (%hd)\n", bio_segments (bio));
while (!(bion = bio_alloc (GFP_NOIO | __GFP_NOMEMALLOC, bio_segments (bio))))
{
trace (3, "blk_congestion_wait\n");
blk_congestion_wait (bio_data_dir (bio), HZ / 50);
}
bion->bi_bdev = tc->dev->bdev;
bion->bi_end_io = truecrypt_endio;
bion->bi_idx = 0;
bion->bi_private = bc;
bion->bi_rw = bio->bi_rw;
bion->bi_sector = bc->crypto_sector;
bion->bi_size = bio->bi_size;
bion->bi_vcnt = bio_segments (bio);
if (bio_data_dir (bio) == READ)
{
// Buffers of originating bio can be used for decryption
memcpy (bion->bi_io_vec,
bio_iovec (bio),
bion->bi_vcnt * sizeof (struct bio_vec));
}
else
{
u64 sec_no = bc->crypto_sector;
int seg_no;
// Encrypt data to be written
unsigned long flags, copyFlags;
char *data, *copy;
memset (bion->bi_io_vec, 0, sizeof (struct bio_vec) * bion->bi_vcnt);
bio_for_each_segment (bv, bio, seg_no)
{
struct bio_vec *cbv = bio_iovec_idx (bion, seg_no);
unsigned int secs = bv->bv_len / SECTOR_SIZE;
// Hidden volume protection
if (!READ_ONLY (tc) && HID_VOL_PROT (tc)
&& RegionsOverlap (sec_no, sec_no + secs - 1, tc->read_only_start, tc->read_only_end))
{
tc->flags |= TC_READ_ONLY | TC_PROTECTION_ACTIVATED;
}
if (READ_ONLY (tc))
{
// Write not permitted
bio_for_each_segment (cbv, bion, seg_no)
{
if (cbv->bv_page != NULL)
mempool_free (cbv->bv_page, tc->pg_pool);
}
bio_put (bion);
bc->error = READ_ONLY (tc) ? -EPERM : -ENOMEM;
dereference_bio_ctx (bc);
return 0;
}
cbv->bv_page = malloc_wait (tc->pg_pool, bio_data_dir (bion));
trace (3, "mempool_alloc cbv: %p\n", bc);
cbv->bv_offset = 0;
cbv->bv_len = bv->bv_len;
copy = bvec_kmap_irq (cbv, ©Flags);
data = bvec_kmap_irq (bv, &flags);
memcpy (copy, data, bv->bv_len);
trace (2, "EncryptSectors (%llu, %d)\n", (unsigned long long) sec_no, secs);
EncryptSectors ((unsigned __int32 *) copy, sec_no, secs, tc->ci);
sec_no += secs;
bvec_kunmap_irq (data, &flags);
bvec_kunmap_irq (copy, ©Flags);
flush_dcache_page (bv->bv_page);
flush_dcache_page (cbv->bv_page);
if (seg_no + 1 < bio->bi_vcnt)
cond_resched();
}
}
atomic_inc (&bc->ref_count);
trace (3, "generic_make_request (rw=%ld sc=%llu)\n", bion->bi_rw, (unsigned long long) bion->bi_sector);
generic_make_request (bion);
dereference_bio_ctx (bc);
return 0;
}
// Device-mapper status callback.  STATUSTYPE_INFO reports nothing;
// STATUSTYPE_TABLE re-emits the target parameters so the mapping table
// can be read back (e.g. with "dmsetup table").
static int truecrypt_status (struct dm_target *ti, status_type_t type, char *result, unsigned int maxlen)
{
	struct target_ctx *tc = (struct target_ctx *) ti->private;
	switch (type)
	{
	case STATUSTYPE_INFO:
		result[0] = 0;
		break;
	case STATUSTYPE_TABLE:
		{
			char name[32];
			format_dev_t (name, tc->dev->bdev->bd_dev);
			// Cast every 64-bit field for %llu: u64 may be unsigned long
			// on some architectures, which would mismatch the varargs
			// (previously only tc->start was cast)
			snprintf (result, maxlen, "%d %d 0 0 %s %llu %llu %llu %llu %llu %d %s",
				tc->ci->ea,
				tc->ci->mode,
				name,
				(unsigned long long) tc->start,
				(unsigned long long) tc->read_only_start,
				(unsigned long long) tc->read_only_end,
				(unsigned long long) tc->mtime,
				(unsigned long long) tc->atime,
				tc->flags,
				tc->volume_path);
		}
		break;
	}
	return 0;
}
// Registration table describing the "truecrypt" device-mapper target
static struct target_type truecrypt_target = {
	.name    = "truecrypt",
	.module  = THIS_MODULE,
	.version = {TC_VERSION_NUM1, TC_VERSION_NUM2, TC_VERSION_NUM3},
	.ctr     = truecrypt_ctr,
	.dtr     = truecrypt_dtr,
	.map     = truecrypt_map,
	.status  = truecrypt_status
};
// Module init: self-tests the ciphers, creates the decryption work
// queue and the bio-context slab cache, then registers the target.
// On failure every resource created so far is torn down and the real
// error code is returned (previously the error path always returned
// -ENOMEM, discarding dm_register_target's error).
int __init dm_truecrypt_init(void)
{
	int r = -ENOMEM;
	trace (3, "dm_truecrypt_init (trace_level=%d)\n", trace_level);
	if (!AutoTestAlgorithms ())
	{
		DMERR ("truecrypt: self-test of algorithms failed");
		return -ERANGE;
	}
	work_queue = create_workqueue ("truecryptq");
	if (!work_queue)
	{
		DMERR ("truecrypt: create_workqueue failed");
		goto err;
	}
	bio_ctx_cache = kmem_cache_create ("truecrypt-bioctx", sizeof (struct bio_ctx), 0, 0, NULL, NULL);
	if (!bio_ctx_cache)
	{
		DMERR ("truecrypt: kmem_cache_create failed");
		goto err;
	}
	r = dm_register_target (&truecrypt_target);
	if (r < 0)
	{
		DMERR ("truecrypt: register failed %d", r);
		goto err;
	}
	return r;
err:
	if (work_queue)
		destroy_workqueue (work_queue);
	if (bio_ctx_cache)
		kmem_cache_destroy (bio_ctx_cache);
	return r;
}
// Module exit: unregisters the target and releases the global work
// queue and slab cache created in dm_truecrypt_init.
void __exit dm_truecrypt_exit(void)
{
	int status;
	trace (3, "dm_truecrypt_exit ()\n");
	status = dm_unregister_target (&truecrypt_target);
	if (status < 0)
		DMERR ("truecrypt: unregister failed %d", status);
	destroy_workqueue (work_queue);
	kmem_cache_destroy (bio_ctx_cache);
}
module_init(dm_truecrypt_init);
module_exit(dm_truecrypt_exit);
// "trace=N" module parameter sets the verbosity used by the trace() macro
module_param_named(trace, trace_level, int, 0);
MODULE_AUTHOR("TrueCrypt Foundation");
MODULE_DESCRIPTION(DM_NAME " target for encryption and decryption of TrueCrypt volumes");
MODULE_PARM_DESC(trace, "Trace level");
MODULE_LICENSE("GPL and additional rights"); // Kernel thinks only GPL/BSD/MPL != closed-source code