📄 bc_dev26.c
/*******************************************************************
 * Copyright (c) 2003 Jetico, Inc., Finland
 * All rights reserved.
 *
 * File:        driver/bc_dev26.c
 *
 * Description: BestCrypt pseudo-device driver.
 *
 * Scope:       BestCrypt pseudo-device driver
 *
 * Platforms:   Linux
 *
 * Ideas:       Igor M. Arsenin
 *              Serge A. Frolov
 *              Vitaliy G. Zolotarev
 *
 * Author:      Nail R. Kaipov
 *
 * Created:     xx-Aug-2003
 *
 * Revision:    20-Dec-2003. Updated to comply with 2.6.0.
 *
 *******************************************************************/

//#define KBUILD_MODNAME bc

#include "bc_cfg.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>      /* module_param() */
#include <linux/vermagic.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>            /* copy_from/to_user() */
#include <asm/hardirq.h>
#include <linux/vmalloc.h>          /* vmalloc()/vfree() */
#include <linux/fs.h>
#include <linux/file.h>             /* fget() */
#include <linux/proc_fs.h>          /* proc_fs support */
#include <linux/devfs_fs_kernel.h>  /* devfs_fs support */
#include <linux/genhd.h>            /* gendisk interface */
#include <linux/bio.h>              /* struct bio */
#include <linux/blkdev.h>           /* request_queue_t */
#include <linux/hdreg.h>            /* HDIO_GETGEO, hd_geometry */

#include "bc_types.h"
#include "bc_ioctl.h"
#include "bc_mgr.h"
#include "bc_dev.h"

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
#include <linux/smp_lock.h>         /* Shawn Leard added, resolves */
                                    /* unknown symbol(s) in SuSE Linux 9.2 */
#endif

/* module parameters & friends */
static int bc_devices = DEFAULT_BC_DEVICES;
static int bc_partitions = DEFAULT_BC_PARTITIONS;
static int bc_minor_shift = 0;
static struct bc_device *bc_dev = NULL;     /* bc devices' array */

/* pid control table support */
#define BC_PID_SIZE 64
static pid_t *bc_pid_table;
static int bc_pid_size;
static int bc_pid_next;
static struct semaphore bc_pid_sema;
static struct timer_list bc_pid_timer;
static struct proc_dir_entry *proc_bcrypt;

static inline int bc_find_pid(pid_t pid)
{
    register int i;

    for (i = 0; i < bc_pid_next; i++)
        if (pid == bc_pid_table[i])
            return i;
    return -1;
}

static int bc_find_pid_safe(pid_t pid)
{
    int i;

    down(&bc_pid_sema);
    i = bc_find_pid(pid);
    up(&bc_pid_sema);
    return i;
}

static int bc_add_pid(pid_t pid)
{
    pid_t *tmp;

    down(&bc_pid_sema);
    if (bc_find_pid(pid) > -1) {
        up(&bc_pid_sema);
        return 0;
    }
    if (bc_pid_next < bc_pid_size) {
        bc_pid_table[bc_pid_next] = pid;
        bc_pid_next++;
    } else {
        /* grow the table: copy the existing entries into the new buffer */
        tmp = kmalloc((bc_pid_size + BC_PID_SIZE) * sizeof(pid_t), GFP_KERNEL);
        if (NULL == tmp) {
            up(&bc_pid_sema);
            return -ENOMEM;
        }
        memcpy(tmp, bc_pid_table, bc_pid_size * sizeof(pid_t));
        kfree(bc_pid_table);
        bc_pid_table = tmp;
        bc_pid_size += BC_PID_SIZE;
        bc_pid_table[bc_pid_next] = pid;
        bc_pid_next++;
    }
    if (1 == bc_pid_next) {
        bc_pid_timer.expires = jiffies + 5 * HZ;
        add_timer(&bc_pid_timer);
    }
    up(&bc_pid_sema);
    return 0;
}

static int bc_del_pid(pid_t pid)
{
    int i;
    pid_t *tmp;

    down(&bc_pid_sema);
    i = bc_find_pid(pid);
    if (0 > i) {
        up(&bc_pid_sema);
        return 0;
    }
    bc_pid_next--;
    if (bc_pid_next)
        bc_pid_table[i] = bc_pid_table[bc_pid_next];
    bc_pid_table[bc_pid_next] = 0;
    if (bc_pid_size - bc_pid_next > 2 * BC_PID_SIZE) {
        /* shrink the table: copy the remaining entries into the new buffer */
        tmp = kmalloc((bc_pid_size - BC_PID_SIZE) * sizeof(pid_t), GFP_KERNEL);
        if (NULL == tmp) {
            up(&bc_pid_sema);
            return 0;
        }
        bc_pid_size -= BC_PID_SIZE;
        memcpy(tmp, bc_pid_table, bc_pid_size * sizeof(pid_t));
        kfree(bc_pid_table);
        bc_pid_table = tmp;
    }
    up(&bc_pid_sema);
    return 0;
}

/* periodically drop pids whose owning processes have exited */
static void bc_pid_timer_proc(unsigned long unused)
{
    register int i;

#if defined(in_atomic)
    if (in_atomic() || irqs_disabled()) {} else
#endif
    down(&bc_pid_sema);
#ifdef CONFIG_SMP
    write_lock_irq(&tasklist_lock);
#endif
    for (i = 0; i < bc_pid_next; i++) {
        if (NULL == find_task_by_pid(bc_pid_table[i])) {
            bc_pid_next--;
            if (bc_pid_next)
                bc_pid_table[i] = bc_pid_table[bc_pid_next];
            bc_pid_table[bc_pid_next] = 0;
            i--;
        }
    }
#ifdef CONFIG_SMP
    write_unlock_irq(&tasklist_lock);
#endif
    if (bc_pid_next) {
        bc_pid_timer.expires = jiffies + 5 * HZ;
        add_timer(&bc_pid_timer);
    }
#if defined(in_atomic)
    if (in_atomic() || irqs_disabled()) {} else
#endif
    up(&bc_pid_sema);
}

static inline void reset_dev(struct bc_device *bc)
{
    // do not touch bc_control, bc_refcnt, bc_number here
    if (NULL == bc)
        return;
    bc->bc_offset = 0L;
    bc->bc_start_sector = 0L;
    bc->bc_num_sectors = 0L;
    bc->bc_dev = 0;     //to_kdev_t(0);
    bc->bc_gfpmask = 0;
    bc->bc_biohead = NULL;
    bc->bc_biotail = NULL;
    bc->bc_process_bio = NULL;
    bc->bc_dentry = NULL;
    bc->bc_file = NULL;
    memset(&bc->bc_flags, 0, sizeof(bc->bc_flags));
    atomic_set(&bc->bc_pending, 0);
    spin_lock_init(&bc->bc_lock);
    init_MUTEX_LOCKED(&bc->bc_thread);
    init_MUTEX_LOCKED(&bc->bc_run);
    if (NULL != bc->bc_buffer)
        vfree(bc->bc_buffer);
    bc->bc_buffer = NULL;
}

/*- strategy routine stuff -*/

/* encrypt/decrypt each bio segment and do the file I/O on the container */
static int process_bio_fops(struct bc_device *bc, struct bio *bio)
{
    int i, rw;
    struct bio_vec *bvec;
    char *buf, *IV;
    loff_t pos, sec;
    u16 iv[4];
    u64 iv64, iv64le;
    struct file *file;
    size_t len, ret, total;     //, size;
    mm_segment_t fs;

    if (READA == bio_rw(bio)) {
        bio_endio(bio, bio->bi_size, -EWOULDBLOCK);
        return 0;
    }
    rw = bio_data_dir(bio);
    file = bc->bc_file;
    sec = (loff_t)bio->bi_sector + bc->bc_start_sector;
    pos = sec * 512 + bc->bc_offset;
    total = 0;
    __bio_for_each_segment(bvec, bio, i, 0) {
        if (bc->bc_flags.iv_64bit) {
            iv64 = sec + (total >> 9) + 1;  // iv numbering begins from 1
            iv64le = __cpu_to_le64(iv64);
            IV = (char *)&iv64le;
        } else {
            iv[0] = iv[1] = iv[2] = iv[3] = (sec + (total >> 9)) & 0xFFFF;
            IV = (char *)iv;
        }
        len = bvec->bv_len;
        kmap(bvec->bv_page);
        total += len;
        buf = page_address(bvec->bv_page) + bvec->bv_offset;
        fs = get_fs();
        if (READ == rw) {
            set_fs(KERNEL_DS);
            ret = file->f_op->read(file, buf, len, &pos);
            set_fs(fs);
            bc->bc_alg->decrypt(bc->bc_key, IV, buf, buf, len);
        } else {
            bc->bc_alg->encrypt(bc->bc_key, IV, buf, bc->bc_buffer, len);
            set_fs(KERNEL_DS);
            ret = file->f_op->write(file, bc->bc_buffer, len, &pos);
            set_fs(fs);
        }
        kunmap(bvec->bv_page);
        if (ret != len) {
            printk(KERN_ERR "bc: %s error wanted %ld, got %ld\n",
                   READ == rw ? "read" : "write",
                   (unsigned long)bvec->bv_len, (unsigned long)ret);
            goto error_out;
        }
    }
    bio_endio(bio, bio->bi_size, 0);
    return 0;

error_out:
    bio_endio(bio, bio->bi_size, -EIO);
    return 0;
}

/* per-device kernel thread: dequeues bios and hands them to bc_process_bio() */
static int bc_thread(void *data)
{
    struct bc_device *bc = (struct bc_device *)data;
    struct bio *bio;

    daemonize("bcrypt%d", bc->bc_number);
    set_user_nice(current, -20l);
    set_user_nice(current, -10l);
#if defined(PF_IOTHREAD)
    current->flags |= PF_IOTHREAD;
#endif
#if defined(PF_NOFREEZE)
    current->flags |= PF_NOFREEZE;
#endif
#if defined(PF_LESS_THROTTLE)
    current->flags |= PF_LESS_THROTTLE;
#endif
    spin_lock_irq(&bc->bc_lock);
    bc->bc_flags.active = 1;
    atomic_inc(&bc->bc_pending);
    spin_unlock_irq(&bc->bc_lock);
    up(&bc->bc_thread);

    while (1) {
        down_interruptible(&bc->bc_run);
        if (0 == atomic_read(&bc->bc_pending))
            break;
        // fetch bio
        spin_lock_irq(&bc->bc_lock);
        bio = bc->bc_biohead;
        if (NULL == bio) {
            spin_unlock_irq(&bc->bc_lock);
            continue;
        }
        if (bc->bc_biotail == bio)
            bc->bc_biotail = NULL;
        bc->bc_biohead = bio->bi_next;
        bio->bi_next = NULL;
        spin_unlock_irq(&bc->bc_lock);
        // process bio
        bc->bc_activity = 1;
        if (NULL == bc->bc_process_bio) {
            bio_endio(bio, bio->bi_size, -EIO);
        } else {
            bc->bc_process_bio(bc, bio);
        }
        if (atomic_dec_and_test(&bc->bc_pending))
            break;
    }
    up(&bc->bc_thread);
    return 0;
}

/* make_request function: queue the bio for the worker thread */
static int bc_make_request(request_queue_t *q, struct bio *bio)
{
    struct bc_device *bc;
    int err;

    bc = q->queuedata;
    if (!bc || !bc->bc_dentry || !bc->bc_dentry->d_inode) {
        printk(KERN_ERR "bc: invalid device.\n");
        goto error_out;
    }
/*  if (!bd->bd_flags.configured && !bc->bc_flags.busy) {
        printk(KERN_ERR "bc: device not configured yet.\n");
        goto error_out;
    }*/
    err = -EIO;
    switch (bio_rw(bio)) {
    case WRITE:
        if (bc->bc_flags.readonly) {
            printk(KERN_ERR "bc: attempt to write on readonly device.\n");
            goto error_out;
        }
        /* fall through */
    case READ:
        break;
    case READA:
        err = -EWOULDBLOCK;
        goto error_out;
    default:
        printk(KERN_ERR "bc: unknown command (%d).\n", bio_rw(bio));
        goto error_out;
    }
    spin_lock_irq(&bc->bc_lock);
    if (!bc->bc_flags.active) {
        spin_unlock_irq(&bc->bc_lock);
        goto error_out;
    }
    atomic_inc(&bc->bc_pending);
    if (bc->bc_biotail)
        bc->bc_biotail->bi_next = bio;
    else
        bc->bc_biohead = bio;
    bc->bc_biotail = bio;
    spin_unlock_irq(&bc->bc_lock);
    up(&bc->bc_run);
    return 0;

error_out:
    bio_endio(bio, bio->bi_size, err);
    return 0;
}

/*----------------------------------------------------------------------*/

static int bc_get_info(struct bc_device *bc, struct bc_info *arg)
{
    struct bc_info query;
    long busy = 0;

    BC_GET_ARG(arg, query)
    put_user(BC_VERSION_MAJOR, (long *)&arg->ver_major);
    put_user(BC_VERSION_MINOR, (long *)&arg->ver_minor);
    busy = bc->bc_flags.configured || bc->bc_refcnt > 1 ? 1 : 0;
    put_user(busy, (long *)&arg->busy);
    return 0;
}

static int install_process_bio(struct bc_device *bc, struct file *file,
                               struct block_device *bdev, int ro)
{
    int error;
    struct inode *inode;

    error = -EINVAL;
    inode = file->f_dentry->d_inode;
    if (!inode) {
        printk(KERN_ERR "bc_set_fd: NULL inode.\n");
        goto error_out;
    }
    if (!file->f_op) {
        printk(KERN_ERR "bc_set_fd: NULL file_operations.\n");
        goto error_out;
    }
    if (!file->f_op->read) {
        printk(KERN_ERR "bc_set_fd: Can't perform reads.\n");
        goto error_out;
    }
    error = -EACCES;
    if (S_ISBLK(inode->i_mode)) {
//      error = blkdev_open(inode, file);
//      if (error)
//          goto error_out;
        bc->bc_dev = inode->i_rdev;
        ro = ro || bdev_read_only(inode->i_bdev);
    } else if (S_ISREG(inode->i_mode)) {
//      bc->bc_dev = inode->i_dev;
        bc->bc_dev = inode->i_rdev;     // !!! error !!!
    } else
        goto error_out;
    bc->bc_flags.readonly = ro || IS_RDONLY(inode) || (NULL == file->f_op->write);
    if (!bc->bc_flags.readonly) {
        error = get_write_access(inode);
        if (error) {
            printk(KERN_ERR "bc_set_fd: Can't get write access.\n");
            goto error_out;
        }
//      invalidate_inode_pages(inode);
    }
    set_device_ro(bdev, !!bc->bc_flags.readonly);
    bc->bc_process_bio = process_bio_fops;
    return 0;

error_out:
    return error;
}

#if defined(QUEUE_FLAG_PLUGGED)
// never called actually.
static void bc_unplug_fn(request_queue_t *q)
{
    struct bc_device *bc = q->queuedata;

    clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
    blk_run_address_space(bc->bc_file->f_mapping);
}
#endif

/* size the pseudo-device from the backing file/device, minus the data offset */
static int scan_device(struct bc_device *bc, struct block_device *bdev)
{
    loff_t bytes;
    sector_t sectors;

    if (NULL == bc->bc_dentry)
        return -ENXIO;
/* from blkrrpart */
//  if (bc->bc_refcnt > 1)
//      return -EBUSY;
//  bc->bc_flags.busy = 1;
    bytes = i_size_read(bc->bc_dentry->d_inode->i_mapping->host);
    bytes -= bc->bc_offset;
    sectors = (sector_t)(bytes >> 9);
    if ((loff_t)sectors != (bytes >> 9))
        return -EFBIG;
    bdev->bd_inode->i_size = bytes;     /* major headache if forgotten */
    set_capacity(bc->bc_gendisk, sectors);
    return 0;
}

/* attach a container file/device to the bc device; the key must be ready */
static int bc_set_fd(struct bc_device *bc, struct block_device *bdev,
                     struct bc_file64 *arg)
{
    struct file *file = NULL;
    struct bc_algorithm *bc_alg = NULL;
    struct bc_file64 query;
    int error;

    if (bc_find_pid_safe(current->pid) < 0)
        return -EPERM;
    try_module_get(THIS_MODULE);
    BC_GET_ARG(arg, query)
    error = -EBUSY;
    if (bc->bc_dentry) {
        goto error_out;
    }
    bc_alg = get_bc_algo(query.alg_id, NULL);
    if (NULL == bc_alg || bc_alg->test_key(query.key_handle) != 0) {
        error = -EINVAL;
        goto error_out;
    }
    if (!(file = fget(query.fd))) {
        error = -EBADF;
        goto error_out;
    }
    reset_dev(bc);
    bc->bc_buffer = vmalloc(BC_BUFFER_SIZE);
    if (NULL == bc->bc_buffer)
        goto error_out;
    error = install_process_bio(bc, file, bdev, query.flags & BC_FLAGS_READONLY);
    if (0 != error)
        goto error_out;
    bc->bc_key = query.key_handle;
    bc->bc_alg = bc_alg;
    bc->bc_file = file;
    bc->bc_dentry = dget(file->f_dentry);
    bc->bc_offset = (loff_t)query.offset;
    bc->bc_start_sector = (loff_t)query.start_sector;
    bc->bc_num_sectors = (loff_t)query.num_sectors;
    error = scan_device(bc, bdev);
    if (0 != error) {
        dput(file->f_dentry);
        goto error_out;
    }
    bc_alg->lock_key(query.key_handle, 1);
    bc->bc_gfpmask = mapping_gfp_mask(bc->bc_dentry->d_inode->i_mapping);
    mapping_set_gfp_mask(bc->bc_dentry->d_inode->i_mapping,
                         bc->bc_gfpmask & ~(__GFP_IO | __GFP_FS));
    bc->bc_flags.iv_64bit = query.flags & BC_FLAGS_IV_64BIT || 0;
    bc->bc_flags.multpart = query.flags & BC_FLAGS_MULTPART || 0;
    bc->bc_flags.configured = 1;
    blk_queue_make_request(&bc->bc_queue, bc_make_request);
    bc->bc_queue.queuedata = bc;
//  bc->bc_queue.backing_dev_info.memory_backed = 1;
#if defined(QUEUE_FLAG_PLUGGED)
    bc->bc_queue.unplug_fn = bc_unplug_fn;
#endif
    kernel_thread(bc_thread, bc, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
    down(&bc->bc_thread);
    return 0;

error_out:
    if (file)
        fput(file);
    module_put(THIS_MODULE);
    return error;
}

/* detach the container and stop the worker thread */
static int bc_clr_fd(struct bc_device *bc, struct block_device *bdev,
                     struct inode *dev_inode)
{
    int n;      //, minor;
    size_t ret;
    loff_t pos;
    unsigned char magic;
    mm_segment_t fs;

    if (bc_find_pid_safe(current->pid) < 0)
        return -EPERM;
//  if (!is_parent(bd))
//      return -EPERM;
    if (NULL == bc->bc_dentry)
        return -ENXIO;
    if (NULL == bc->bc_file)
        return -ENXIO;
    if (bc->bc_refcnt > 1)
        return -EBUSY;
    fsync_bdev(bdev);
    spin_lock_irq(&bc->bc_lock);
    if (atomic_dec_and_test(&bc->bc_pending))
        up(&bc->bc_run);
    bc->bc_flags.active = 0;
    spin_unlock_irq(&bc->bc_lock);
    down(&bc->bc_thread);
//  waitpid(-1, NULL, __WCLONE|WNOHANG);
    mapping_set_gfp_mask(bc->bc_dentry->d_inode->i_mapping, bc->bc_gfpmask);
    n = 1 << bc_minor_shift;
/*  for (i = 0; i < n; i++) {
        minor = bc->bc_number*n+i;
        sync_dev(MKDEV(MAJOR_NR, minor));
        invalidate_buffers(MKDEV(MAJOR_NR, minor));
        while (i) {
            hdevfs = devfs_find_handle(bc->bc_hdevfs, NULL, MAJOR_NR, minor,
                                       DEVFS_SPECIAL_BLK, 0);
            if (NULL == hdevfs) {
                bc_hd[minor].de = 0;
                break;
            }
            if (hdevfs != bc->bc_hdevfs)
                devfs_unregister(hdevfs);
        }
        bc_sizes[minor] = 0;
    }*/
/*  if (bc->bc_save_fops) {
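For reference, the short sketch below restates in plain user-space C the per-sector IV layout that process_bio_fops() computes above: in 64-bit IV mode the IV is the 1-based sector index stored little-endian, otherwise the low 16 bits of the sector index are repeated four times. The helper names bc_iv64/bc_iv16 and the use of htole64() from <endian.h> are illustrative assumptions, not part of the driver.

/*
 * Hypothetical user-space helpers mirroring the driver's IV computation.
 * "sector" is the 512-byte sector index within the logical volume,
 * i.e. bio->bi_sector + bc_start_sector plus the segment offset in sectors.
 */
#include <stdint.h>
#include <string.h>
#include <endian.h>     /* htole64(); assumed available (glibc) */

/* 64-bit IV mode: IV = little-endian (sector index + 1). */
static void bc_iv64(uint64_t sector, unsigned char iv[8])
{
    uint64_t le = htole64(sector + 1);  /* IV numbering begins from 1 */
    memcpy(iv, &le, sizeof(le));
}

/* Legacy 16-bit mode: low 16 bits of the sector index, repeated four times. */
static void bc_iv16(uint64_t sector, uint16_t iv[4])
{
    iv[0] = iv[1] = iv[2] = iv[3] = (uint16_t)(sector & 0xFFFF);
}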