
📄 glue.c

📁 Fast and transparent file system and swap encryption package for linux. No source code changes to linux kernel.
💻 C
📖 Page 1 of 2
/*
 *  glue.c
 *
 *  Written by Jari Ruusu, December 14 2006
 *
 *  Copyright 2001-2006 by Jari Ruusu.
 *  Redistribution of this file is permitted under the GNU Public License.
 */

#include <linux/version.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>
#if LINUX_VERSION_CODE >= 0x20600
# include <linux/bio.h>
# include <linux/blkdev.h>
#endif
#if LINUX_VERSION_CODE >= 0x20200
# include <linux/slab.h>
# include <linux/loop.h>
# include <asm/uaccess.h>
#else
# include <linux/malloc.h>
# include <asm/segment.h>
# include "patched-loop.h"
#endif
#if LINUX_VERSION_CODE >= 0x20400
# include <linux/spinlock.h>
#endif
#include <asm/byteorder.h>
#if defined(CONFIG_BLK_DEV_LOOP_PADLOCK) && (defined(CONFIG_X86) || defined(CONFIG_X86_64))
# include <asm/processor.h>
#endif
#include "aes.h"
#include "md5.h"

#if LINUX_VERSION_CODE >= 0x20600
typedef sector_t TransferSector_t;
# define LoopInfo_t struct loop_info64
#else
typedef int TransferSector_t;
# define LoopInfo_t struct loop_info
#endif

#if !defined(cpu_to_le32)
# if defined(__BIG_ENDIAN)
#  define cpu_to_le32(x) ({u_int32_t __x=(x);((u_int32_t)((((u_int32_t)(__x)&(u_int32_t)0x000000ffUL)<<24)|(((u_int32_t)(__x)&(u_int32_t)0x0000ff00UL)<<8)|(((u_int32_t)(__x)&(u_int32_t)0x00ff0000UL)>>8)|(((u_int32_t)(__x)&(u_int32_t)0xff000000UL)>>24)));})
# else
#  define cpu_to_le32(x) ((u_int32_t)(x))
# endif
#endif

#if LINUX_VERSION_CODE < 0x20200
# define copy_from_user(t,f,s) (verify_area(VERIFY_READ,f,s)?(s):(memcpy_fromfs(t,f,s),0))
#endif

#if !defined(LOOP_MULTI_KEY_SETUP)
# define LOOP_MULTI_KEY_SETUP 0x4C4D
#endif
#if !defined(LOOP_MULTI_KEY_SETUP_V3)
# define LOOP_MULTI_KEY_SETUP_V3 0x4C4E
#endif

#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
# define KEY_ALLOC_COUNT  128
#else
# define KEY_ALLOC_COUNT  64
#endif

typedef struct {
    aes_context *keyPtr[KEY_ALLOC_COUNT];
    unsigned    keyMask;
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
    u_int32_t   *partialMD5;
    u_int32_t   partialMD5buf[8];
    rwlock_t    rwlock;
    unsigned    reversed;
    unsigned    blocked;
    struct timer_list timer;
#else
    u_int32_t   partialMD5[4];
#endif
#if defined(CONFIG_BLK_DEV_LOOP_PADLOCK) && (defined(CONFIG_X86) || defined(CONFIG_X86_64))
    u_int32_t   padlock_cw_e;
    u_int32_t   padlock_cw_d;
#endif
} AESmultiKey;

#if defined(CONFIG_BLK_DEV_LOOP_PADLOCK) && (defined(CONFIG_X86) || defined(CONFIG_X86_64))
/* This function allocates AES context structures at special address such */
/* that returned address % 16 == 8 . That way expanded encryption and */
/* decryption keys in AES context structure are always 16 byte aligned */
static void *specialAligned_kmalloc(size_t size, unsigned int flags)
{
    void *pn, **ps;
    pn = kmalloc(size + (16 + 8), flags);
    if(!pn) return (void *)0;
    ps = (void **)((((unsigned long)pn + 15) & ~((unsigned long)15)) + 8);
    *(ps - 1) = pn;
    return (void *)ps;
}

static void specialAligned_kfree(void *ps)
{
    if(ps) kfree(*((void **)ps - 1));
}

# define specialAligned_ctxSize     ((sizeof(aes_context) + 15) & ~15)
#else
# define specialAligned_kmalloc     kmalloc
# define specialAligned_kfree       kfree
# define specialAligned_ctxSize     sizeof(aes_context)
#endif

#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
static void keyScrubWork(AESmultiKey *m)
{
    aes_context *a0, *a1;
    u_int32_t *p;
    int x, y, z;

    z = m->keyMask + 1;
    for(x = 0; x < z; x++) {
        a0 = m->keyPtr[x];
        a1 = m->keyPtr[x + z];
        memcpy(a1, a0, sizeof(aes_context));
        m->keyPtr[x] = a1;
        m->keyPtr[x + z] = a0;
        p = (u_int32_t *) a0;
        y = sizeof(aes_context) / sizeof(u_int32_t);
        while(y > 0) {
            *p ^= 0xFFFFFFFF;
            p++;
            y--;
        }
    }

    x = m->reversed;    /* x is 0 or 4 */
    m->reversed ^= 4;
    y = m->reversed;    /* y is 4 or 0 */
    p = &m->partialMD5buf[x];
    memcpy(&m->partialMD5buf[y], p, 16);
    m->partialMD5 = &m->partialMD5buf[y];
    p[0] ^= 0xFFFFFFFF;
    p[1] ^= 0xFFFFFFFF;
    p[2] ^= 0xFFFFFFFF;
    p[3] ^= 0xFFFFFFFF;

    /* try to flush dirty cache data to RAM */
#if !defined(CONFIG_XEN) && (defined(CONFIG_X86_64) || (defined(CONFIG_X86) && !defined(CONFIG_M386) && !defined(CONFIG_CPU_386)))
    __asm__ __volatile__ ("wbinvd": : :"memory");
#else
    mb();
#endif
}

/* called only from loop thread process context */
static void keyScrubThreadFn(AESmultiKey *m)
{
    write_lock(&m->rwlock);
    if(!m->blocked) keyScrubWork(m);
    write_unlock(&m->rwlock);
}

#if defined(NEW_TIMER_VOID_PTR_PARAM)
# define KeyScrubTimerFnParamType void *
#else
# define KeyScrubTimerFnParamType unsigned long
#endif

static void keyScrubTimerFn(KeyScrubTimerFnParamType);

static void keyScrubTimerInit(struct loop_device *lo)
{
    AESmultiKey     *m;
    unsigned long   expire;

    m = (AESmultiKey *)lo->key_data;
    expire = jiffies + HZ;
    init_timer(&m->timer);
    m->timer.expires = expire;
    m->timer.data = (KeyScrubTimerFnParamType)lo;
    m->timer.function = keyScrubTimerFn;
    add_timer(&m->timer);
}

/* called only from timer handler context */
static void keyScrubTimerFn(KeyScrubTimerFnParamType d)
{
    struct loop_device *lo = (struct loop_device *)d;
    extern void loop_add_keyscrub_fn(struct loop_device *, void (*)(void *), void *);

    /* rw lock needs process context, so make loop thread do scrubbing */
    loop_add_keyscrub_fn(lo, (void (*)(void*))keyScrubThreadFn, lo->key_data);
    /* start timer again */
    keyScrubTimerInit(lo);
}
#endif

static AESmultiKey *allocMultiKey(void)
{
    AESmultiKey *m;
    aes_context *a;
    int x = 0, n;

    m = (AESmultiKey *) kmalloc(sizeof(AESmultiKey), GFP_KERNEL);
    if(!m) return 0;
    memset(m, 0, sizeof(AESmultiKey));
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
    m->partialMD5 = &m->partialMD5buf[0];
    rwlock_init(&m->rwlock);
    init_timer(&m->timer);
    again:
#endif

    n = PAGE_SIZE / specialAligned_ctxSize;
    if(!n) n = 1;
    a = (aes_context *) specialAligned_kmalloc(specialAligned_ctxSize * n, GFP_KERNEL);
    if(!a) {
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
        if(x) specialAligned_kfree(m->keyPtr[0]);
#endif
        kfree(m);
        return 0;
    }
    while((x < KEY_ALLOC_COUNT) && n) {
        m->keyPtr[x] = a;
        a = (aes_context *)((unsigned char *)a + specialAligned_ctxSize);
        x++;
        n--;
    }
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
    if(x < 2) goto again;
#endif
    return m;
}

static void clearAndFreeMultiKey(AESmultiKey *m)
{
    aes_context *a;
    int x, n;

#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
    /* stop scrub timer. loop thread was killed earlier */
    del_timer_sync(&m->timer);
    /* make sure allocated keys are in original order */
    if(m->reversed) keyScrubWork(m);
#endif

    n = PAGE_SIZE / specialAligned_ctxSize;
    if(!n) n = 1;
    x = 0;
    while(x < KEY_ALLOC_COUNT) {
        a = m->keyPtr[x];
        if(!a) break;
        memset(a, 0, specialAligned_ctxSize * n);
        specialAligned_kfree(a);
        x += n;
    }
    memset(m, 0, sizeof(AESmultiKey));
    kfree(m);
}

static int multiKeySetup(struct loop_device *lo, unsigned char *k, int version3)
{
    AESmultiKey *m;
    aes_context *a;
    int x, y, n, err = 0;
    union {
        u_int32_t     w[16];
        unsigned char b[64];
    } un;

#if LINUX_VERSION_CODE >= 0x20200
    if(lo->lo_key_owner != current->uid && !capable(CAP_SYS_ADMIN))
        return -EPERM;
#endif

    m = (AESmultiKey *)lo->key_data;
    if(!m) return -ENXIO;

#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
    /* temporarily prevent loop thread from messing with keys */
    write_lock(&m->rwlock);
    m->blocked = 1;
    /* make sure allocated keys are in original order */
    if(m->reversed) keyScrubWork(m);
    write_unlock(&m->rwlock);
#endif

    n = PAGE_SIZE / specialAligned_ctxSize;
    if(!n) n = 1;
    x = 0;
    while(x < KEY_ALLOC_COUNT) {
        if(!m->keyPtr[x]) {
            a = (aes_context *) specialAligned_kmalloc(specialAligned_ctxSize * n, GFP_KERNEL);
            if(!a) {
                err = -ENOMEM;
                goto error_out;
            }
            y = x;
            while((y < (x + n)) && (y < KEY_ALLOC_COUNT)) {
                m->keyPtr[y] = a;
                a = (aes_context *)((unsigned char *)a + specialAligned_ctxSize);
                y++;
            }
        }
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
        if(x >= 64) {
            x++;
            continue;
        }
#endif
        if(copy_from_user(&un.b[0], k, 32)) {
            err = -EFAULT;
            goto error_out;
        }
        aes_set_key(m->keyPtr[x], &un.b[0], lo->lo_encrypt_key_size, 0);
        k += 32;
        x++;
    }

    m->partialMD5[0] = 0x67452301;
    m->partialMD5[1] = 0xefcdab89;
    m->partialMD5[2] = 0x98badcfe;
    m->partialMD5[3] = 0x10325476;
    if(version3) {
        /* only first 128 bits of iv-key is used */
        if(copy_from_user(&un.b[0], k, 16)) {
            err = -EFAULT;
            goto error_out;
        }
#if defined(__BIG_ENDIAN)
        un.w[0] = cpu_to_le32(un.w[0]);
        un.w[1] = cpu_to_le32(un.w[1]);
        un.w[2] = cpu_to_le32(un.w[2]);
        un.w[3] = cpu_to_le32(un.w[3]);
#endif
        memset(&un.b[16], 0, 48);
        md5_transform_CPUbyteorder(&m->partialMD5[0], &un.w[0]);
        lo->lo_flags |= 0x080000;  /* multi-key-v3 (info exported to user space) */
    }

    m->keyMask = 0x3F;          /* range 0...63 */
    lo->lo_flags |= 0x100000;   /* multi-key (info exported to user space) */
    memset(&un.b[0], 0, 32);
error_out:
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
    /* re-enable loop thread key scrubbing */
    write_lock(&m->rwlock);
    m->blocked = 0;
    write_unlock(&m->rwlock);
#endif
    return err;
}

void loop_compute_sector_iv(TransferSector_t devSect, u_int32_t *ivout)
{
    if(sizeof(TransferSector_t) == 8) {
        ivout[0] = cpu_to_le32(devSect);
        ivout[1] = cpu_to_le32((u_int64_t)devSect>>32);
        ivout[3] = ivout[2] = 0;
    } else {
        ivout[0] = cpu_to_le32(devSect);
        ivout[3] = ivout[2] = ivout[1] = 0;
    }
}

void loop_compute_md5_iv_v3(TransferSector_t devSect, u_int32_t *ivout, u_int32_t *data)
{
    int         x;
#if defined(__BIG_ENDIAN)
    int         y, e;
#endif
    u_int32_t   buf[16];

#if defined(__BIG_ENDIAN)
    y = 7;
    e = 16;
    do {
        if (!y) {
            e = 12;
            /* md5_transform_CPUbyteorder wants data in CPU byte order */
            /* devSect is already in CPU byte order -- no need to convert */
            if(sizeof(TransferSector_t) == 8) {
                /* use only 56 bits of sector number */
                buf[12] = devSect;
                buf[13] = (((u_int64_t)devSect >> 32) & 0xFFFFFF) | 0x80000000;
            } else {
                /* 32 bits of sector number + 24 zero bits */
                buf[12] = devSect;
                buf[13] = 0x80000000;
            }
            /* 4024 bits == 31 * 128 bit plaintext blocks + 56 bits of sector number */
            /* For version 3 on-disk format this really should be 4536 bits, but can't be */
            /* changed without breaking compatibility. V3 uses MD5-with-wrong-length IV */
            buf[14] = 4024;
            buf[15] = 0;
        }
        x = 0;
        do {
            buf[x    ] = cpu_to_le32(data[0]);
            buf[x + 1] = cpu_to_le32(data[1]);
            buf[x + 2] = cpu_to_le32(data[2]);
            buf[x + 3] = cpu_to_le32(data[3]);
            x += 4;
            data += 4;
        } while (x < e);
        md5_transform_CPUbyteorder(&ivout[0], &buf[0]);
    } while (--y >= 0);
    ivout[0] = cpu_to_le32(ivout[0]);
    ivout[1] = cpu_to_le32(ivout[1]);
    ivout[2] = cpu_to_le32(ivout[2]);
    ivout[3] = cpu_to_le32(ivout[3]);
#else
    x = 6;
    do {
        md5_transform_CPUbyteorder(&ivout[0], data);
        data += 16;
    } while (--x >= 0);
    memcpy(buf, data, 48);
    /* md5_transform_CPUbyteorder wants data in CPU byte order */
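The specialAligned_kmalloc()/specialAligned_kfree() pair near the top of the listing uses a pointer-stashing trick to hand out addresses with address % 16 == 8; according to the comment there, that offset makes the expanded encryption and decryption keys inside aes_context land on 16 byte boundaries, which the PadLock build needs. Below is a minimal userspace sketch of the same trick built on malloc()/free(); the names aligned8_malloc and aligned8_free are made up for illustration and are not part of glue.c.

/*
 * Userspace sketch (hypothetical names, not part of glue.c): the same
 * "% 16 == 8" allocation trick as specialAligned_kmalloc(), on malloc/free.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static void *aligned8_malloc(size_t size)
{
    void *raw, **user;

    raw = malloc(size + (16 + 8));      /* extra room: alignment padding + saved pointer */
    if (!raw)
        return NULL;
    /* round up to a 16 byte boundary, then step 8 bytes past it */
    user = (void **)((((uintptr_t)raw + 15) & ~(uintptr_t)15) + 8);
    user[-1] = raw;                     /* stash the original pointer just below the result */
    return user;
}

static void aligned8_free(void *user)
{
    if (user)
        free(((void **)user)[-1]);      /* recover and free the original pointer */
}

int main(void)
{
    void *p = aligned8_malloc(100);

    printf("address %% 16 == %lu\n", (unsigned long)((uintptr_t)p % 16));   /* prints 8 */
    aligned8_free(p);
    return 0;
}

The kernel version returns the extra pointer slot to kfree() the original allocation later, which is why specialAligned_kfree() dereferences one slot below the address it is given.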
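loop_compute_sector_iv() builds the plain per-sector IV by packing the little-endian sector number into the first 64 bits of a 128-bit IV and zeroing the rest. The standalone sketch below mirrors the sizeof(TransferSector_t) == 8 branch for a little-endian host where cpu_to_le32() is a no-op; sector_iv_sketch is a hypothetical name, not part of glue.c.

/* Userspace illustration of the sector-IV layout (hypothetical helper). */
#include <stdio.h>
#include <stdint.h>

static void sector_iv_sketch(uint64_t devSect, uint32_t ivout[4])
{
    ivout[0] = (uint32_t)devSect;          /* low 32 bits of the sector number */
    ivout[1] = (uint32_t)(devSect >> 32);  /* high 32 bits of the sector number */
    ivout[2] = ivout[3] = 0;               /* upper 64 bits of the IV stay zero */
}

int main(void)
{
    uint32_t iv[4];

    sector_iv_sketch(0x0000001234567890ULL, iv);
    printf("iv = %08x %08x %08x %08x\n", iv[0], iv[1], iv[2], iv[3]);
    return 0;
}

loop_compute_md5_iv_v3() in the listing is the heavier alternative: it folds the sector's plaintext and part of the sector number through MD5 to derive the IV, which is why its big-endian branch byte-swaps each block before calling md5_transform_CPUbyteorder().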
