📄 glue.c
字号:
/* devSect is already in CPU byte order -- no need to convert */
if(sizeof(TransferSector_t) == 8) {
    /* use only 56 bits of sector number */
    buf[12] = devSect;
    buf[13] = (((u_int64_t)devSect >> 32) & 0xFFFFFF) | 0x80000000;
} else {
    /* 32 bits of sector number + 24 zero bits */
    buf[12] = devSect;
    buf[13] = 0x80000000;
}
/* 4024 bits == 31 * 128 bit plaintext blocks + 56 bits of sector number */
/* For version 3 on-disk format this really should be 4536 bits, but can't be */
/* changed without breaking compatibility. V3 uses MD5-with-wrong-length IV */
buf[14] = 4024;
buf[15] = 0;
/* single MD5 compression over the assembled 64-byte buffer; result in ivout */
md5_transform_CPUbyteorder(&ivout[0], &buf[0]);
#endif
}

/* this function exists for compatibility with old external cipher modules */
/* Seeds ivout[0..3] with the standard MD5 initial chaining values (as in */
/* RFC 1321) and then runs the v3 IV computation on top of that seed. */
void loop_compute_md5_iv(TransferSector_t devSect, u_int32_t *ivout, u_int32_t *data)
{
    /* standard MD5 initialization constants A, B, C, D */
    ivout[0] = 0x67452301;
    ivout[1] = 0xefcdab89;
    ivout[2] = 0x98badcfe;
    ivout[3] = 0x10325476;
    loop_compute_md5_iv_v3(devSect, ivout, data);
}

/* Some external modules do not know if md5_transform_CPUbyteorder() */
/* is asmlinkage or not, so here is C language wrapper for them.
*/
void md5_transform_CPUbyteorder_C(u_int32_t *hash, u_int32_t const *in)
{
    md5_transform_CPUbyteorder(hash, in);
}

/*
 * transfer_aes -- software AES-CBC transfer function for whole 512 byte
 * sectors.
 *
 * lo       loop device; lo->key_data holds the AESmultiKey state
 * cmd      READ decrypts raw_buf (on-disk form) into loop_buf (plaintext);
 *          any other value encrypts loop_buf into raw_buf
 * size     byte count, must be a non-zero multiple of 512 (-EINVAL otherwise)
 * devSect  starting device sector number, used for IV derivation and, in
 *          multi-key mode, per-sector key selection
 *
 * Returns 0 on success.
 *
 * Each sector is 32 AES blocks; the "x = 15" do-loops process two blocks
 * per iteration (16 iterations).  m->keyMask (y) selects the mode:
 *  - y == 0: single key; IV from loop_compute_sector_iv(), plain CBC
 *    chaining across all 32 blocks.
 *  - y != 0: multi-key v3 format; key picked by low bits of the sector
 *    number.  Block 0 uses an MD5-based IV computed over the sector's
 *    plaintext bytes 16..511 (loop_compute_md5_iv_v3); blocks 1..31 are
 *    CBC chained off ciphertext block 0.
 */
int transfer_aes(struct loop_device *lo, int cmd, char *raw_buf, char *loop_buf, int size, TransferSector_t devSect)
{
    aes_context *a;
    AESmultiKey *m;
    int x;
    unsigned y;
    u_int32_t iv[8];   /* two 16-byte IV slots, used in ping-pong fashion */

    if(!size || (size & 511)) {
        return -EINVAL;
    }
    m = (AESmultiKey *)lo->key_data;
    y = m->keyMask;
    if(cmd == READ) {
        while(size) {
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
            /* hold off the key scrubber while this sector's key is in use */
            read_lock(&m->rwlock);
#endif
            a = m->keyPtr[((unsigned)devSect) & y];
            if(y) {
                /* multi-key: ciphertext block 0 is the chaining IV for */
                /* block 1; skip over it now, it is decrypted last */
                memcpy(&iv[0], raw_buf, 16);
                raw_buf += 16;
                loop_buf += 16;
            } else {
                loop_compute_sector_iv(devSect, &iv[0]);
            }
            x = 15;
            do {
                /* first block of pair: chain IV in iv[0..3]; save this */
                /* block's ciphertext into iv[4..7] for the next step */
                memcpy(&iv[4], raw_buf, 16);
                aes_decrypt(a, raw_buf, loop_buf);
                *((u_int32_t *)(&loop_buf[ 0])) ^= iv[0];
                *((u_int32_t *)(&loop_buf[ 4])) ^= iv[1];
                *((u_int32_t *)(&loop_buf[ 8])) ^= iv[2];
                *((u_int32_t *)(&loop_buf[12])) ^= iv[3];
                if(y && !x) {
                    /* last iteration, multi-key: rewind 496 bytes to the */
                    /* sector start and decrypt block 0 with the MD5 IV */
                    /* computed over the just-recovered plaintext 16..511 */
                    raw_buf -= 496;
                    loop_buf -= 496;
                    memcpy(&iv[4], &m->partialMD5[0], 16);
                    loop_compute_md5_iv_v3(devSect, &iv[4], (u_int32_t *)(&loop_buf[16]));
                } else {
                    raw_buf += 16;
                    loop_buf += 16;
                    memcpy(&iv[0], raw_buf, 16);
                }
                /* second block of pair: chain IV in iv[4..7] */
                aes_decrypt(a, raw_buf, loop_buf);
                *((u_int32_t *)(&loop_buf[ 0])) ^= iv[4];
                *((u_int32_t *)(&loop_buf[ 4])) ^= iv[5];
                *((u_int32_t *)(&loop_buf[ 8])) ^= iv[6];
                *((u_int32_t *)(&loop_buf[12])) ^= iv[7];
                if(y && !x) {
                    /* was rewound to block 0; step past the whole sector */
                    raw_buf += 512;
                    loop_buf += 512;
                } else {
                    raw_buf += 16;
                    loop_buf += 16;
                }
            } while(--x >= 0);
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
            read_unlock(&m->rwlock);
#endif
            /* yield the CPU between sectors; spelling differs per kernel */
#if LINUX_VERSION_CODE >= 0x20600
            cond_resched();
#elif LINUX_VERSION_CODE >= 0x20400
            if(current->need_resched) {set_current_state(TASK_RUNNING);schedule();}
#elif LINUX_VERSION_CODE >= 0x20200
            if(current->need_resched) {current->state=TASK_RUNNING;schedule();}
#else
            if(need_resched) schedule();
#endif
            size -= 512;
            devSect++;
        }
    } else {
        while(size) {
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
            read_lock(&m->rwlock);
#endif
            a = m->keyPtr[((unsigned)devSect) & y];
            if(y) {
#if LINUX_VERSION_CODE < 0x20400
                /* on 2.2 and older kernels, real raw_buf may be doing */
                /* writes at any time, so this needs to be stack buffer */
                u_int32_t tmp_raw_buf[128];
                char *TMP_RAW_BUF = (char *)(&tmp_raw_buf[0]);
#else
                /* on 2.4 and later kernels, real raw_buf is not doing */
                /* any writes now so it can be used as temp buffer */
# define TMP_RAW_BUF raw_buf
#endif
                /* snapshot the plaintext so the MD5 IV over bytes 16..511 */
                /* can be computed before they are overwritten */
                memcpy(TMP_RAW_BUF, loop_buf, 512);
                memcpy(&iv[0], &m->partialMD5[0], 16);
                loop_compute_md5_iv_v3(devSect, &iv[0], (u_int32_t *)(&TMP_RAW_BUF[16]));
                x = 15;
                do {
                    /* CBC encrypt: iv ^= plaintext, encrypt, ciphertext */
                    /* becomes the next chaining value */
                    iv[0] ^= *((u_int32_t *)(&TMP_RAW_BUF[ 0]));
                    iv[1] ^= *((u_int32_t *)(&TMP_RAW_BUF[ 4]));
                    iv[2] ^= *((u_int32_t *)(&TMP_RAW_BUF[ 8]));
                    iv[3] ^= *((u_int32_t *)(&TMP_RAW_BUF[12]));
                    aes_encrypt(a, (unsigned char *)(&iv[0]), raw_buf);
                    memcpy(&iv[0], raw_buf, 16);
                    raw_buf += 16;
#if LINUX_VERSION_CODE < 0x20400
                    TMP_RAW_BUF += 16;
#endif
                    iv[0] ^= *((u_int32_t *)(&TMP_RAW_BUF[ 0]));
                    iv[1] ^= *((u_int32_t *)(&TMP_RAW_BUF[ 4]));
                    iv[2] ^= *((u_int32_t *)(&TMP_RAW_BUF[ 8]));
                    iv[3] ^= *((u_int32_t *)(&TMP_RAW_BUF[12]));
                    aes_encrypt(a, (unsigned char *)(&iv[0]), raw_buf);
                    memcpy(&iv[0], raw_buf, 16);
                    raw_buf += 16;
#if LINUX_VERSION_CODE < 0x20400
                    TMP_RAW_BUF += 16;
#endif
                } while(--x >= 0);
                loop_buf += 512;
            } else {
                /* single-key: plain CBC with the sector-number IV */
                loop_compute_sector_iv(devSect, &iv[0]);
                x = 15;
                do {
                    iv[0] ^= *((u_int32_t *)(&loop_buf[ 0]));
                    iv[1] ^= *((u_int32_t *)(&loop_buf[ 4]));
                    iv[2] ^= *((u_int32_t *)(&loop_buf[ 8]));
                    iv[3] ^= *((u_int32_t *)(&loop_buf[12]));
                    aes_encrypt(a, (unsigned char *)(&iv[0]), raw_buf);
                    memcpy(&iv[0], raw_buf, 16);
                    loop_buf += 16;
                    raw_buf += 16;
                    iv[0] ^= *((u_int32_t *)(&loop_buf[ 0]));
                    iv[1] ^= *((u_int32_t *)(&loop_buf[ 4]));
                    iv[2] ^= *((u_int32_t *)(&loop_buf[ 8]));
                    iv[3] ^= *((u_int32_t *)(&loop_buf[12]));
                    aes_encrypt(a, (unsigned char *)(&iv[0]), raw_buf);
                    memcpy(&iv[0], raw_buf, 16);
                    loop_buf += 16;
                    raw_buf += 16;
                } while(--x >= 0);
            }
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
            read_unlock(&m->rwlock);
#endif
#if LINUX_VERSION_CODE >= 0x20600
            cond_resched();
#elif LINUX_VERSION_CODE >= 0x20400
            if(current->need_resched) {set_current_state(TASK_RUNNING);schedule();}
#elif LINUX_VERSION_CODE >= 0x20200
            if(current->need_resched) {current->state=TASK_RUNNING;schedule();}
#else
            if(need_resched) schedule();
#endif
            size -= 512;
            devSect++;
        }
    }
    return(0);
}

/*
 * keySetup_aes -- loop_func_table init hook.  Allocates the multi-key
 * state, programs key slot 0 from the ioctl-supplied key, then wipes the
 * key material from both the info structure and the local copy.
 * Returns 0 on success, -ENOMEM if allocation fails.
 */
int keySetup_aes(struct loop_device *lo, LoopInfo_t *info)
{
    AESmultiKey *m;
    union {
        u_int32_t w[8]; /* needed for 4 byte alignment for b[] */
        unsigned char b[32];
    } un;

    lo->key_data = m = allocMultiKey();
    if(!m) return(-ENOMEM);
    memcpy(&un.b[0], &info->lo_encrypt_key[0], 32);
    aes_set_key(m->keyPtr[0], &un.b[0], info->lo_encrypt_key_size, 0);
    /* scrub key material; NOTE(review): a plain memset of a dying object */
    /* can be elided by some compilers -- verify for the toolchains in use */
    memset(&info->lo_encrypt_key[0], 0, sizeof(info->lo_encrypt_key));
    memset(&un.b[0], 0, 32);
#if defined(CONFIG_BLK_DEV_LOOP_PADLOCK) && (defined(CONFIG_X86) || defined(CONFIG_X86_64))
    /* precompute PadLock control words; key size may arrive in bits or bytes */
    switch(info->lo_encrypt_key_size) {
    case 256: /* bits */
    case 32: /* bytes */
        /* 14 rounds, AES, software key gen, normal oper, encrypt, 256-bit key */
        m->padlock_cw_e = 14 | (1<<7) | (2<<10);
        /* 14 rounds, AES, software key gen, normal oper, decrypt, 256-bit key */
        m->padlock_cw_d = 14 | (1<<7) | (1<<9) | (2<<10);
        break;
    case 192: /* bits */
    case 24: /* bytes */
        /* 12 rounds, AES, software key gen, normal oper, encrypt, 192-bit key */
        m->padlock_cw_e = 12 | (1<<7) | (1<<10);
        /* 12 rounds, AES, software key gen, normal oper, decrypt, 192-bit key */
        m->padlock_cw_d = 12 | (1<<7) | (1<<9) | (1<<10);
        break;
    default:
        /* 10 rounds, AES, software key gen, normal oper, encrypt, 128-bit key */
        m->padlock_cw_e = 10 | (1<<7);
        /* 10 rounds, AES, software key gen, normal oper, decrypt, 128-bit key */
        m->padlock_cw_d = 10 | (1<<7) | (1<<9);
        break;
    }
#endif
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
    keyScrubTimerInit(lo);
#endif
    return(0);
}

/*
 * keyClean_aes -- loop_func_table release hook: zeroize and free the key
 * state, if any.  Always returns 0.
 */
int keyClean_aes(struct loop_device *lo)
{
    if(lo->key_data) {
        clearAndFreeMultiKey((AESmultiKey *)lo->key_data);
        lo->key_data = 0;
    }
    return(0);
}

#if defined(CONFIG_BLK_DEV_LOOP_PADLOCK) && (defined(CONFIG_X86) || defined(CONFIG_X86_64))
#if LINUX_VERSION_CODE < 0x20400
#error "this code does not support padlock crypto instructions on 2.2 or older kernels"
#endif

/* Rewriting EFLAGS ("pushf; popf") makes the PadLock unit reload its key */
/* material before the next XCRYPT instruction (per the function's intent; */
/* see VIA PadLock programming guide) */
static __inline__ void padlock_flush_key_context(void)
{
    __asm__ __volatile__("pushf; popf" : : : "cc");
}

/* REP XCRYPTCBC (bytes 0xF3 0x0F 0xA7 0xD0): VIA PadLock CBC processing of */
/* cnt 16-byte blocks from s to d; cw = control word, k = round keys, */
/* iv = 16-byte IV.  Register bindings follow the instruction's fixed */
/* register interface (EAX=iv, ECX=count, ESI=src, EDI=dst, EBX=key, EDX=cw) */
static __inline__ void padlock_rep_xcryptcbc(void *cw, void *k, void *s, void *d, void *iv, unsigned long cnt)
{
    __asm__ __volatile__(".byte 0xF3,0x0F,0xA7,0xD0"
        : "+a" (iv), "+c" (cnt), "+S" (s), "+D" (d) /*output*/
        : "b" (k), "d" (cw) /*input*/
        : "cc", "memory" /*modified*/
    );
}

typedef struct {
    u_int32_t iv[4];      /* must be 16 byte aligned when handed to hardware */
    u_int32_t cw[4];      /* must be 16 byte aligned when handed to hardware */
    u_int32_t dummy1[4];  /* slack so the struct can be realigned in place */
} Padlock_IV_CW;

/*
 * transfer_padlock_aes -- hardware (VIA PadLock) AES-CBC transfer function.
 * Same on-disk formats as transfer_aes, but additionally requires both
 * buffers to be 16 byte aligned (-EINVAL otherwise).
 */
static int transfer_padlock_aes(struct loop_device *lo, int cmd, char *raw_buf, char *loop_buf, int size, TransferSector_t devSect)
{
    aes_context *a;
    AESmultiKey *m;
    unsigned y;
    Padlock_IV_CW ivcwua;
    Padlock_IV_CW *ivcw;

    /* ivcw->iv and ivcw->cw must have 16 byte alignment */
    ivcw = (Padlock_IV_CW *)(((unsigned long)&ivcwua + 15) & ~((unsigned long)15));
    if(!size || (size & 511) || (((unsigned long)raw_buf | (unsigned long)loop_buf) & 15)) {
        return -EINVAL;
    }
    m = (AESmultiKey *)lo->key_data;
    y = m->keyMask;
    if(cmd == READ) {
        while(size) {
            padlock_flush_key_context();
            ivcw->cw[0] = m->padlock_cw_d;
            ivcw->cw[3] = ivcw->cw[2] = ivcw->cw[1] = 0;
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
            read_lock(&m->rwlock);
#endif
            a = m->keyPtr[((unsigned)devSect) & y];
            if(y) {
                /* multi-key: blocks 1..31 chain off ciphertext block 0, */
                /* then block 0 itself is decrypted with the MD5 IV */
                /* computed over the recovered plaintext bytes 16..511 */
                memcpy(&ivcw->iv[0], raw_buf, 16);
                padlock_rep_xcryptcbc(&ivcw->cw[0], &a->aes_d_key[0], raw_buf + 16, loop_buf + 16, &ivcw->iv[0], 31);
                memcpy(&ivcw->iv[0], &m->partialMD5[0], 16);
                loop_compute_md5_iv_v3(devSect, &ivcw->iv[0], (u_int32_t *)(&loop_buf[16]));
                padlock_rep_xcryptcbc(&ivcw->cw[0], &a->aes_d_key[0], raw_buf, loop_buf, &ivcw->iv[0], 1);
            } else {
                loop_compute_sector_iv(devSect, &ivcw->iv[0]);
                padlock_rep_xcryptcbc(&ivcw->cw[0], &a->aes_d_key[0], raw_buf, loop_buf, &ivcw->iv[0], 32);
            }
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
            read_unlock(&m->rwlock);
#endif
#if LINUX_VERSION_CODE >= 0x20600
            cond_resched();
#else
            if(current->need_resched) {set_current_state(TASK_RUNNING);schedule();}
#endif
            size -= 512;
            raw_buf += 512;
            loop_buf += 512;
            devSect++;
        }
    } else {
        while(size) {
            padlock_flush_key_context();
            ivcw->cw[0] = m->padlock_cw_e;
            ivcw->cw[3] = ivcw->cw[2] = ivcw->cw[1] = 0;
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
            read_lock(&m->rwlock);
#endif
            a = m->keyPtr[((unsigned)devSect) & y];
            if(y) {
                /* stage plaintext in raw_buf so the MD5 IV can be taken */
                /* before in-place CBC encryption of all 32 blocks */
                memcpy(raw_buf, loop_buf, 512);
                memcpy(&ivcw->iv[0], &m->partialMD5[0], 16);
                loop_compute_md5_iv_v3(devSect, &ivcw->iv[0], (u_int32_t *)(&raw_buf[16]));
                padlock_rep_xcryptcbc(&ivcw->cw[0], &a->aes_e_key[0], raw_buf, raw_buf, &ivcw->iv[0], 32);
            } else {
                loop_compute_sector_iv(devSect, &ivcw->iv[0]);
                padlock_rep_xcryptcbc(&ivcw->cw[0], &a->aes_e_key[0], loop_buf, raw_buf, &ivcw->iv[0], 32);
            }
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
            read_unlock(&m->rwlock);
#endif
#if LINUX_VERSION_CODE >= 0x20600
            cond_resched();
#else
            if(current->need_resched) {set_current_state(TASK_RUNNING);schedule();}
#endif
            size -= 512;
            raw_buf += 512;
            loop_buf += 512;
            devSect++;
        }
    }
    return(0);
}
#endif

/*
 * handleIoctl_aes -- loop_func_table ioctl hook.  Dispatches the two
 * multi-key setup ioctls to multiKeySetup() (flag 0 for the plain setup,
 * flag 1 for the v3 variant); anything else is -EINVAL.
 */
int handleIoctl_aes(struct loop_device *lo, int cmd, unsigned long arg)
{
    int err;

    switch (cmd) {
    case LOOP_MULTI_KEY_SETUP:
        err = multiKeySetup(lo, (unsigned char *)arg, 0);
        break;
    case LOOP_MULTI_KEY_SETUP_V3:
        err = multiKeySetup(lo, (unsigned char *)arg, 1);
        break;
    default:
        err = -EINVAL;
    }
    return err;
}

#if LINUX_VERSION_CODE >= 0x20200
/* transfer-function tables; old GNU "label:" initializer syntax is kept */
/* deliberately for the ancient compilers this driver still supports */
static struct loop_func_table funcs_aes = {
    number: 16, /* 16 == AES */
    transfer: (void *) transfer_aes,
    init: (void *) keySetup_aes,
    release: keyClean_aes,
    ioctl: (void *) handleIoctl_aes
};

#if defined(CONFIG_BLK_DEV_LOOP_PADLOCK) && (defined(CONFIG_X86) || defined(CONFIG_X86_64))
static struct loop_func_table funcs_padlock_aes = {
    number: 16, /* 16 == AES */
    transfer: (void *) transfer_padlock_aes,
    init: (void *) keySetup_aes,
    release: keyClean_aes,
    ioctl: (void *) handleIoctl_aes
};
#endif

/*
 * init_module_aes -- register the AES transfer with the loop driver.
 * Prefers the PadLock table when the CPU is Centaur/VIA family >= 6 and
 * extended CPUID leaf 0xC0000001 has bits 6 and 7 set (NOTE(review):
 * presumably the ACE present+enabled flags -- confirm against VIA's
 * PadLock documentation).  Returns 0 or -EIO on registration failure.
 */
int init_module_aes(void)
{
#if defined(CONFIG_BLK_DEV_LOOP_PADLOCK) && (defined(CONFIG_X86) || defined(CONFIG_X86_64))
    if((boot_cpu_data.x86 >= 6) && (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR)
       && (cpuid_eax(0xC0000000) >= 0xC0000001)
       && ((cpuid_edx(0xC0000001) & 0xC0) == 0xC0)) {
        if(loop_register_transfer(&funcs_padlock_aes)) {
            printk("loop: unable to register padlock AES transfer\n");
            return -EIO;
        }
        printk("loop: padlock hardware AES enabled\n");
    } else
#endif
    if(loop_register_transfer(&funcs_aes)) {
        printk("loop: unable to register AES transfer\n");
        return -EIO;
    }
#ifdef CONFIG_BLK_DEV_LOOP_KEYSCRUB
    printk("loop: AES key scrubbing enabled\n");
#endif
    return 0;
}

/*
 * cleanup_module_aes -- unregister transfer number 16; both tables share
 * that number, so this covers whichever one was registered.
 */
void cleanup_module_aes(void)
{
    if(loop_unregister_transfer(funcs_aes.number)) {
        printk("loop: unable to unregister AES transfer\n");
    }
}
#endif
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -