/*
 * vm86_32.c — excerpt (scraped page header removed; original title: "vm86_32.c")
 */
	/*
	 * Tail of get_vflags() (function header is above this excerpt):
	 * merge the hardware flags with the virtualized flag bits kept in
	 * VEFLAGS, masked by the per-task vm86 capability mask.
	 */
	return flags | (VEFLAGS & current->thread.v86mask);
}

/*
 * Test bit 'nr' in a userspace-supplied revectoring bitmap.
 * BT copies the selected bit into CF; SBB then expands CF into
 * 0 or -1, so the result is non-zero iff the bit was set.
 */
static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap),"r" (nr));
	return nr;
}

/* Access byte 'n' of an lvalue — used for byte-wise user stack I/O. */
#define val_byte(val, n) (((__u8 *)&val)[n])

/*
 * Push/pop helpers for the vm86 task's 16-bit stack in user memory.
 * 'base' is the linear base of the stack segment, 'ptr' the 16-bit
 * offset (updated in place).  Multi-byte values are transferred one
 * byte at a time so that segment wrap-around behaves like real mode.
 * NOTE: on a failed user access these macros 'goto err_label' in the
 * CALLER's scope — they may only be used where that label exists.
 */
#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

/* Statement-expression pops: evaluate to the popped value. */
#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it.
(Getting a problem * in userspace is always better than an Oops anyway.) [KD] */static void do_int(struct kernel_vm86_regs *regs, int i, unsigned char __user * ssp, unsigned short sp){ unsigned long __user *intr_ptr; unsigned long segoffs; if (regs->pt.xcs == BIOSSEG) goto cannot_handle; if (is_revectored(i, &KVM86->int_revectored)) goto cannot_handle; if (i==0x21 && is_revectored(AH(regs),&KVM86->int21_revectored)) goto cannot_handle; intr_ptr = (unsigned long __user *) (i << 2); if (get_user(segoffs, intr_ptr)) goto cannot_handle; if ((segoffs >> 16) == BIOSSEG) goto cannot_handle; pushw(ssp, sp, get_vflags(regs), cannot_handle); pushw(ssp, sp, regs->pt.xcs, cannot_handle); pushw(ssp, sp, IP(regs), cannot_handle); regs->pt.xcs = segoffs >> 16; SP(regs) -= 6; IP(regs) = segoffs & 0xffff; clear_TF(regs); clear_IF(regs); clear_AC(regs); return;cannot_handle: return_to_32bit(regs, VM86_INTx + (i << 8));}int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno){ if (VMPI.is_vm86pus) { if ( (trapno==3) || (trapno==1) ) return_to_32bit(regs, VM86_TRAP + (trapno << 8)); do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs)); return 0; } if (trapno !=1) return 1; /* we let this handle by the calling routine */ if (current->ptrace & PT_PTRACED) { unsigned long flags; spin_lock_irqsave(¤t->sighand->siglock, flags); sigdelset(¤t->blocked, SIGTRAP); recalc_sigpending(); spin_unlock_irqrestore(¤t->sighand->siglock, flags); } send_sig(SIGTRAP, current, 1); current->thread.trap_no = trapno; current->thread.error_code = error_code; return 0;}void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code){ unsigned char opcode; unsigned char __user *csp; unsigned char __user *ssp; unsigned short ip, sp, orig_flags; int data32, pref_done;#define CHECK_IF_IN_TRAP \ if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \ newflags |= TF_MASK#define VM86_FAULT_RETURN do { \ if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | 
VIF_MASK))) \ return_to_32bit(regs, VM86_PICRETURN); \ if (orig_flags & TF_MASK) \ handle_vm86_trap(regs, 0, 1); \ return; } while (0) orig_flags = *(unsigned short *)®s->pt.eflags; csp = (unsigned char __user *) (regs->pt.xcs << 4); ssp = (unsigned char __user *) (regs->pt.xss << 4); sp = SP(regs); ip = IP(regs); data32 = 0; pref_done = 0; do { switch (opcode = popb(csp, ip, simulate_sigsegv)) { case 0x66: /* 32-bit data */ data32=1; break; case 0x67: /* 32-bit address */ break; case 0x2e: /* CS */ break; case 0x3e: /* DS */ break; case 0x26: /* ES */ break; case 0x36: /* SS */ break; case 0x65: /* GS */ break; case 0x64: /* FS */ break; case 0xf2: /* repnz */ break; case 0xf3: /* rep */ break; default: pref_done = 1; } } while (!pref_done); switch (opcode) { /* pushf */ case 0x9c: if (data32) { pushl(ssp, sp, get_vflags(regs), simulate_sigsegv); SP(regs) -= 4; } else { pushw(ssp, sp, get_vflags(regs), simulate_sigsegv); SP(regs) -= 2; } IP(regs) = ip; VM86_FAULT_RETURN; /* popf */ case 0x9d: { unsigned long newflags; if (data32) { newflags=popl(ssp, sp, simulate_sigsegv); SP(regs) += 4; } else { newflags = popw(ssp, sp, simulate_sigsegv); SP(regs) += 2; } IP(regs) = ip; CHECK_IF_IN_TRAP; if (data32) { set_vflags_long(newflags, regs); } else { set_vflags_short(newflags, regs); } VM86_FAULT_RETURN; } /* int xx */ case 0xcd: { int intno=popb(csp, ip, simulate_sigsegv); IP(regs) = ip; if (VMPI.vm86dbg_active) { if ( (1 << (intno &7)) & VMPI.vm86dbg_intxxtab[intno >> 3] ) return_to_32bit(regs, VM86_INTx + (intno << 8)); } do_int(regs, intno, ssp, sp); return; } /* iret */ case 0xcf: { unsigned long newip; unsigned long newcs; unsigned long newflags; if (data32) { newip=popl(ssp, sp, simulate_sigsegv); newcs=popl(ssp, sp, simulate_sigsegv); newflags=popl(ssp, sp, simulate_sigsegv); SP(regs) += 12; } else { newip = popw(ssp, sp, simulate_sigsegv); newcs = popw(ssp, sp, simulate_sigsegv); newflags = popw(ssp, sp, simulate_sigsegv); SP(regs) += 6; } IP(regs) = newip; 
regs->pt.xcs = newcs; CHECK_IF_IN_TRAP; if (data32) { set_vflags_long(newflags, regs); } else { set_vflags_short(newflags, regs); } VM86_FAULT_RETURN; } /* cli */ case 0xfa: IP(regs) = ip; clear_IF(regs); VM86_FAULT_RETURN; /* sti */ /* * Damn. This is incorrect: the 'sti' instruction should actually * enable interrupts after the /next/ instruction. Not good. * * Probably needs some horsing around with the TF flag. Aiee.. */ case 0xfb: IP(regs) = ip; set_IF(regs); VM86_FAULT_RETURN; default: return_to_32bit(regs, VM86_UNKNOWN); } return;simulate_sigsegv: /* FIXME: After a long discussion with Stas we finally * agreed, that this is wrong. Here we should * really send a SIGSEGV to the user program. * But how do we create the correct context? We * are inside a general protection fault handler * and has just returned from a page fault handler. * The correct context for the signal handler * should be a mixture of the two, but how do we * get the information? [KD] */ return_to_32bit(regs, VM86_UNKNOWN);}/* ---------------- vm86 special IRQ passing stuff ----------------- */#define VM86_IRQNAME "vm86irq"static struct vm86_irqs { struct task_struct *tsk; int sig;} vm86_irqs[16];static DEFINE_SPINLOCK(irqbits_lock);static int irqbits;#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \ | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \ | (1 << SIGUNUSED) ) static irqreturn_t irq_handler(int intno, void *dev_id){ int irq_bit; unsigned long flags; spin_lock_irqsave(&irqbits_lock, flags); irq_bit = 1 << intno; if ((irqbits & irq_bit) || ! 
vm86_irqs[intno].tsk) goto out; irqbits |= irq_bit; if (vm86_irqs[intno].sig) send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1); /* * IRQ will be re-enabled when user asks for the irq (whether * polling or as a result of the signal) */ disable_irq_nosync(intno); spin_unlock_irqrestore(&irqbits_lock, flags); return IRQ_HANDLED;out: spin_unlock_irqrestore(&irqbits_lock, flags); return IRQ_NONE;}static inline void free_vm86_irq(int irqnumber){ unsigned long flags; free_irq(irqnumber, NULL); vm86_irqs[irqnumber].tsk = NULL; spin_lock_irqsave(&irqbits_lock, flags); irqbits &= ~(1 << irqnumber); spin_unlock_irqrestore(&irqbits_lock, flags); }void release_vm86_irqs(struct task_struct *task){ int i; for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++) if (vm86_irqs[i].tsk == task) free_vm86_irq(i);}static inline int get_and_reset_irq(int irqnumber){ int bit; unsigned long flags; int ret = 0; if (invalid_vm86_irq(irqnumber)) return 0; if (vm86_irqs[irqnumber].tsk != current) return 0; spin_lock_irqsave(&irqbits_lock, flags); bit = irqbits & (1 << irqnumber); irqbits &= ~bit; if (bit) { enable_irq(irqnumber); ret = 1; } spin_unlock_irqrestore(&irqbits_lock, flags); return ret;}static int do_vm86_irq_handling(int subfunction, int irqnumber){ int ret; switch (subfunction) { case VM86_GET_AND_RESET_IRQ: { return get_and_reset_irq(irqnumber); } case VM86_GET_IRQ_BITS: { return irqbits; } case VM86_REQUEST_IRQ: { int sig = irqnumber >> 8; int irq = irqnumber & 255; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM; if (invalid_vm86_irq(irq)) return -EPERM; if (vm86_irqs[irq].tsk) return -EPERM; ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL); if (ret) return ret; vm86_irqs[irq].sig = sig; vm86_irqs[irq].tsk = current; return irq; } case VM86_FREE_IRQ: { if (invalid_vm86_irq(irqnumber)) return -EPERM; if (!vm86_irqs[irqnumber].tsk) return 0; if (vm86_irqs[irqnumber].tsk != current) return -EPERM; free_vm86_irq(irqnumber); 
return 0; } } return -EINVAL;}
/* (end of excerpt — scraped page UI text (keyboard-shortcut help) removed) */