i2o_config.c
printk(KERN_INFO "i2o_config: could not get query\n"); kfree(query); return -EFAULT; } } res = kmalloc(65536, GFP_KERNEL); if(!res) { i2o_unlock_controller(c); kfree(query); return -ENOMEM; } msg[1] = (I2O_CMD_UTIL_CONFIG_DIALOG << 24)|HOST_TID<<12|kcmd.tid; msg[2] = i2o_cfg_context; msg[3] = 0; msg[4] = kcmd.page; msg[5] = 0xD0000000|65536; msg[6] = virt_to_bus(res); if(!kcmd.qlen) /* Check for post data */ msg[0] = SEVEN_WORD_MSG_SIZE|SGL_OFFSET_5; else { msg[0] = NINE_WORD_MSG_SIZE|SGL_OFFSET_5; msg[5] = 0x50000000|65536; msg[7] = 0xD4000000|(kcmd.qlen); msg[8] = virt_to_bus(query); } /* Wait for a considerable time till the Controller does its job before timing out. The controller might take more time to process this request if there are many devices connected to it. */ token = i2o_post_wait_mem(c, msg, 9*4, 400, query, res); if(token < 0) { printk(KERN_DEBUG "token = %#10x\n", token); i2o_unlock_controller(c); if(token != -ETIMEDOUT) { kfree(res); if(kcmd.qlen) kfree(query); } return token; } i2o_unlock_controller(c); len = strnlen(res, 65536); put_user(len, kcmd.reslen); if(len > reslen) ret = -ENOMEM; if(copy_to_user(kcmd.resbuf, res, len)) ret = -EFAULT; kfree(res); if(kcmd.qlen) kfree(query); return ret;} int ioctl_swdl(unsigned long arg){ struct i2o_sw_xfer kxfer; struct i2o_sw_xfer *pxfer = (struct i2o_sw_xfer *)arg; unsigned char maxfrag = 0, curfrag = 1; unsigned char *buffer; u32 msg[9]; unsigned int status = 0, swlen = 0, fragsize = 8192; struct i2o_controller *c; if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) return -EFAULT; if(get_user(swlen, kxfer.swlen) < 0) return -EFAULT; if(get_user(maxfrag, kxfer.maxfrag) < 0) return -EFAULT; if(get_user(curfrag, kxfer.curfrag) < 0) return -EFAULT; if(curfrag==maxfrag) fragsize = swlen-(maxfrag-1)*8192; if(!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize)) return -EFAULT; c = i2o_find_controller(kxfer.iop); if(!c) return -ENXIO; buffer=kmalloc(fragsize, GFP_KERNEL); if (buffer==NULL) { i2o_unlock_controller(c); return -ENOMEM; } __copy_from_user(buffer, kxfer.buf, fragsize); msg[0]= NINE_WORD_MSG_SIZE | SGL_OFFSET_7; msg[1]= I2O_CMD_SW_DOWNLOAD<<24 | HOST_TID<<12 | ADAPTER_TID; msg[2]= (u32)cfg_handler.context; msg[3]= 0; msg[4]= (((u32)kxfer.flags)<<24) | (((u32)kxfer.sw_type)<<16) | (((u32)maxfrag)<<8) | (((u32)curfrag)); msg[5]= swlen; msg[6]= kxfer.sw_id; msg[7]= (0xD0000000 | fragsize); msg[8]= virt_to_bus(buffer);// printk("i2o_config: swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL); i2o_unlock_controller(c); if(status != -ETIMEDOUT) kfree(buffer); if (status != I2O_POST_WAIT_OK) { // it fails if you try and send frags out of order // and for some yet unknown reasons too printk(KERN_INFO "i2o_config: swdl failed, DetailedStatus = %d\n", status); return status; } return 0;}int ioctl_swul(unsigned long arg){ struct i2o_sw_xfer kxfer; struct i2o_sw_xfer *pxfer = (struct i2o_sw_xfer *)arg; unsigned char maxfrag = 0, curfrag = 1; unsigned char *buffer; u32 msg[9]; unsigned int status = 0, swlen = 0, fragsize = 8192; struct i2o_controller *c; if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) return -EFAULT; if(get_user(swlen, kxfer.swlen) < 0) return -EFAULT; if(get_user(maxfrag, kxfer.maxfrag) < 0) return -EFAULT; if(get_user(curfrag, kxfer.curfrag) < 0) return -EFAULT; if(curfrag==maxfrag) fragsize = swlen-(maxfrag-1)*8192; if(!kxfer.buf || !access_ok(VERIFY_WRITE, kxfer.buf, fragsize)) return -EFAULT; c = 
	c = i2o_find_controller(kxfer.iop);
	if(!c)
		return -ENXIO;

	buffer=kmalloc(fragsize, GFP_KERNEL);
	if (buffer==NULL)
	{
		i2o_unlock_controller(c);
		return -ENOMEM;
	}

	msg[0]= NINE_WORD_MSG_SIZE | SGL_OFFSET_7;
	msg[1]= I2O_CMD_SW_UPLOAD<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2]= (u32)cfg_handler.context;
	msg[3]= 0;
	msg[4]= (u32)kxfer.flags<<24|(u32)kxfer.sw_type<<16|(u32)maxfrag<<8|(u32)curfrag;
	msg[5]= swlen;
	msg[6]= kxfer.sw_id;
	msg[7]= (0xD0000000 | fragsize);
	msg[8]= virt_to_bus(buffer);

//	printk("i2o_config: swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
	status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL);
	i2o_unlock_controller(c);

	if (status != I2O_POST_WAIT_OK)
	{
		if(status != -ETIMEDOUT)
			kfree(buffer);
		printk(KERN_INFO "i2o_config: swul failed, DetailedStatus = %d\n",
			status);
		return status;
	}

	__copy_to_user(kxfer.buf, buffer, fragsize);
	kfree(buffer);

	return 0;
}

int ioctl_swdel(unsigned long arg)
{
	struct i2o_controller *c;
	struct i2o_sw_xfer kxfer, *pxfer = (struct i2o_sw_xfer *)arg;
	u32 msg[7];
	unsigned int swlen;
	int token;

	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
		return -EFAULT;

	if (get_user(swlen, kxfer.swlen) < 0)
		return -EFAULT;

	c = i2o_find_controller(kxfer.iop);
	if (!c)
		return -ENXIO;

	msg[0] = SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = I2O_CMD_SW_REMOVE<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = (u32)i2o_cfg_context;
	msg[3] = 0;
	msg[4] = (u32)kxfer.flags<<24 | (u32)kxfer.sw_type<<16;
	msg[5] = swlen;
	msg[6] = kxfer.sw_id;

	token = i2o_post_wait(c, msg, sizeof(msg), 10);
	i2o_unlock_controller(c);

	if (token != I2O_POST_WAIT_OK)
	{
		printk(KERN_INFO "i2o_config: swdel failed, DetailedStatus = %d\n",
			token);
		return -ETIMEDOUT;
	}

	return 0;
}

int ioctl_validate(unsigned long arg)
{
	int token;
	int iop = (int)arg;
	u32 msg[4];
	struct i2o_controller *c;

	c=i2o_find_controller(iop);
	if (!c)
		return -ENXIO;

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_CONFIG_VALIDATE<<24 | HOST_TID<<12 | iop;
	msg[2] = (u32)i2o_cfg_context;
	msg[3] = 0;

	token = i2o_post_wait(c, msg, sizeof(msg), 10);
	i2o_unlock_controller(c);

	if (token != I2O_POST_WAIT_OK)
	{
		printk(KERN_INFO "Can't validate configuration, ErrorStatus = %d\n",
			token);
		return -ETIMEDOUT;
	}

	return 0;
}

static int ioctl_evt_reg(unsigned long arg, struct file *fp)
{
	u32 msg[5];
	struct i2o_evt_id *pdesc = (struct i2o_evt_id *)arg;
	struct i2o_evt_id kdesc;
	struct i2o_controller *iop;
	struct i2o_device *d;

	if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id)))
		return -EFAULT;

	/* IOP exists? */
	iop = i2o_find_controller(kdesc.iop);
	if(!iop)
		return -ENXIO;
	i2o_unlock_controller(iop);

	/* Device exists? */
*/ for(d = iop->devices; d; d = d->next) if(d->lct_data.tid == kdesc.tid) break; if(!d) return -ENODEV; msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; msg[1] = I2O_CMD_UTIL_EVT_REGISTER<<24 | HOST_TID<<12 | kdesc.tid; msg[2] = (u32)i2o_cfg_context; msg[3] = (u32)fp->private_data; msg[4] = kdesc.evt_mask; i2o_post_this(iop, msg, 20); return 0;} static int ioctl_evt_get(unsigned long arg, struct file *fp){ u32 id = (u32)fp->private_data; struct i2o_cfg_info *p = NULL; struct i2o_evt_get *uget = (struct i2o_evt_get*)arg; struct i2o_evt_get kget; unsigned long flags; for(p = open_files; p; p = p->next) if(p->q_id == id) break; if(!p->q_len) { return -ENOENT; return 0; } memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info)); MODINC(p->q_out, I2O_EVT_Q_LEN); spin_lock_irqsave(&i2o_config_lock, flags); p->q_len--; kget.pending = p->q_len; kget.lost = p->q_lost; spin_unlock_irqrestore(&i2o_config_lock, flags); if(copy_to_user(uget, &kget, sizeof(struct i2o_evt_get))) return -EFAULT; return 0;}static int cfg_open(struct inode *inode, struct file *file){ struct i2o_cfg_info *tmp = (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info), GFP_KERNEL); unsigned long flags; if(!tmp) return -ENOMEM; file->private_data = (void*)(i2o_cfg_info_id++); tmp->fp = file; tmp->fasync = NULL; tmp->q_id = (u32)file->private_data; tmp->q_len = 0; tmp->q_in = 0; tmp->q_out = 0; tmp->q_lost = 0; tmp->next = open_files; spin_lock_irqsave(&i2o_config_lock, flags); open_files = tmp; spin_unlock_irqrestore(&i2o_config_lock, flags); return 0;}static int cfg_release(struct inode *inode, struct file *file){ u32 id = (u32)file->private_data; struct i2o_cfg_info *p1, *p2; unsigned long flags; lock_kernel(); p1 = p2 = NULL; spin_lock_irqsave(&i2o_config_lock, flags); for(p1 = open_files; p1; ) { if(p1->q_id == id) { if(p1->fasync) cfg_fasync(-1, file, 0); if(p2) p2->next = p1->next; else open_files = p1->next; kfree(p1); break; } p2 = p1; p1 = p1->next; } spin_unlock_irqrestore(&i2o_config_lock, flags); unlock_kernel(); return 0;}static int cfg_fasync(int fd, struct file *fp, int on){ u32 id = (u32)fp->private_data; struct i2o_cfg_info *p; for(p = open_files; p; p = p->next) if(p->q_id == id) break; if(!p) return -EBADF; return fasync_helper(fd, fp, on, &p->fasync);}static struct file_operations config_fops ={ owner: THIS_MODULE, llseek: no_llseek, read: cfg_read, write: cfg_write, ioctl: cfg_ioctl, open: cfg_open, release: cfg_release, fasync: cfg_fasync,};static struct miscdevice i2o_miscdev = { I2O_MINOR, "i2octl", &config_fops}; #ifdef MODULEint init_module(void)#elseint __init i2o_config_init(void)#endif{ printk(KERN_INFO "I2O configuration manager v 0.04.\n"); printk(KERN_INFO " (C) Copyright 1999 Red Hat Software\n"); if((page_buf = kmalloc(4096, GFP_KERNEL))==NULL) { printk(KERN_ERR "i2o_config: no memory for page buffer.\n"); return -ENOBUFS; } if(misc_register(&i2o_miscdev)==-1) { printk(KERN_ERR "i2o_config: can't register device.\n"); kfree(page_buf); return -EBUSY; } /* * Install our handler */ if(i2o_install_handler(&cfg_handler)<0) { kfree(page_buf); printk(KERN_ERR "i2o_config: handler register failed.\n"); misc_deregister(&i2o_miscdev); return -EBUSY; } /* * The low 16bits of the transaction context must match this * for everything we post. 
	i2o_cfg_context = cfg_handler.context;

	return 0;
}

#ifdef MODULE

void cleanup_module(void)
{
	misc_deregister(&i2o_miscdev);

	if(page_buf)
		kfree(page_buf);
	if(i2o_cfg_context != -1)
		i2o_remove_handler(&cfg_handler);
}

EXPORT_NO_SYMBOLS;
MODULE_AUTHOR("Red Hat Software");
MODULE_DESCRIPTION("I2O Configuration");
MODULE_LICENSE("GPL");

#endif
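
/*
 * Separate user-space sketch (not part of i2o_config.c): a minimal,
 * hedged example of talking to the "i2octl" misc device this driver
 * registers. It registers for UtilEventRegister notifications on one
 * TID via ioctl_evt_reg() and then drains the per-open-file event
 * queue served by ioctl_evt_get().
 *
 * Assumptions: the device node path ("/dev/i2o/ctl"), the request
 * codes I2OEVTREG/I2OEVTGET and the struct i2o_evt_id / i2o_evt_get
 * layouts are taken from <linux/i2o-dev.h> as found in 2.4-era trees;
 * check your own headers, these names are not defined in this file.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2o-dev.h>

int watch_i2o_events(unsigned int iop, unsigned int tid)
{
	struct i2o_evt_id id = {
		.iop      = iop,		/* IOP unit number */
		.tid      = tid,		/* target device TID */
		.evt_mask = 0xffffffff,		/* all event indicators */
	};
	struct i2o_evt_get evt;
	int fd = open("/dev/i2o/ctl", O_RDWR);	/* path is an assumption */

	if (fd < 0)
		return -1;

	/* Driver posts I2O_CMD_UTIL_EVT_REGISTER for this TID on our behalf */
	if (ioctl(fd, I2OEVTREG, &id) < 0) {
		close(fd);
		return -1;
	}

	/* Drain whatever is queued; the driver returns -ENOENT when empty */
	while (ioctl(fd, I2OEVTGET, &evt) == 0)
		printf("event: pending=%u lost=%u\n", evt.pending, evt.lost);

	close(fd);
	return 0;
}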