
📄 lvm.c

📁 Linux development combined with the 2410; can be used to build the zImage file the 2410 requires
💻 C
📖 Page 1 of 5
	case LV_SET_ACCESS:
		/* set access flags of a logical volume */
		if (!capable(CAP_SYS_ADMIN)) return -EACCES;
		lv_ptr->lv_access = (ulong) arg;
		if (lv_ptr->lv_access & LV_WRITE)
			set_device_ro(lv_ptr->lv_dev, 0);
		else
			set_device_ro(lv_ptr->lv_dev, 1);
		break;

	case LV_SET_STATUS:
		/* set status flags of a logical volume */
		if (!capable(CAP_SYS_ADMIN)) return -EACCES;
		if (!((ulong) arg & LV_ACTIVE) && lv_ptr->lv_open > 1)
			return -EPERM;
		lv_ptr->lv_status = (ulong) arg;
		break;

	case LV_BMAP:
		/* turn logical block into (dev_t, block).  non privileged. */
		/* don't bmap a snapshot, since the mapping can change */
		if (lv_ptr->lv_access & LV_SNAPSHOT)
			return -EPERM;
		return lvm_user_bmap(inode, (struct lv_bmap *) arg);

	case LV_SET_ALLOCATION:
		/* set allocation flags of a logical volume */
		if (!capable(CAP_SYS_ADMIN)) return -EACCES;
		lv_ptr->lv_allocation = (ulong) arg;
		break;

	case LV_SNAPSHOT_USE_RATE:
		return lvm_get_snapshot_use_rate(lv_ptr, arg);

	default:
		printk(KERN_WARNING
		       "%s -- lvm_blk_ioctl: unknown command 0x%x\n",
		       lvm_name, command);
		return -EINVAL;
	}

	return 0;
} /* lvm_blk_ioctl() */


/*
 * block device close routine
 */
static int lvm_blk_close(struct inode *inode, struct file *file)
{
	int minor = MINOR(inode->i_rdev);
	vg_t *vg_ptr = vg[VG_BLK(minor)];
	lv_t *lv_ptr = vg_ptr->lv[LV_BLK(minor)];

	P_DEV("blk_close MINOR: %d  VG#: %d  LV#: %d\n",
	      minor, VG_BLK(minor), LV_BLK(minor));

	if (lv_ptr->lv_open == 1) vg_ptr->lv_open--;
	lv_ptr->lv_open--;

	MOD_DEC_USE_COUNT;

	return 0;
} /* lvm_blk_close() */

static int lvm_get_snapshot_use_rate(lv_t *lv, void *arg)
{
	lv_snapshot_use_rate_req_t lv_rate_req;

	if (!(lv->lv_access & LV_SNAPSHOT))
		return -EPERM;

	if (copy_from_user(&lv_rate_req, arg, sizeof(lv_rate_req)))
		return -EFAULT;

	if (lv_rate_req.rate < 0 || lv_rate_req.rate > 100)
		return -EINVAL;

	switch (lv_rate_req.block) {
	case 0:
		lv->lv_snapshot_use_rate = lv_rate_req.rate;
		if (lv->lv_remap_ptr * 100 / lv->lv_remap_end <
		    lv->lv_snapshot_use_rate)
			interruptible_sleep_on(&lv->lv_snapshot_wait);
		break;

	case O_NONBLOCK:
		break;

	default:
		return -EINVAL;
	}

	lv_rate_req.rate = lv->lv_remap_ptr * 100 / lv->lv_remap_end;

	return copy_to_user(arg, &lv_rate_req,
			    sizeof(lv_rate_req)) ? -EFAULT : 0;
}

static int lvm_user_bmap(struct inode *inode, struct lv_bmap *user_result)
{
	struct buffer_head bh;
	unsigned long block;
	int err;

	if (get_user(block, &user_result->lv_block))
		return -EFAULT;

	memset(&bh, 0, sizeof bh);
	bh.b_blocknr = block;
	bh.b_dev = bh.b_rdev = inode->i_rdev;
	bh.b_size = lvm_get_blksize(bh.b_dev);
	bh.b_rsector = block * (bh.b_size >> 9);

	if ((err = lvm_map(&bh, READ)) < 0) {
		printk("lvm map failed: %d\n", err);
		return -EINVAL;
	}

	return put_user(kdev_t_to_nr(bh.b_rdev), &user_result->lv_dev) ||
	       put_user(bh.b_rsector / (bh.b_size >> 9), &user_result->lv_block) ?
	       -EFAULT : 0;
}


/*
 * block device support function for /usr/src/linux/drivers/block/ll_rw_blk.c
 * (see init_module/lvm_init)
 */
static void __remap_snapshot(kdev_t rdev, ulong rsector,
			     ulong pe_start, lv_t *lv, vg_t *vg)
{
	/* copy a chunk from the origin to a snapshot device */
	down_write(&lv->lv_lock);

	/* we must redo lvm_snapshot_remap_block in order to avoid a
	   race condition in the gap where no lock was held */
	if (!lvm_snapshot_remap_block(&rdev, &rsector, pe_start, lv) &&
	    !lvm_snapshot_COW(rdev, rsector, pe_start, rsector, vg, lv))
		lvm_write_COW_table_block(vg, lv);

	up_write(&lv->lv_lock);
}

static inline void _remap_snapshot(kdev_t rdev, ulong rsector,
				   ulong pe_start, lv_t *lv, vg_t *vg)
{
	int r;

	/* check to see if this chunk is already in the snapshot */
	down_read(&lv->lv_lock);
	r = lvm_snapshot_remap_block(&rdev, &rsector, pe_start, lv);
	up_read(&lv->lv_lock);

	if (!r)
		/* we haven't yet copied this block to the snapshot */
		__remap_snapshot(rdev, rsector, pe_start, lv, vg);
}

/*
 * extents destined for a pe that is on the move should be deferred
 */
static inline int _should_defer(kdev_t pv, ulong sector, uint32_t pe_size)
{
	return ((pe_lock_req.lock == LOCK_PE) &&
		(pv == pe_lock_req.data.pv_dev) &&
		(sector >= pe_lock_req.data.pv_offset) &&
		(sector < (pe_lock_req.data.pv_offset + pe_size)));
}

static inline int _defer_extent(struct buffer_head *bh, int rw,
				kdev_t pv, ulong sector, uint32_t pe_size)
{
	if (pe_lock_req.lock == LOCK_PE) {
		down_read(&_pe_lock);
		if (_should_defer(pv, sector, pe_size)) {
			up_read(&_pe_lock);
			down_write(&_pe_lock);
			if (_should_defer(pv, sector, pe_size))
				_queue_io(bh, rw);
			up_write(&_pe_lock);
			return 1;
		}
		up_read(&_pe_lock);
	}
	return 0;
}

static int lvm_map(struct buffer_head *bh, int rw)
{
	int minor = MINOR(bh->b_rdev);
	ulong index;
	ulong pe_start;
	ulong size = bh->b_size >> 9;
	ulong rsector_org = bh->b_rsector;
	ulong rsector_map;
	kdev_t rdev_map;
	vg_t *vg_this = vg[VG_BLK(minor)];
	lv_t *lv = vg_this->lv[LV_BLK(minor)];

	down_read(&lv->lv_lock);
	if (!(lv->lv_status & LV_ACTIVE)) {
		printk(KERN_ALERT
		       "%s - lvm_map: ll_rw_blk for inactive LV %s\n",
		       lvm_name, lv->lv_name);
		goto bad;
	}

	if ((rw == WRITE || rw == WRITEA) &&
	    !(lv->lv_access & LV_WRITE)) {
		printk(KERN_CRIT
		       "%s - lvm_map: ll_rw_blk write for readonly LV %s\n",
		       lvm_name, lv->lv_name);
		goto bad;
	}

	P_MAP("%s - lvm_map minor: %d  *rdev: %s  *rsector: %lu  size:%lu\n",
	      lvm_name, minor,
	      kdevname(bh->b_rdev),
	      rsector_org, size);

	if (rsector_org + size > lv->lv_size) {
		printk(KERN_ALERT
		       "%s - lvm_map access beyond end of device; *rsector: "
		       "%lu or size: %lu wrong for minor: %2d\n",
		       lvm_name, rsector_org, size, minor);
		goto bad;
	}

	if (lv->lv_stripes < 2) {	/* linear mapping */
		/* get the index */
		index = rsector_org / vg_this->pe_size;
		pe_start = lv->lv_current_pe[index].pe;
		rsector_map = lv->lv_current_pe[index].pe +
			(rsector_org % vg_this->pe_size);
		rdev_map = lv->lv_current_pe[index].dev;

		P_MAP("lv_current_pe[%ld].pe: %d  rdev: %s  rsector:%ld\n",
		      index, lv->lv_current_pe[index].pe,
		      kdevname(rdev_map), rsector_map);

	} else {			/* striped mapping */
		ulong stripe_index;
		ulong stripe_length;

		stripe_length = vg_this->pe_size * lv->lv_stripes;
		stripe_index = (rsector_org % stripe_length) /
			lv->lv_stripesize;
		index = rsector_org / stripe_length +
			(stripe_index % lv->lv_stripes) *
			(lv->lv_allocated_le / lv->lv_stripes);
		pe_start = lv->lv_current_pe[index].pe;
		rsector_map = lv->lv_current_pe[index].pe +
			(rsector_org % stripe_length) -
			(stripe_index % lv->lv_stripes) * lv->lv_stripesize -
			stripe_index / lv->lv_stripes *
			(lv->lv_stripes - 1) * lv->lv_stripesize;
		rdev_map = lv->lv_current_pe[index].dev;

		P_MAP("lv_current_pe[%ld].pe: %d  rdev: %s  rsector:%ld\n"
		      "stripe_length: %ld  stripe_index: %ld\n",
		      index, lv->lv_current_pe[index].pe, kdevname(rdev_map),
		      rsector_map, stripe_length, stripe_index);
	}

	/*
	 * Queue writes to physical extents on the move until move completes.
	 * Don't get _pe_lock until there is a reasonable expectation that
	 * we need to queue this request, because this is in the fast path.
	 */
	if (rw == WRITE || rw == WRITEA) {
		if (_defer_extent(bh, rw, rdev_map,
				  rsector_map, vg_this->pe_size)) {
			up_read(&lv->lv_lock);
			return 0;
		}

		lv->lv_current_pe[index].writes++;	/* statistic */
	} else
		lv->lv_current_pe[index].reads++;	/* statistic */

	/* snapshot volume exception handling on physical device address base */
	if (!(lv->lv_access & (LV_SNAPSHOT | LV_SNAPSHOT_ORG)))
		goto out;

	if (lv->lv_access & LV_SNAPSHOT) {	/* remap snapshot */
		if (lv->lv_block_exception)
			lvm_snapshot_remap_block(&rdev_map, &rsector_map,
						 pe_start, lv);
		else
			goto bad;

	} else if (rw == WRITE || rw == WRITEA) {	/* snapshot origin */
		lv_t *snap;

		/* start with first snapshot and loop through all of
		   them */
		for (snap = lv->lv_snapshot_next; snap;
		     snap = snap->lv_snapshot_next) {
			/* Check for inactive snapshot */
			if (!(snap->lv_status & LV_ACTIVE))
				continue;

			/* Serializes the COW with the accesses to the
			   snapshot device */
			_remap_snapshot(rdev_map, rsector_map,
					pe_start, snap, vg_this);
		}
	}

 out:
	bh->b_rdev = rdev_map;
	bh->b_rsector = rsector_map;
	up_read(&lv->lv_lock);

	return 1;

 bad:
	buffer_IO_error(bh);
	up_read(&lv->lv_lock);
	return -1;
} /* lvm_map() */


/*
 * internal support functions
 */

#ifdef LVM_HD_NAME
/*
 * generate "hard disk" name
 */
void lvm_hd_name(char *buf, int minor)
{
	int len = 0;
	lv_t *lv_ptr;

	if (vg[VG_BLK(minor)] == NULL ||
	    (lv_ptr = vg[VG_BLK(minor)]->lv[LV_BLK(minor)]) == NULL)
		return;

	len = strlen(lv_ptr->lv_name) - 5;
	memcpy(buf, &lv_ptr->lv_name[5], len);
	buf[len] = 0;
	return;
}
#endif


/*
 * make request function
 */
static int lvm_make_request_fn(request_queue_t *q,
			       int rw,
			       struct buffer_head *bh)
{
	return (lvm_map(bh, rw) <= 0) ? 0 : 1;
}


/********************************************************************
 *
 * Character device support functions
 *
 ********************************************************************/

/*
 * character device support function logical volume manager lock
 */
static int lvm_do_lock_lvm(void)
{
lock_try_again:
	spin_lock(&lvm_lock);
	if (lock != 0 && lock != current->pid) {
		P_DEV("lvm_do_lock_lvm: locked by pid %d ...\n", lock);
		spin_unlock(&lvm_lock);
		interruptible_sleep_on(&lvm_wait);
		if (current->sigpending != 0)
			return -EINTR;
#ifdef LVM_TOTAL_RESET
		if (lvm_reset_spindown > 0)
			return -EACCES;
#endif
		goto lock_try_again;
	}
	lock = current->pid;
	P_DEV("lvm_do_lock_lvm: locking LVM for pid %d\n", lock);
	spin_unlock(&lvm_lock);
	return 0;
} /* lvm_do_lock_lvm */


/*
 * character device support function lock/unlock physical extent
 */
static int lvm_do_pe_lock_unlock(vg_t *vg_ptr, void *arg)
{
	pe_lock_req_t new_lock;
	struct buffer_head *bh;
	uint p;

	if (vg_ptr == NULL) return -ENXIO;
	if (copy_from_user(&new_lock, arg, sizeof(new_lock)) != 0)
		return -EFAULT;

	switch (new_lock.lock) {
	case LOCK_PE:
		for (p = 0; p < vg_ptr->pv_max; p++) {
			if (vg_ptr->pv[p] != NULL &&
			    new_lock.data.pv_dev == vg_ptr->pv[p]->pv_dev)
				break;
		}
		if (p == vg_ptr->pv_max) return -ENXIO;

		/*
		 * this sync relieves memory pressure to lessen the
		 * likelihood of pvmove being paged out - resulting in
		 * deadlock.
		 *
		 * This method of doing a pvmove is broken
		 */
		fsync_dev(pe_lock_req.data.lv_dev);

		down_write(&_pe_lock);
		if (pe_lock_req.lock == LOCK_PE) {
			up_write(&_pe_lock);
			return -EBUSY;
		}

		/* Should we do to_kdev_t() on the pv_dev and lv_dev??? */
		pe_lock_req.lock = LOCK_PE;
		pe_lock_req.data.lv_dev = new_lock.data.lv_dev;
		pe_lock_req.data.pv_dev = new_lock.data.pv_dev;
		pe_lock_req.data.pv_offset = new_lock.data.pv_offset;
		up_write(&_pe_lock);

		/* some requests may have got through since the fsync */
		fsync_dev(pe_lock_req.data.pv_dev);
		break;

	case UNLOCK_PE:
		down_write(&_pe_lock);
		pe_lock_req.lock = UNLOCK_PE;
		pe_lock_req.data.lv_dev = 0;
		pe_lock_req.data.pv_dev = 0;
		pe_lock_req.data.pv_offset = 0;
		bh = _dequeue_io();
		up_write(&_pe_lock);

		/* handle all deferred io for this PE */
		_flush_io(bh);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * character device support function logical extent remap
 */
static int lvm_do_le_remap(vg_t *vg_ptr, void *arg)
{
	uint l, le;
	lv_t *lv_ptr;

	if (vg_ptr == NULL) return -ENXIO;
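To make the address arithmetic in lvm_map() easier to follow, below is a minimal user-space sketch of the linear (non-striped) case only. The extent size, extent table, and requested sector are hypothetical values chosen for illustration; in the driver these come from vg_this->pe_size, lv->lv_current_pe[] and the buffer head.

/* Hypothetical, self-contained illustration of the linear mapping in lvm_map(). */
#include <stdio.h>

/* stand-in for one lv_current_pe[] entry: where a logical extent lives physically */
struct pe_entry {
	unsigned long pe;   /* starting sector of the physical extent */
	int dev;            /* device holding the extent (stand-in for kdev_t) */
};

int main(void)
{
	unsigned long pe_size = 8192;          /* sectors per extent (hypothetical) */
	struct pe_entry pe_table[] = {         /* hypothetical extent table */
		{  40960, 1 },
		{ 122880, 2 },
	};
	unsigned long rsector_org = 9000;      /* logical sector being mapped */

	/* same arithmetic as the linear branch of lvm_map() */
	unsigned long index = rsector_org / pe_size;
	unsigned long rsector_map = pe_table[index].pe + rsector_org % pe_size;

	printf("logical sector %lu -> dev %d, physical sector %lu\n",
	       rsector_org, pe_table[index].dev, rsector_map);
	return 0;
}

Running this prints "logical sector 9000 -> dev 2, physical sector 123688": sector 9000 falls into the second logical extent (9000 / 8192 = 1), so it is redirected to that extent's physical start plus the offset within the extent (9000 % 8192 = 808).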
