⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 jfs_dmap.c

📁 jfs-2.4-1.1.7.tar.gz jfs 2.4-1.1.7 源码
💻 C
📖 第 1 页 / 共 5 页
字号:
 *		the blocks will be updated in the persistent map one
 *		dmap at a time.
 *
 * PARAMETERS:
 *      ipbmap	-  pointer to in-core inode for the block map.
 *      free	- TRUE if block range is to be freed from the persistent
 *		  map; FALSE if it is to be allocated.
 *      blkno	-  starting block number of the range.
 *      nblocks	-  number of contiguous blocks in the range.
 *      tblk	-  transaction block;
 *
 * RETURN VALUES:
 *      0	- success
 *      -EIO	- i/o error
 */
int
dbUpdatePMap(struct inode *ipbmap,
	     int free, s64 blkno, s64 nblocks, struct tblock * tblk)
{
	int nblks, dbitno, wbitno, rbits;
	int word, nbits, nwords;
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
	s64 lblkno, rem, lastlblkno;
	u32 mask;
	struct dmap *dp;
	struct metapage *mp;
	struct jfs_log *log;
	int lsn, difft, diffp;

	/* the blocks better be within the mapsize. */
	if (blkno + nblocks > bmp->db_mapsize) {
		printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
		       (unsigned long long) blkno,
		       (unsigned long long) nblocks);
		jfs_error(ipbmap->i_sb,
			  "dbUpdatePMap: blocks are outside the map");
		return -EIO;
	}

	/* compute delta of transaction lsn from log syncpt */
	lsn = tblk->lsn;
	log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
	logdiff(difft, lsn, log);

	/*
	 * update the block state a dmap at a time.
	 */
	mp = NULL;
	lastlblkno = 0;
	for (rem = nblocks; rem > 0; rem -= nblks, blkno += nblks) {
		/* get the buffer for the current dmap.  a new metapage
		 * is read only when the range crosses into a new dmap;
		 * the previous one is flushed first.
		 */
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		if (lblkno != lastlblkno) {
			if (mp) {
				write_metapage(mp);
			}

			mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE,
					   0);
			if (mp == NULL)
				return -EIO;
		}
		dp = (struct dmap *) mp->data;

		/* determine the bit number and word within the dmap of
		 * the starting block.  also determine how many blocks
		 * are to be updated within this dmap.
		 */
		dbitno = blkno & (BPERDMAP - 1);
		word = dbitno >> L2DBWORD;
		nblks = min(rem, (s64)BPERDMAP - dbitno);

		/* update the bits of the dmap words. the first and last
		 * words may only have a subset of their bits updated. if
		 * this is the case, we'll work against that word (i.e.
		 * partial first and/or last) only in a single pass.  a
		 * single pass will also be used to update all words that
		 * are to have all their bits updated.
		 */
		for (rbits = nblks; rbits > 0;
		     rbits -= nbits, dbitno += nbits) {
			/* determine the bit number within the word and
			 * the number of bits within the word.
			 */
			wbitno = dbitno & (DBWORD - 1);
			nbits = min(rbits, DBWORD - wbitno);

			/* check if only part of the word is to be updated. */
			if (nbits < DBWORD) {
				/* update (free or allocate) the bits
				 * in this word.
				 */
				mask =
				    (ONES << (DBWORD - nbits) >> wbitno);
				if (free)
					dp->pmap[word] &=
					    cpu_to_le32(~mask);
				else
					dp->pmap[word] |=
					    cpu_to_le32(mask);

				word += 1;
			} else {
				/* one or more words are to have all
				 * their bits updated.  determine how
				 * many words and how many bits.
				 */
				nwords = rbits >> L2DBWORD;
				nbits = nwords << L2DBWORD;

				/* update (free or allocate) the bits
				 * in these words.  memset with 0xff
				 * per byte sets every bit of the words.
				 */
				if (free)
					memset(&dp->pmap[word], 0,
					       nwords * 4);
				else
					memset(&dp->pmap[word], (int) ONES,
					       nwords * 4);

				word += nwords;
			}
		}

		/*
		 * update dmap lsn (only once per dmap page visited)
		 */
		if (lblkno == lastlblkno)
			continue;

		lastlblkno = lblkno;

		if (mp->lsn != 0) {
			/* inherit older/smaller lsn */
			logdiff(diffp, mp->lsn, log);
			if (difft < diffp) {
				mp->lsn = lsn;

				/* move bp after tblock in logsync list */
				LOGSYNC_LOCK(log);
				list_del(&mp->synclist);
				list_add(&mp->synclist, &tblk->synclist);
				LOGSYNC_UNLOCK(log);
			}

			/* inherit younger/larger clsn */
			LOGSYNC_LOCK(log);
			logdiff(difft, tblk->clsn, log);
			logdiff(diffp, mp->clsn, log);
			if (difft > diffp)
				mp->clsn = tblk->clsn;
			LOGSYNC_UNLOCK(log);
		} else {
			/* first lsn for this metapage: attach it to the
			 * log and insert it on the transaction's logsync
			 * list.
			 */
			mp->log = log;
			mp->lsn = lsn;

			/* insert bp after tblock in logsync list */
			LOGSYNC_LOCK(log);

			log->count++;
			list_add(&mp->synclist, &tblk->synclist);

			mp->clsn = tblk->clsn;
			LOGSYNC_UNLOCK(log);
		}
	}

	/* write the last buffer. */
	if (mp) {
		write_metapage(mp);
	}

	return (0);
}


/*
 * NAME:	dbNextAG()
 *
 * FUNCTION:    find the preferred allocation group for new allocations.
 *
 *		Within the allocation groups, we maintain a preferred
 *		allocation group which consists of a group with at least
 *		average free space.  It is the preferred group that we target
 *		new inode allocation towards.  The tie-in between inode
 *		allocation and block allocation occurs as we allocate the
 *		first (data) block of an inode and specify the inode (block)
 *		as the allocation hint for this block.
 *
 *		We try to avoid having more than one open file growing in
 *		an allocation group, as this will lead to fragmentation.
 *		This differs from the old OS/2 method of trying to keep
 *		empty ags around for large allocations.
 *
 * PARAMETERS:
 *      ipbmap	-  pointer to in-core inode for the block map.
 *
 * RETURN VALUES:
 *      the preferred allocation group number.
*/int dbNextAG(struct inode *ipbmap){	s64 avgfree;	int agpref;	s64 hwm = 0;	int i;	int next_best = -1;	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;	BMAP_LOCK(bmp);	/* determine the average number of free blocks within the ags. */	avgfree = (u32)bmp->db_nfree / bmp->db_numag;	/*	 * if the current preferred ag does not have an active allocator	 * and has at least average freespace, return it	 */	agpref = bmp->db_agpref;	if ((atomic_read(&bmp->db_active[agpref]) == 0) &&	    (bmp->db_agfree[agpref] >= avgfree))		goto unlock;	/* From the last preferred ag, find the next one with at least	 * average free space.	 */	for (i = 0 ; i < bmp->db_numag; i++, agpref++) {		if (agpref == bmp->db_numag)			agpref = 0;		if (atomic_read(&bmp->db_active[agpref]))			/* open file is currently growing in this ag */			continue;		if (bmp->db_agfree[agpref] >= avgfree) {			/* Return this one */			bmp->db_agpref = agpref;			goto unlock;		} else if (bmp->db_agfree[agpref] > hwm) {			/* Less than avg. freespace, but best so far */			hwm = bmp->db_agfree[agpref];			next_best = agpref;		}	}	/*	 * If no inactive ag was found with average freespace, use the	 * next best	 */	if (next_best != -1)		bmp->db_agpref = next_best;	/* else leave db_agpref unchanged */unlock:	BMAP_UNLOCK(bmp);	/* return the preferred group.	 */	return (bmp->db_agpref);}/* * NAME:	dbAlloc() * * FUNCTION:    attempt to allocate a specified number of contiguous free *		blocks from the working allocation block map. * *		the block allocation policy uses hints and a multi-step *		approach. * *	  	for allocation requests smaller than the number of blocks *		per dmap, we first try to allocate the new blocks *		immediately following the hint.  if these blocks are not *		available, we try to allocate blocks near the hint.  if *		no blocks near the hint are available, we next try to  *		allocate within the same dmap as contains the hint. 
* *		if no blocks are available in the dmap or the allocation *		request is larger than the dmap size, we try to allocate *		within the same allocation group as contains the hint. if *		this does not succeed, we finally try to allocate anywhere *		within the aggregate. * *		we also try to allocate anywhere within the aggregate for *		for allocation requests larger than the allocation group *		size or requests that specify no hint value. * * PARAMETERS: *      ip	-  pointer to in-core inode; *      hint	- allocation hint. *      nblocks	- number of contiguous blocks in the range. *      results	- on successful return, set to the starting block number *		  of the newly allocated contiguous range. * * RETURN VALUES: *      0	- success *      -ENOSPC	- insufficient disk resources *      -EIO	- i/o error */int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results){	int rc, agno;	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;	struct bmap *bmp;	struct metapage *mp;	s64 lblkno, blkno;	struct dmap *dp;	int l2nb;	s64 mapSize;	int writers;	/* assert that nblocks is valid */	assert(nblocks > 0);#ifdef _STILL_TO_PORT	/* DASD limit check                                     F226941 */	if (OVER_LIMIT(ip, nblocks))		return -ENOSPC;#endif				/* _STILL_TO_PORT */	/* get the log2 number of blocks to be allocated.	 * if the number of blocks is not a log2 multiple, 	 * it will be rounded up to the next log2 multiple.	 */	l2nb = BLKSTOL2(nblocks);	bmp = JFS_SBI(ip->i_sb)->bmap;//retry:        /* serialize w.r.t.extendfs() */	mapSize = bmp->db_mapsize;	/* the hint should be within the map */	if (hint >= mapSize) {		jfs_error(ip->i_sb, "dbAlloc: the hint is outside the map");		return -EIO;	}	/* if the number of blocks to be allocated is greater than the	 * allocation group size, try to allocate anywhere.	 
*/	if (l2nb > bmp->db_agl2size) {		IWRITE_LOCK(ipbmap);		rc = dbAllocAny(bmp, nblocks, l2nb, results);		if (rc == 0) {			DBALLOC(bmp->db_DBmap, bmp->db_mapsize, *results,				nblocks);		}		goto write_unlock;	}	/*	 * If no hint, let dbNextAG recommend an allocation group	 */	if (hint == 0)		goto pref_ag;	/* we would like to allocate close to the hint.  adjust the	 * hint to the block following the hint since the allocators	 * will start looking for free space starting at this point.	 */	blkno = hint + 1;	if (blkno >= bmp->db_mapsize)		goto pref_ag;	agno = blkno >> bmp->db_agl2size;	/* check if blkno crosses over into a new allocation group.	 * if so, check if we should allow allocations within this	 * allocation group.	 */	if ((blkno & (bmp->db_agsize - 1)) == 0)		/* check if the AG is currenly being written to.		 * if so, call dbNextAG() to find a non-busy		 * AG with sufficient free space.		 */		if (atomic_read(&bmp->db_active[agno]))			goto pref_ag;	/* check if the allocation request size can be satisfied from a	 * single dmap.  if so, try to allocate from the dmap containing	 * the hint using a tiered strategy.	 */	if (nblocks <= BPERDMAP) {		IREAD_LOCK(ipbmap);		/* get the buffer for the dmap containing the hint.		 */		rc = -EIO;		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);		if (mp == NULL)			goto read_unlock;		dp = (struct dmap *) mp->data;		/* first, try to satisfy the allocation request with the		 * blocks beginning at the hint.		 */		if ((rc = dbAllocNext(bmp, dp, blkno, (int) nblocks))		    != -ENOSPC) {			if (rc == 0) {				*results = blkno;				DBALLOC(bmp->db_DBmap, bmp->db_mapsize,					*results, nblocks);				mark_metapage_dirty(mp);			}			release_metapage(mp);			goto read_unlock;		}		writers = atomic_read(&bmp->db_active[agno]);		if ((writers > 1) ||		    ((writers == 1) && (JFS_IP(ip)->active_ag != agno))) {			/*			 * Someone else is writing in this allocation			 * group.  
To avoid fragmenting, try another ag			 */			release_metapage(mp);			IREAD_UNLOCK(ipbmap);			goto pref_ag;		}		/* next, try to satisfy the allocation request with blocks		 * near the hint.		 */		if ((rc =		     dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results))		    != -ENOSPC) {			if (rc == 0) {				DBALLOC(bmp->db_DBmap, bmp->db_mapsize,					*results, nblocks);				mark_metapage_dirty(mp);			}			release_metapage(mp);			goto read_unlock;		}		/* try to satisfy the allocation request with blocks within		 * the same dmap as the hint.		 */		if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results))		    != -ENOSPC) {

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -