xfs_itable.c
From "Linux Kernel 2.6.9 for OMAP1710" · C code · 807 lines total · page 1 of 2
C
807 lines
while (irbp < irbufend && icount < ubcount) { /* * Loop as long as we're unable to read the * inode btree. */ while (error) { agino += XFS_INODES_PER_CHUNK; if (XFS_AGINO_TO_AGBNO(mp, agino) >= INT_GET(agi->agi_length, ARCH_CONVERT)) break; error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp); } /* * If ran off the end of the ag either with an error, * or the normal way, set end and stop collecting. */ if (error || (error = xfs_inobt_get_rec(cur, &gino, &gcnt, &gfree, &i, ARCH_NOCONVERT)) || i == 0) { end_of_ag = 1; break; } /* * If this chunk has any allocated inodes, save it. */ if (gcnt < XFS_INODES_PER_CHUNK) { INT_SET(irbp->ir_startino, ARCH_CONVERT, gino); INT_SET(irbp->ir_freecount, ARCH_CONVERT, gcnt); INT_SET(irbp->ir_free, ARCH_CONVERT, gfree); irbp++; icount += XFS_INODES_PER_CHUNK - gcnt; } /* * Set agino to after this chunk and bump the cursor. */ agino = gino + XFS_INODES_PER_CHUNK; error = xfs_inobt_increment(cur, 0, &tmp); } /* * Drop the btree buffers and the agi buffer. * We can't hold any of the locks these represent * when calling iget. */ xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_buf_relse(agbp); /* * Now format all the good inodes into the user's buffer. */ irbufend = irbp; for (irbp = irbuf; irbp < irbufend && ubleft >= statstruct_size; irbp++) { /* * Read-ahead the next chunk's worth of inodes. */ if (&irbp[1] < irbufend) { /* * Loop over all clusters in the next chunk. * Do a readahead if there are any allocated * inodes in that cluster. */ for (agbno = XFS_AGINO_TO_AGBNO(mp, INT_GET(irbp[1].ir_startino, ARCH_CONVERT)), chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK; chunkidx += nicluster, agbno += nbcluster) { if (XFS_INOBT_MASKN(chunkidx, nicluster) & ~(INT_GET(irbp[1].ir_free, ARCH_CONVERT))) xfs_btree_reada_bufs(mp, agno, agbno, nbcluster); } } /* * Now process this chunk of inodes. 
*/ for (agino = INT_GET(irbp->ir_startino, ARCH_CONVERT), chunkidx = 0, clustidx = 0; ubleft > 0 && INT_GET(irbp->ir_freecount, ARCH_CONVERT) < XFS_INODES_PER_CHUNK; chunkidx++, clustidx++, agino++) { ASSERT(chunkidx < XFS_INODES_PER_CHUNK); /* * Recompute agbno if this is the * first inode of the cluster. * * Careful with clustidx. There can be * multple clusters per chunk, a single * cluster per chunk or a cluster that has * inodes represented from several different * chunks (if blocksize is large). * * Because of this, the starting clustidx is * initialized to zero in this loop but must * later be reset after reading in the cluster * buffer. */ if ((chunkidx & (nicluster - 1)) == 0) { agbno = XFS_AGINO_TO_AGBNO(mp, INT_GET(irbp->ir_startino, ARCH_CONVERT)) + ((chunkidx & nimask) >> mp->m_sb.sb_inopblog); if (flags & BULKSTAT_FG_QUICK) { ino = XFS_AGINO_TO_INO(mp, agno, agino); bno = XFS_AGB_TO_DADDR(mp, agno, agbno); /* * Get the inode cluster buffer */ ASSERT(xfs_inode_zone != NULL); ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP); ip->i_ino = ino; ip->i_mount = mp; if (bp) xfs_buf_relse(bp); error = xfs_itobp(mp, NULL, ip, &dip, &bp, bno); if (!error) clustidx = ip->i_boffset / mp->m_sb.sb_inodesize; kmem_zone_free(xfs_inode_zone, ip); if (XFS_TEST_ERROR(error != 0, mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK, XFS_RANDOM_BULKSTAT_READ_CHUNK)) { bp = NULL; break; } } } /* * Skip if this inode is free. */ if (XFS_INOBT_MASK(chunkidx) & INT_GET(irbp->ir_free, ARCH_CONVERT)) continue; /* * Count used inodes as free so we can tell * when the chunk is used up. 
*/ INT_MOD(irbp->ir_freecount, ARCH_CONVERT, +1); ino = XFS_AGINO_TO_INO(mp, agno, agino); bno = XFS_AGB_TO_DADDR(mp, agno, agbno); if (flags & BULKSTAT_FG_QUICK) { dip = (xfs_dinode_t *)xfs_buf_offset(bp, (clustidx << mp->m_sb.sb_inodelog)); if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC || !XFS_DINODE_GOOD_VERSION( INT_GET(dip->di_core.di_version, ARCH_CONVERT))) continue; } /* * Get the inode and fill in a single buffer. * BULKSTAT_FG_QUICK uses dip to fill it in. * BULKSTAT_FG_IGET uses igets. * See: xfs_bulkstat_one & xfs_dm_bulkstat_one. * This is also used to count inodes/blks, etc * in xfs_qm_quotacheck. */ ubused = statstruct_size; error = formatter(mp, ino, ubufp, ubleft, private_data, bno, &ubused, dip, &fmterror); if (fmterror == BULKSTAT_RV_NOTHING) { if (error == ENOMEM) ubleft = 0; continue; } if (fmterror == BULKSTAT_RV_GIVEUP) { ubleft = 0; ASSERT(error); rval = error; break; } if (ubufp) ubufp += ubused; ubleft -= ubused; ubelem++; lastino = ino; } } if (bp) xfs_buf_relse(bp); /* * Set up for the next loop iteration. */ if (ubleft > 0) { if (end_of_ag) { agno++; agino = 0; } else agino = XFS_INO_TO_AGINO(mp, lastino); } else break; } /* * Done, we're either out of filesystem or space to put the data. */ kmem_free(irbuf, NBPC); if (ubuffer) unuseracc(ubuffer, ubcount * statstruct_size, (B_READ|B_PHYS)); *ubcountp = ubelem; if (agno >= mp->m_sb.sb_agcount) { /* * If we ran out of filesystem, mark lastino as off * the end of the filesystem, so the next call * will return immediately. */ *lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0); *done = 1; } else *lastinop = (xfs_ino_t)lastino; return rval;}/* * Return stat information in bulk (by-inode) for the filesystem. * Special case for non-sequential one inode bulkstat. 
*/int /* error status */xfs_bulkstat_single( xfs_mount_t *mp, /* mount point for filesystem */ xfs_ino_t *lastinop, /* inode to return */ char __user *buffer, /* buffer with inode stats */ int *done) /* 1 if there're more stats to get */{ int count; /* count value for bulkstat call */ int error; /* return value */ xfs_ino_t ino; /* filesystem inode number */ int res; /* result from bs1 */ /* * note that requesting valid inode numbers which are not allocated * to inodes will most likely cause xfs_itobp to generate warning * messages about bad magic numbers. This is ok. The fact that * the inode isn't actually an inode is handled by the * error check below. Done this way to make the usual case faster * at the expense of the error case. */ ino = (xfs_ino_t)*lastinop; error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), NULL, 0, NULL, NULL, &res); if (error) { /* * Special case way failed, do it the "long" way * to see if that works. */ (*lastinop)--; count = 1; if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one, NULL, sizeof(xfs_bstat_t), buffer, BULKSTAT_FG_IGET, done)) return error; if (count == 0 || (xfs_ino_t)*lastinop != ino) return error == EFSCORRUPTED ? XFS_ERROR(EINVAL) : error; else return 0; } *done = 0; return 0;}/* * Return inode number table for the filesystem. 
 *
 * Walks each allocation group's inode btree, emitting one xfs_inogrp_t
 * per inode chunk into the user buffer, resuming from *lastino.  On
 * return *count holds the number of records copied and *lastino the
 * point to resume from on the next call.
 */
int					/* error status */
xfs_inumbers(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastino,	/* last inode returned */
	int		*count,		/* size of buffer/count returned */
	xfs_inogrp_t	__user *ubuffer)/* buffer with inode descriptions */
{
	xfs_buf_t	*agbp;		/* agi header buffer */
	xfs_agino_t	agino;		/* inode # in allocation group */
	xfs_agnumber_t	agno;		/* allocation group number */
	int		bcount;		/* record capacity of bounce buffer */
	xfs_inogrp_t	*buffer;	/* kernel bounce buffer */
	int		bufidx;		/* next free slot in bounce buffer */
	xfs_btree_cur_t	*cur;		/* inode btree cursor */
	int		error;		/* error code */
	__int32_t	gcnt;		/* free-inode count from btree record */
	xfs_inofree_t	gfree;		/* free-inode mask from btree record */
	xfs_agino_t	gino;		/* chunk start inode from btree record */
	int		i;		/* record-found flag from get_rec */
	xfs_ino_t	ino;		/* inode number to resume from */
	int		left;		/* records still wanted by caller */
	int		tmp;		/* dummy result flag */

	ino = (xfs_ino_t)*lastino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	left = *count;
	*count = 0;
	/* Bounce buffer holds at most NBPP bytes (presumably one page). */
	bcount = MIN(left, (int)(NBPP / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	error = bufidx = 0;
	cur = NULL;
	agbp = NULL;
	while (left > 0 && agno < mp->m_sb.sb_agcount) {
		if (agbp == NULL) {
			down_read(&mp->m_peraglock);
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			up_read(&mp->m_peraglock);
			if (error) {
				/*
				 * If we can't read the AGI of this ag,
				 * then just skip to the next one.
				 */
				ASSERT(cur == NULL);
				agbp = NULL;
				agno++;
				agino = 0;
				continue;
			}
			cur = xfs_btree_init_cursor(mp, NULL, agbp, agno,
				XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
			error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * Move up to the last inode in the current
				 * chunk.  The lookup_ge will always get
				 * us the first inode in the next chunk.
				 */
				agino += XFS_INODES_PER_CHUNK - 1;
				continue;
			}
		}
		/*
		 * Read the record the cursor points at; i == 0 means we
		 * ran off the end of this ag, so move on to the next one.
		 */
		if ((error = xfs_inobt_get_rec(cur, &gino, &gcnt, &gfree,
			&i, ARCH_NOCONVERT)) ||
		    i == 0) {
			xfs_buf_relse(agbp);
			agbp = NULL;
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			cur = NULL;
			agno++;
			agino = 0;
			continue;
		}
		/* Record this chunk; remember its last inode for resume. */
		agino = gino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino = XFS_AGINO_TO_INO(mp, agno, gino);
		buffer[bufidx].xi_alloccount = XFS_INODES_PER_CHUNK - gcnt;
		buffer[bufidx].xi_allocmask = ~gfree;
		bufidx++;
		left--;
		/* Flush the bounce buffer to user space when it fills up. */
		if (bufidx == bcount) {
			if (copy_to_user(ubuffer, buffer,
					bufidx * sizeof(*buffer))) {
				error = XFS_ERROR(EFAULT);
				break;
			}
			ubuffer += bufidx;
			*count += bufidx;
			bufidx = 0;
		}
		if (left) {
			error = xfs_inobt_increment(cur, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * The agino value has already been bumped.
				 * Just try to skip up to it.
				 */
				agino += XFS_INODES_PER_CHUNK;
				continue;
			}
		}
	}
	/*
	 * Copy out any records still in the bounce buffer and save the
	 * resume point, unless we stopped because of an error.
	 */
	if (!error) {
		if (bufidx) {
			if (copy_to_user(ubuffer, buffer,
					bufidx * sizeof(*buffer)))
				error = XFS_ERROR(EFAULT);
			else
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}
	/* Release the bounce buffer, btree cursor and agi buffer. */
	kmem_free(buffer, bcount * sizeof(*buffer));
	if (cur)
		xfs_btree_del_cursor(cur, (error ?
					   XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);
	return error;
}
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?