📄 xfs_mount.c
字号:
* on the indicated field. Apply the delta to the * proper field. If the fields value would dip below * 0, then do not apply the delta and return EINVAL. */ switch (field) { case XFS_SBS_ICOUNT: lcounter = (long long)mp->m_sb.sb_icount; lcounter += delta; if (lcounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_icount = lcounter; return 0; case XFS_SBS_IFREE: lcounter = (long long)mp->m_sb.sb_ifree; lcounter += delta; if (lcounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_ifree = lcounter; return 0; case XFS_SBS_FDBLOCKS: lcounter = (long long) mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); res_used = (long long)(mp->m_resblks - mp->m_resblks_avail); if (delta > 0) { /* Putting blocks back */ if (res_used > delta) { mp->m_resblks_avail += delta; } else { rem = delta - res_used; mp->m_resblks_avail = mp->m_resblks; lcounter += rem; } } else { /* Taking blocks away */ lcounter += delta; /* * If were out of blocks, use any available reserved blocks if * were allowed to. 
*/ if (lcounter < 0) { if (rsvd) { lcounter = (long long)mp->m_resblks_avail + delta; if (lcounter < 0) { return XFS_ERROR(ENOSPC); } mp->m_resblks_avail = lcounter; return 0; } else { /* not reserved */ return XFS_ERROR(ENOSPC); } } } mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); return 0; case XFS_SBS_FREXTENTS: lcounter = (long long)mp->m_sb.sb_frextents; lcounter += delta; if (lcounter < 0) { return XFS_ERROR(ENOSPC); } mp->m_sb.sb_frextents = lcounter; return 0; case XFS_SBS_DBLOCKS: lcounter = (long long)mp->m_sb.sb_dblocks; lcounter += delta; if (lcounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_dblocks = lcounter; return 0; case XFS_SBS_AGCOUNT: scounter = mp->m_sb.sb_agcount; scounter += delta; if (scounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_agcount = scounter; return 0; case XFS_SBS_IMAX_PCT: scounter = mp->m_sb.sb_imax_pct; scounter += delta; if (scounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_imax_pct = scounter; return 0; case XFS_SBS_REXTSIZE: scounter = mp->m_sb.sb_rextsize; scounter += delta; if (scounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_rextsize = scounter; return 0; case XFS_SBS_RBMBLOCKS: scounter = mp->m_sb.sb_rbmblocks; scounter += delta; if (scounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_rbmblocks = scounter; return 0; case XFS_SBS_RBLOCKS: lcounter = (long long)mp->m_sb.sb_rblocks; lcounter += delta; if (lcounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_rblocks = lcounter; return 0; case XFS_SBS_REXTENTS: lcounter = (long long)mp->m_sb.sb_rextents; lcounter += delta; if (lcounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_rextents = lcounter; return 0; case XFS_SBS_REXTSLOG: scounter = mp->m_sb.sb_rextslog; scounter += delta; if (scounter < 0) { ASSERT(0); return XFS_ERROR(EINVAL); } mp->m_sb.sb_rextslog = scounter; return 0; default: ASSERT(0); return XFS_ERROR(EINVAL); }}/* * 
xfs_mod_incore_sb() is used to change a field in the in-core
 * superblock structure by the specified delta.  This modification
 * is protected by the SB_LOCK.  Just use the xfs_mod_incore_sb_unlocked()
 * routine to do the work.
 */
int
xfs_mod_incore_sb(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	unsigned long	s;
	int	status;

	/* check for per-cpu counters */
	switch (field) {
#ifdef HAVE_PERCPU_SB
	case XFS_SBS_ICOUNT:
	case XFS_SBS_IFREE:
	case XFS_SBS_FDBLOCKS:
		/*
		 * Hot counters go through the per-cpu fast path unless
		 * per-cpu counting is disabled for this mount.
		 */
		if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
			status = xfs_icsb_modify_counters(mp, field,
							delta, rsvd);
			break;
		}
		/* FALLTHROUGH */
#endif
	default:
		/* Slow path: apply the delta while holding SB_LOCK. */
		s = XFS_SB_LOCK(mp);
		status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
		XFS_SB_UNLOCK(mp, s);
		break;
	}

	return status;
}

/*
 * xfs_mod_incore_sb_batch() is used to change more than one field
 * in the in-core superblock structure at a time.  This modification
 * is protected by a lock internal to this module.  The fields and
 * changes to those fields are specified in the array of xfs_mod_sb
 * structures passed in.
 *
 * Either all of the specified deltas will be applied or none of
 * them will.  If any modified field dips below 0, then all modifications
 * will be backed out and EINVAL will be returned.
 */
int
xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
{
	unsigned long	s;
	int		status=0;
	xfs_mod_sb_t	*msbp;

	/*
	 * Loop through the array of mod structures and apply each
	 * individually.  If any fail, then back out all those
	 * which have already been applied.  Do all of this within
	 * the scope of the SB_LOCK so that all of the changes will
	 * be atomic.
	 */
	s = XFS_SB_LOCK(mp);
	msbp = &msb[0];
	/* NOTE(review): the for-init re-assigns msbp to itself; redundant
	 * with the line above but harmless. */
	for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
		/*
		 * Apply the delta at index n.  If it fails, break
		 * from the loop so we'll fall into the undo loop
		 * below.
		 */
		switch (msbp->msb_field) {
#ifdef HAVE_PERCPU_SB
		case XFS_SBS_ICOUNT:
		case XFS_SBS_IFREE:
		case XFS_SBS_FDBLOCKS:
			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
				/*
				 * SB_LOCK is dropped across the per-cpu
				 * modify and re-taken afterwards, so the
				 * batch is not atomic with respect to
				 * other SB_LOCK holders in this window.
				 */
				XFS_SB_UNLOCK(mp, s);
				status = xfs_icsb_modify_counters(mp,
							msbp->msb_field,
							msbp->msb_delta, rsvd);
				s = XFS_SB_LOCK(mp);
				break;
			}
			/* FALLTHROUGH */
#endif
		default:
			status = xfs_mod_incore_sb_unlocked(mp,
						msbp->msb_field,
						msbp->msb_delta, rsvd);
			break;
		}

		if (status != 0) {
			break;
		}
	}

	/*
	 * If we didn't complete the loop above, then back out
	 * any changes made to the superblock.  If you add code
	 * between the loop above and here, make sure that you
	 * preserve the value of status. Loop back until
	 * we step below the beginning of the array.  Make sure
	 * we don't touch anything back there.
	 */
	if (status != 0) {
		msbp--;
		while (msbp >= msb) {
			switch (msbp->msb_field) {
#ifdef HAVE_PERCPU_SB
			case XFS_SBS_ICOUNT:
			case XFS_SBS_IFREE:
			case XFS_SBS_FDBLOCKS:
				if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
					XFS_SB_UNLOCK(mp, s);
					status = xfs_icsb_modify_counters(mp,
							msbp->msb_field,
							-(msbp->msb_delta),
							rsvd);
					s = XFS_SB_LOCK(mp);
					break;
				}
				/* FALLTHROUGH */
#endif
			default:
				/* Apply the negated delta to undo. */
				status = xfs_mod_incore_sb_unlocked(mp,
							msbp->msb_field,
							-(msbp->msb_delta),
							rsvd);
				break;
			}
			/* Undoing a delta we already applied must succeed. */
			ASSERT(status == 0);
			msbp--;
		}
	}
	XFS_SB_UNLOCK(mp, s);
	return status;
}

/*
 * xfs_getsb() is called to obtain the buffer for the superblock.
 * The buffer is returned locked and read in from disk.
 * The buffer should be released with a call to xfs_brelse().
 *
 * If the flags parameter is BUF_TRYLOCK, then we'll only return
 * the superblock buffer if it can be locked without sleeping.
 * If it can't then we'll return NULL.
 */
xfs_buf_t *
xfs_getsb(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_buf_t	*bp;

	ASSERT(mp->m_sb_bp != NULL);
	bp = mp->m_sb_bp;
	if (flags & XFS_BUF_TRYLOCK) {
		/* Non-blocking attempt; bail out rather than sleep. */
		if (!XFS_BUF_CPSEMA(bp)) {
			return NULL;
		}
	} else {
		XFS_BUF_PSEMA(bp, PRIBIO);
	}
	/* Take a hold reference; dropped when the caller releases bp. */
	XFS_BUF_HOLD(bp);
	ASSERT(XFS_BUF_ISDONE(bp));
	return bp;
}

/*
 * Used to free the superblock along various error paths.
*/
void
xfs_freesb(
	xfs_mount_t	*mp)
{
	xfs_buf_t	*bp;

	/*
	 * Use xfs_getsb() so that the buffer will be locked
	 * when we call xfs_buf_relse().
	 */
	bp = xfs_getsb(mp, 0);
	XFS_BUF_UNMANAGE(bp);
	xfs_buf_relse(bp);
	/* Clear the cached pointer so nobody reuses the freed buffer. */
	mp->m_sb_bp = NULL;
}

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 * Returns 0 on success, -1 (with a console warning) on failure.
 */
STATIC int
xfs_uuid_mount(
	xfs_mount_t	*mp)
{
	if (uuid_is_nil(&mp->m_sb.sb_uuid)) {
		cmn_err(CE_WARN,
			"XFS: Filesystem %s has nil UUID - can't mount",
			mp->m_fsname);
		return -1;
	}
	/* uuid_table_insert() fails if the UUID is already registered. */
	if (!uuid_table_insert(&mp->m_sb.sb_uuid)) {
		cmn_err(CE_WARN,
			"XFS: Filesystem %s has duplicate UUID - can't mount",
			mp->m_fsname);
		return -1;
	}
	return 0;
}

/*
 * Remove filesystem from the UUID table.
 * Counterpart of the uuid_table_insert() done in xfs_uuid_mount().
 */
STATIC void
xfs_uuid_unmount(
	xfs_mount_t	*mp)
{
	uuid_table_remove(&mp->m_sb.sb_uuid);
}

/*
 * Used to log changes to the superblock unit and width fields which could
 * be altered by the mount options. Only the first superblock is updated.
 * Best-effort: if the transaction reservation fails the change is simply
 * not logged (the transaction is cancelled and we return).
 */
STATIC void
xfs_mount_log_sbunit(
	xfs_mount_t	*mp,
	__int64_t	fields)
{
	xfs_trans_t	*tp;

	ASSERT(fields & (XFS_SB_UNIT|XFS_SB_WIDTH|XFS_SB_UUID));

	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
	if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
				XFS_DEFAULT_LOG_COUNT)) {
		xfs_trans_cancel(tp, 0);
		return;
	}
	xfs_mod_sb(tp, fields);
	xfs_trans_commit(tp, 0);
}

#ifdef HAVE_PERCPU_SB
/*
 * Per-cpu incore superblock counters
 *
 * Simple concept, difficult implementation
 *
 * Basically, replace the incore superblock counters with a distributed per cpu
 * counter for contended fields (e.g.  free block count).
 *
 * Difficulties arise in that the incore sb is used for ENOSPC checking, and
 * hence needs to be accurately read when we are running low on space. Hence
 * there is a method to enable and disable the per-cpu counters based on how
 * much "stuff" is available in them.
 *
 * Basically, a counter is enabled if there is enough free resource to justify
 * running a per-cpu fast-path. If the per-cpu counter runs out (i.e.
a local * ENOSPC), then we disable the counters to synchronise all callers and * re-distribute the available resources. * * If, once we redistributed the available resources, we still get a failure, * we disable the per-cpu counter and go through the slow path. * * The slow path is the current xfs_mod_incore_sb() function. This means that * when we disable a per-cpu counter, we need to drain it's resources back to * the global superblock. We do this after disabling the counter to prevent * more threads from queueing up on the counter. * * Essentially, this means that we still need a lock in the fast path to enable * synchronisation between the global counters and the per-cpu counters. This * is not a problem because the lock will be local to a CPU almost all the time * and have little contention except when we get to ENOSPC conditions. * * Basically, this lock becomes a barrier that enables us to lock out the fast * path while we do things like enabling and disabling counters and * synchronising the counters. * * Locking rules: * * 1. XFS_SB_LOCK() before picking up per-cpu locks * 2. per-cpu locks always picked up via for_each_online_cpu() order * 3. accurate counter sync requires XFS_SB_LOCK + per cpu locks * 4. modifying per-cpu counters requires holding per-cpu lock * 5. modifying global counters requires holding XFS_SB_LOCK * 6. enabling or disabling a counter requires holding the XFS_SB_LOCK * and _none_ of the per-cpu locks. * * Disabled counters are only ever re-enabled by a balance operation * that results in more free resources per CPU than a given threshold. * To ensure counters don't remain disabled, they are rebalanced when * the global resource goes above a higher threshold (i.e. some hysteresis * is present to prevent thrashing). */#ifdef CONFIG_HOTPLUG_CPU/* * hot-plug CPU notifier support. * * We need a notifier per filesystem as we need to be able to identify * the filesystem to balance the counters out. 
This is achieved by * having a notifier block embedded in the xfs_mount_t and doing pointer * magic to get the mount pointer from the notifier block address. */STATIC intxfs_icsb_cpu_notify( struct notifier_block *nfb, unsigned long action, void *hcpu){ xfs_icsb_cnts_t *cntp; xfs_mount_t *mp; int s; mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier); cntp = (xfs_icsb_cnts_t *) per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu); switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: /* Easy Case - initialize the area and locks, and * then rebalance when online does everything else for us. */ memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: xfs_icsb_lock(mp); xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0); xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0); xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0); xfs_icsb_unlock(mp); break; case CPU_DEAD: case CPU_DEAD_FROZEN:
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -