xfs_mount.c
		/*
		 * Disable all the counters, then fold the dead cpu's
		 * count into the total on the global superblock and
		 * re-enable the counters.
		 */
		xfs_icsb_lock(mp);
		s = XFS_SB_LOCK(mp);
		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);

		mp->m_sb.sb_icount += cntp->icsb_icount;
		mp->m_sb.sb_ifree += cntp->icsb_ifree;
		mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;

		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT,
					 XFS_ICSB_SB_LOCKED, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE,
					 XFS_ICSB_SB_LOCKED, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
					 XFS_ICSB_SB_LOCKED, 0);
		XFS_SB_UNLOCK(mp, s);
		xfs_icsb_unlock(mp);
		break;
	}

	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

int
xfs_icsb_init_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t	*cntp;
	int		i;

	mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
	if (mp->m_sb_cnts == NULL)
		return -ENOMEM;

#ifdef CONFIG_HOTPLUG_CPU
	mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
	mp->m_icsb_notifier.priority = 0;
	register_hotcpu_notifier(&mp->m_icsb_notifier);
#endif /* CONFIG_HOTPLUG_CPU */

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
	}

	mutex_init(&mp->m_icsb_mutex);

	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
	 */
	mp->m_icsb_counters = -1;
	return 0;
}

void
xfs_icsb_reinit_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_lock(mp);
	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
	 */
	mp->m_icsb_counters = -1;
	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
	xfs_icsb_unlock(mp);
}

STATIC void
xfs_icsb_destroy_counters(
	xfs_mount_t	*mp)
{
	if (mp->m_sb_cnts) {
		unregister_hotcpu_notifier(&mp->m_icsb_notifier);
		free_percpu(mp->m_sb_cnts);
	}
	mutex_destroy(&mp->m_icsb_mutex);
}

STATIC_INLINE void
xfs_icsb_lock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
		ndelay(1000);
	}
}

STATIC_INLINE void
xfs_icsb_unlock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
}

STATIC_INLINE void
xfs_icsb_lock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t	*cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_lock_cntr(cntp);
	}
}

STATIC_INLINE void
xfs_icsb_unlock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t	*cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_unlock_cntr(cntp);
	}
}

STATIC void
xfs_icsb_count(
	xfs_mount_t	*mp,
	xfs_icsb_cnts_t	*cnt,
	int		flags)
{
	xfs_icsb_cnts_t	*cntp;
	int		i;

	memset(cnt, 0, sizeof(xfs_icsb_cnts_t));

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_lock_all_counters(mp);

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		cnt->icsb_icount += cntp->icsb_icount;
		cnt->icsb_ifree += cntp->icsb_ifree;
		cnt->icsb_fdblocks += cntp->icsb_fdblocks;
	}

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_unlock_all_counters(mp);
}

STATIC int
xfs_icsb_counter_disabled(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
	return test_bit(field, &mp->m_icsb_counters);
}
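/*
 * For example: with four online CPUs holding icsb_fdblocks values of
 * 512, 512, 512 and 515, xfs_icsb_count() sums them to
 * cnt->icsb_fdblocks == 2051. Without XFS_ICSB_LAZY_COUNT every per-cpu
 * lock is held across the walk, so the sum is an exact snapshot; with
 * the flag set the walk takes no locks and the result may be stale.
 */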
STATIC int
xfs_icsb_disable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	xfs_icsb_cnts_t	cnt;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	/*
	 * If we are already disabled, then there is nothing to do
	 * here. We check before locking all the counters to avoid
	 * the expensive lock operation when being called in the
	 * slow path and the counter is already disabled. This is
	 * safe because the only time we set or clear this state is under
	 * the m_icsb_mutex.
	 */
	if (xfs_icsb_counter_disabled(mp, field))
		return 0;

	xfs_icsb_lock_all_counters(mp);
	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
		/* drain back to superblock */
		xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT);
		switch (field) {
		case XFS_SBS_ICOUNT:
			mp->m_sb.sb_icount = cnt.icsb_icount;
			break;
		case XFS_SBS_IFREE:
			mp->m_sb.sb_ifree = cnt.icsb_ifree;
			break;
		case XFS_SBS_FDBLOCKS:
			mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
			break;
		default:
			BUG();
		}
	}

	xfs_icsb_unlock_all_counters(mp);

	return 0;
}

STATIC void
xfs_icsb_enable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	uint64_t	count,
	uint64_t	resid)
{
	xfs_icsb_cnts_t	*cntp;
	int		i;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	xfs_icsb_lock_all_counters(mp);
	for_each_online_cpu(i) {
		cntp = per_cpu_ptr(mp->m_sb_cnts, i);
		switch (field) {
		case XFS_SBS_ICOUNT:
			cntp->icsb_icount = count + resid;
			break;
		case XFS_SBS_IFREE:
			cntp->icsb_ifree = count + resid;
			break;
		case XFS_SBS_FDBLOCKS:
			cntp->icsb_fdblocks = count + resid;
			break;
		default:
			BUG();
			break;
		}
		resid = 0;
	}
	clear_bit(field, &mp->m_icsb_counters);
	xfs_icsb_unlock_all_counters(mp);
}

void
xfs_icsb_sync_counters_flags(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_icsb_cnts_t	cnt;
	int		s;

	/* take the superblock lock unless the caller already holds it */
	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
		s = XFS_SB_LOCK(mp);

	xfs_icsb_count(mp, &cnt, flags);

	/* update mp->m_sb fields from the aggregated per-cpu counts */
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
		mp->m_sb.sb_icount = cnt.icsb_icount;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
		mp->m_sb.sb_ifree = cnt.icsb_ifree;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;

	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
		XFS_SB_UNLOCK(mp, s);
}

/*
 * Accurate update of per-cpu counters to incore superblock
 */
STATIC void
xfs_icsb_sync_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_sync_counters_flags(mp, 0);
}
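/*
 * For example: with the FDBLOCKS counter enabled and two online CPUs
 * holding icsb_fdblocks values of 100 and 150, xfs_icsb_sync_counters()
 * folds them into mp->m_sb.sb_fdblocks == 250. A disabled counter is
 * skipped because the superblock value is already authoritative while
 * the counter is disabled (see xfs_icsb_disable_counter() above).
 */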
/*
 * Balance and enable/disable counters as necessary.
 *
 * Thresholds for re-enabling counters are somewhat magic. Inode counts are
 * chosen to be the same number as a single on-disk allocation chunk per CPU,
 * and free blocks is a value far enough from zero that we aren't going to
 * thrash when we get near ENOSPC. We also need to supply a minimum we
 * require per cpu to prevent looping endlessly when xfs_alloc_space asks
 * for more than will be distributed to a single CPU but each CPU has
 * enough blocks to be reenabled.
 *
 * Note that we can be called when counters are already disabled.
 * xfs_icsb_disable_counter() optimises the counter locking in this case to
 * prevent locking every per-cpu counter needlessly.
 */
#define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
STATIC void
xfs_icsb_balance_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		flags,
	int		min_per_cpu)
{
	uint64_t	count, resid;
	int		weight = num_online_cpus();
	int		s;
	uint64_t	min = (uint64_t)min_per_cpu;

	if (!(flags & XFS_ICSB_SB_LOCKED))
		s = XFS_SB_LOCK(mp);

	/* disable counter and sync counter */
	xfs_icsb_disable_counter(mp, field);

	/* update counters - first CPU gets residual */
	switch (field) {
	case XFS_SBS_ICOUNT:
		count = mp->m_sb.sb_icount;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			goto out;
		break;
	case XFS_SBS_IFREE:
		count = mp->m_sb.sb_ifree;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			goto out;
		break;
	case XFS_SBS_FDBLOCKS:
		count = mp->m_sb.sb_fdblocks;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
			goto out;
		break;
	default:
		BUG();
		count = resid = 0;	/* quiet, gcc */
		break;
	}

	xfs_icsb_enable_counter(mp, field, count, resid);
out:
	if (!(flags & XFS_ICSB_SB_LOCKED))
		XFS_SB_UNLOCK(mp, s);
}
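/*
 * For example: with sb_fdblocks == 2051 and four online CPUs, do_div()
 * leaves count == 512 and resid == 3. Assuming XFS_ALLOC_SET_ASIDE(mp)
 * is zero, the re-enable threshold is 512, so the counter is re-enabled:
 * the first online CPU receives 512 + 3 = 515, the remaining three
 * receive 512 each, and the total of 2051 is preserved. Had sb_fdblocks
 * been 2047, count == 511 would fall below the threshold and the counter
 * would stay disabled, leaving the superblock value authoritative.
 */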
int
xfs_icsb_modify_counters(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	xfs_icsb_cnts_t	*icsbp;
	long long	lcounter;	/* long counter for 64 bit fields */
	int		cpu, ret = 0, s;

	might_sleep();
again:
	cpu = get_cpu();
	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);

	/*
	 * if the counter is disabled, go to the slow path
	 */
	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
		goto slow_path;
	xfs_icsb_lock_cntr(icsbp);
	if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock_cntr(icsbp);
		goto slow_path;
	}

	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = icsbp->icsb_icount;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_icount = lcounter;
		break;

	case XFS_SBS_IFREE:
		lcounter = icsbp->icsb_ifree;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_ifree = lcounter;
		break;

	case XFS_SBS_FDBLOCKS:
		BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);

		lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		break;
	default:
		BUG();
		break;
	}
	xfs_icsb_unlock_cntr(icsbp);
	put_cpu();
	return 0;

slow_path:
	put_cpu();

	/*
	 * serialise with a mutex so we don't burn lots of cpu on
	 * the superblock lock. We still need to hold the superblock
	 * lock, however, when we modify the global structures.
	 */
	xfs_icsb_lock(mp);

	/*
	 * Now running atomically.
	 *
	 * If the counter is enabled, someone has beaten us to rebalancing.
	 * Drop the lock and try again in the fast path....
	 */
	if (!(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock(mp);
		goto again;
	}

	/*
	 * The counter is currently disabled. Because we are
	 * running atomically here, we know a rebalance cannot
	 * be in progress. Hence we can go straight to operating
	 * on the global superblock. We do not call xfs_mod_incore_sb()
	 * here even though we need to get the SB_LOCK. Doing so
	 * will cause us to re-enter this function and deadlock.
	 * Hence we get the SB_LOCK ourselves and then call
	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
	 * directly on the global counters.
	 */
	s = XFS_SB_LOCK(mp);
	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
	XFS_SB_UNLOCK(mp, s);

	/*
	 * Now that we've modified the global superblock, we
	 * may be able to re-enable the distributed counters
	 * (e.g. lots of space just got freed). After that
	 * we are done.
	 */
	if (ret != ENOSPC)
		xfs_icsb_balance_counter(mp, field, 0, 0);
	xfs_icsb_unlock(mp);
	return ret;

balance_counter:
	xfs_icsb_unlock_cntr(icsbp);
	put_cpu();

	/*
	 * We may have multiple threads here if multiple per-cpu
	 * counters run dry at the same time. This will mean we can
	 * do more balances than strictly necessary but it is not
	 * the common slowpath case.
	 */
	xfs_icsb_lock(mp);

	/*
	 * running atomically.
	 *
	 * This will leave the counter in the correct state for future
	 * accesses. After the rebalance, we simply try again and our retry
	 * will either succeed through the fast path or slow path without
	 * another balance operation being required.
	 */
	xfs_icsb_balance_counter(mp, field, 0, delta);
	xfs_icsb_unlock(mp);
	goto again;
}
#endif
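The fast-path/slow-path structure of xfs_icsb_modify_counters() is a general technique for scalable counters rather than anything XFS-specific: updates normally touch only one CPU's shard, and when a shard cannot absorb a delta the counter is folded back into a single global value under heavier locking. The following is a minimal userspace sketch of that pattern, assuming POSIX threads; a fixed shard count stands in for num_online_cpus(), mutexes replace the bit-spinlock and XFS_SB_LOCK, and rebalancing/re-enabling is deliberately omitted, so once folded the counter stays global. All names here (pcpu_counter, pcpu_init, pcpu_disable, pcpu_add) are invented for illustration and do not exist in the kernel source above.

#include <pthread.h>
#include <stdint.h>
#include <stdbool.h>

#define NSHARDS 4			/* stands in for num_online_cpus() */

struct pcpu_counter {
	pthread_mutex_t	global_lock;	/* plays the role of XFS_SB_LOCK */
	int64_t		global;		/* authoritative while disabled */
	bool		disabled;	/* like a bit in m_icsb_counters */
	struct {
		pthread_mutex_t	lock;	/* like XFS_ICSB_FLAG_LOCK */
		int64_t		val;
	} shard[NSHARDS];
};

static void pcpu_init(struct pcpu_counter *c, int64_t total)
{
	pthread_mutex_init(&c->global_lock, NULL);
	c->global = 0;
	c->disabled = false;
	for (int i = 0; i < NSHARDS; i++) {
		pthread_mutex_init(&c->shard[i].lock, NULL);
		c->shard[i].val = total / NSHARDS;	/* crude balance */
	}
	c->shard[0].val += total % NSHARDS;	/* first shard gets residual */
}

/*
 * Fold every shard into the global value and disable the fast path.
 * Caller must hold global_lock; taking every shard lock mirrors
 * xfs_icsb_lock_all_counters().
 */
static void pcpu_disable(struct pcpu_counter *c)
{
	for (int i = 0; i < NSHARDS; i++)
		pthread_mutex_lock(&c->shard[i].lock);
	if (!c->disabled) {
		for (int i = 0; i < NSHARDS; i++) {
			c->global += c->shard[i].val;
			c->shard[i].val = 0;
		}
		c->disabled = true;
	}
	for (int i = 0; i < NSHARDS; i++)
		pthread_mutex_unlock(&c->shard[i].lock);
}

/*
 * Apply a delta without letting the counter go negative. Returns 0 on
 * success, -1 on ENOSPC-style failure, mirroring the structure (though
 * not the rebalancing) of xfs_icsb_modify_counters().
 */
static int pcpu_add(struct pcpu_counter *c, int shard, int64_t delta)
{
	int ret = 0;

	/* fast path: touch only this shard's lock and value */
	pthread_mutex_lock(&c->shard[shard].lock);
	if (!c->disabled && c->shard[shard].val + delta >= 0) {
		c->shard[shard].val += delta;
		pthread_mutex_unlock(&c->shard[shard].lock);
		return 0;
	}
	pthread_mutex_unlock(&c->shard[shard].lock);

	/* slow path: fold everything together and apply globally */
	pthread_mutex_lock(&c->global_lock);
	pcpu_disable(c);
	if (c->global + delta >= 0)
		c->global += delta;
	else
		ret = -1;	/* analogous to returning ENOSPC */
	pthread_mutex_unlock(&c->global_lock);
	return ret;
}

A thread consumes n units with pcpu_add(&c, my_shard, -n); in the kernel code the shard index comes from get_cpu(). The fast path is safe because pcpu_disable() sets the disabled flag only while holding every shard lock, so a thread that observes the flag clear under its own shard lock knows no fold is in progress for that shard, exactly the reasoning behind the double disabled-check in xfs_icsb_modify_counters().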